From ffdaf5c63fd13c1338d1cef783784f2e8a600b95 Mon Sep 17 00:00:00 2001
From: Knut Ahlers
Date: Mon, 29 May 2017 11:16:47 +0200
Subject: [PATCH] Migrate and update Godeps

Signed-off-by: Knut Ahlers
---
 Godeps/Godeps.json | 148 +-
 Godeps/_workspace/.gitignore | 2 -
 .../aws/aws-sdk-go/aws/awsutil/copy_test.go | 201 -
 .../aws-sdk-go/aws/awsutil/path_value_test.go | 68 -
 .../github.com/aws/aws-sdk-go/aws/config.go | 254 -
 .../aws/aws-sdk-go/aws/config_test.go | 87 -
 .../aws/aws-sdk-go/aws/convutil_test.go | 438 -
 .../aws/credentials/chain_provider_test.go | 73 -
 .../aws/credentials/credentials_test.go | 62 -
 .../aws/credentials/ec2_role_provider.go | 162 -
 .../aws/credentials/ec2_role_provider_test.go | 108 -
 .../aws/credentials/env_provider_test.go | 70 -
 .../shared_credentials_provider_test.go | 77 -
 .../aws/credentials/static_provider_test.go | 34 -
 .../stscreds/assume_role_provider.go | 120 -
 .../stscreds/assume_role_provider_test.go | 58 -
 .../aws/aws-sdk-go/aws/handler_functions.go | 157 -
 .../aws-sdk-go/aws/handler_functions_test.go | 81 -
 .../github.com/aws/aws-sdk-go/aws/handlers.go | 85 -
 .../aws/aws-sdk-go/aws/handlers_test.go | 31 -
 .../aws/aws-sdk-go/aws/param_validator.go | 89 -
 .../aws-sdk-go/aws/param_validator_test.go | 84 -
 .../github.com/aws/aws-sdk-go/aws/request.go | 312 -
 .../aws-sdk-go/aws/request_pagination_test.go | 305 -
 .../aws/aws-sdk-go/aws/request_test.go | 225 -
 .../github.com/aws/aws-sdk-go/aws/service.go | 194 -
 .../github.com/aws/aws-sdk-go/aws/types.go | 55 -
 .../internal/endpoints/endpoints.go | 31 -
 .../internal/endpoints/endpoints.json | 77 -
 .../internal/endpoints/endpoints_map.go | 89 -
 .../internal/endpoints/endpoints_test.go | 28 -
 .../internal/protocol/query/build_test.go | 1482 --
 .../internal/protocol/query/unmarshal.go | 29 -
 .../protocol/query/unmarshal_error.go | 33 -
 .../internal/protocol/query/unmarshal_test.go | 1418 --
 .../internal/protocol/rest/build.go | 208 -
 .../internal/protocol/restxml/build_test.go | 2717 ---
 .../internal/protocol/restxml/restxml.go | 55 -
 .../protocol/restxml/unmarshal_test.go | 1310 --
 .../internal/signer/v4/functional_test.go | 43 -
 .../aws/aws-sdk-go/internal/signer/v4/v4.go | 364 -
 .../aws-sdk-go/internal/signer/v4/v4_test.go | 245 -
 .../aws/aws-sdk-go/service/s3/api.go | 6637 ------
 .../aws-sdk-go/service/s3/bucket_location.go | 42 -
 .../service/s3/bucket_location_test.go | 75 -
 .../aws-sdk-go/service/s3/customizations.go | 32 -
 .../service/s3/customizations_test.go | 90 -
 .../aws-sdk-go/service/s3/examples_test.go | 1928 --
 .../service/s3/host_style_bucket.go | 53 -
 .../service/s3/host_style_bucket_test.go | 61 -
 .../service/s3/s3iface/interface.go | 236 -
 .../service/s3/s3iface/interface_test.go | 15 -
 .../service/s3/s3manager/download.go | 257 -
 .../service/s3/s3manager/download_test.go | 165 -
 .../aws-sdk-go/service/s3/s3manager/upload.go | 562 -
 .../service/s3/s3manager/upload_test.go | 438 -
 .../aws/aws-sdk-go/service/s3/service.go | 57 -
 .../aws/aws-sdk-go/service/s3/sse_test.go | 81 -
 .../aws-sdk-go/service/s3/unmarshal_error.go | 42 -
 .../service/s3/unmarshal_error_test.go | 53 -
 .../src/github.com/spf13/cobra/.travis.yml | 8 -
 .../src/github.com/spf13/cobra/README.md | 485 -
 .../spf13/cobra/bash_completions.go | 370 -
 .../spf13/cobra/bash_completions_test.go | 80 -
 .../src/github.com/spf13/cobra/cobra.go | 112 -
 .../src/github.com/spf13/cobra/cobra_test.go | 965 -
 .../github.com/spf13/cobra/command_test.go | 90 -
 .../src/github.com/spf13/cobra/md_docs.go | 138 -
 .../src/github.com/spf13/cobra/md_docs.md
| 81 - .../github.com/spf13/cobra/md_docs_test.go | 67 - .../src/github.com/spf13/pflag/.travis.yml | 8 - .../src/github.com/spf13/pflag/bool_test.go | 180 - .../github.com/spf13/pflag/example_test.go | 77 - .../src/github.com/spf13/pflag/export_test.go | 29 - .../src/github.com/spf13/pflag/flag_test.go | 755 - .../github.com/spf13/pflag/int_slice_test.go | 49 - .../spf13/pflag/string_slice_test.go | 44 - .../src/github.com/vaughan0/go-ini/LICENSE | 14 - .../src/github.com/vaughan0/go-ini/README.md | 70 - .../src/github.com/vaughan0/go-ini/ini.go | 123 - .../vaughan0/go-ini/ini_linux_test.go | 43 - .../github.com/vaughan0/go-ini/ini_test.go | 89 - .../src/github.com/vaughan0/go-ini/test.ini | 2 - s3.go | 4 +- vendor/github.com/aws/aws-sdk-go/LICENSE.txt | 202 + vendor/github.com/aws/aws-sdk-go/NOTICE.txt | 3 + .../aws/aws-sdk-go/aws/awserr/error.go | 56 +- .../aws/aws-sdk-go/aws/awserr/types.go | 87 +- .../aws/aws-sdk-go/aws/awsutil/copy.go | 21 +- .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 + .../aws/aws-sdk-go/aws/awsutil/path_value.go | 117 +- .../aws/aws-sdk-go/aws/awsutil/prettify.go | 10 + .../aws-sdk-go/aws/awsutil/string_value.go | 89 + .../aws/aws-sdk-go/aws/client/client.go | 90 + .../aws-sdk-go/aws/client/default_retryer.go | 96 + .../aws/aws-sdk-go/aws/client/logger.go | 108 + .../aws/client/metadata/client_info.go | 12 + .../github.com/aws/aws-sdk-go/aws/config.go | 470 + .../github.com/aws/aws-sdk-go/aws/context.go | 71 + .../aws/aws-sdk-go/aws/context_1_6.go | 41 + .../aws/aws-sdk-go/aws/context_1_7.go | 9 + .../aws/aws-sdk-go/aws/convert_types.go | 24 +- .../aws-sdk-go/aws/corehandlers/handlers.go | 242 + .../aws/corehandlers/param_validator.go | 17 + .../aws/credentials/chain_provider.go | 43 +- .../aws-sdk-go/aws/credentials/credentials.go | 40 +- .../ec2rolecreds/ec2_role_provider.go | 178 + .../aws/credentials/endpointcreds/provider.go | 191 + .../aws/credentials/env_provider.go | 9 +- .../aws-sdk-go/aws/credentials/example.ini | 4 + .../shared_credentials_provider.go | 54 +- .../aws/credentials/static_provider.go | 17 +- .../stscreds/assume_role_provider.go | 298 + .../aws/aws-sdk-go/aws/defaults/defaults.go | 163 + vendor/github.com/aws/aws-sdk-go/aws/doc.go | 56 + .../aws/aws-sdk-go/aws/ec2metadata/api.go | 162 + .../aws/aws-sdk-go/aws/ec2metadata/service.go | 124 + .../aws/aws-sdk-go/aws/endpoints/decode.go | 133 + .../aws/aws-sdk-go/aws/endpoints/defaults.go | 2221 ++ .../aws/aws-sdk-go/aws/endpoints/doc.go | 66 + .../aws/aws-sdk-go/aws/endpoints/endpoints.go | 439 + .../aws/aws-sdk-go/aws/endpoints/v3model.go | 303 + .../aws/endpoints/v3model_codegen.go | 337 + .../github.com/aws/aws-sdk-go/aws/errors.go | 17 + .../aws/aws-sdk-go/aws/jsonvalue.go | 12 + .../github.com/aws/aws-sdk-go/aws/logger.go | 23 + .../aws/request/connection_reset_error.go | 19 + .../request/connection_reset_error_other.go | 11 + .../aws/aws-sdk-go/aws/request/handlers.go | 256 + .../aws-sdk-go/aws/request/http_request.go | 24 + .../aws-sdk-go/aws/request/offset_reader.go | 58 + .../aws/aws-sdk-go/aws/request/request.go | 575 + .../aws/aws-sdk-go/aws/request/request_1_7.go | 39 + .../aws/aws-sdk-go/aws/request/request_1_8.go | 33 + .../aws-sdk-go/aws/request/request_context.go | 14 + .../aws/request/request_context_1_6.go | 14 + .../aws/request/request_pagination.go | 236 + .../aws/aws-sdk-go/aws/request/retryer.go | 161 + .../aws/request/timeout_read_closer.go | 94 + .../aws/aws-sdk-go/aws/request/validation.go | 234 + .../aws/aws-sdk-go/aws/request/waiter.go | 287 + 
.../aws/aws-sdk-go/aws/session/doc.go | 273 + .../aws/aws-sdk-go/aws/session/env_config.go | 208 + .../aws/aws-sdk-go/aws/session/session.go | 599 + .../aws-sdk-go/aws/session/shared_config.go | 295 + .../aws-sdk-go/aws/signer/v4/header_rules.go | 82 + .../aws/aws-sdk-go/aws/signer/v4/options.go | 7 + .../aws/aws-sdk-go/aws/signer/v4/uri_path.go | 24 + .../aws/aws-sdk-go/aws/signer/v4/v4.go | 761 + vendor/github.com/aws/aws-sdk-go/aws/types.go | 118 + vendor/github.com/aws/aws-sdk-go/aws/url.go | 12 + .../github.com/aws/aws-sdk-go/aws/url_1_7.go | 29 + .../github.com/aws/aws-sdk-go/aws/version.go | 2 +- .../private/protocol/idempotency.go | 75 + .../private}/protocol/query/build.go | 15 +- .../protocol/query/queryutil/queryutil.go | 26 +- .../private/protocol/query/unmarshal.go | 35 + .../private/protocol/query/unmarshal_error.go | 66 + .../aws-sdk-go/private/protocol/rest/build.go | 290 + .../private}/protocol/rest/payload.go | 4 +- .../private}/protocol/rest/unmarshal.go | 73 +- .../private/protocol/restxml/restxml.go | 69 + .../aws-sdk-go/private/protocol/unmarshal.go | 21 + .../private}/protocol/xml/xmlutil/build.go | 25 +- .../protocol/xml/xmlutil/unmarshal.go | 16 +- .../protocol/xml/xmlutil/xml_to_struct.go | 50 +- .../aws/aws-sdk-go/service/s3/api.go | 19245 ++++++++++++++++ .../aws-sdk-go/service/s3/bucket_location.go | 106 + .../aws/aws-sdk-go/service/s3/content_md5.go | 4 +- .../aws-sdk-go/service/s3/customizations.go | 46 + .../aws/aws-sdk-go/service/s3/doc.go | 78 + .../aws/aws-sdk-go/service/s3/doc_custom.go | 109 + .../aws/aws-sdk-go/service/s3/errors.go | 48 + .../service/s3/host_style_bucket.go | 162 + .../service/s3/platform_handlers.go | 8 + .../service/s3/platform_handlers_go1.6.go | 28 + .../aws/aws-sdk-go/service/s3/service.go | 93 + .../aws/aws-sdk-go/service/s3/sse.go | 8 +- .../aws-sdk-go/service/s3/statusok_error.go | 35 + .../aws-sdk-go/service/s3/unmarshal_error.go | 103 + .../aws/aws-sdk-go/service/s3/waiters.go | 214 + .../aws/aws-sdk-go/service/sts/api.go | 2365 ++ .../aws-sdk-go/service/sts/customizations.go | 12 + .../aws/aws-sdk-go/service/sts/doc.go | 124 + .../aws/aws-sdk-go/service/sts/errors.go | 73 + .../aws/aws-sdk-go/service/sts/service.go | 93 + vendor/github.com/go-ini/ini/.gitignore | 5 + vendor/github.com/go-ini/ini/.travis.yml | 16 + vendor/github.com/go-ini/ini/LICENSE | 191 + vendor/github.com/go-ini/ini/Makefile | 12 + vendor/github.com/go-ini/ini/README.md | 740 + vendor/github.com/go-ini/ini/README_ZH.md | 727 + vendor/github.com/go-ini/ini/error.go | 32 + vendor/github.com/go-ini/ini/ini.go | 538 + vendor/github.com/go-ini/ini/key.go | 633 + vendor/github.com/go-ini/ini/parser.go | 359 + vendor/github.com/go-ini/ini/section.go | 232 + vendor/github.com/go-ini/ini/struct.go | 431 + .../inconshreveable/mousetrap/LICENSE | 0 .../inconshreveable/mousetrap/README.md | 0 .../inconshreveable/mousetrap/trap_others.go | 0 .../inconshreveable/mousetrap/trap_windows.go | 0 .../mousetrap/trap_windows_1.4.go | 0 .../jmespath/go-jmespath/.gitignore | 4 + .../jmespath/go-jmespath/.travis.yml | 9 + .../github.com/jmespath/go-jmespath/LICENSE | 13 + .../github.com/jmespath/go-jmespath/Makefile | 44 + .../github.com/jmespath/go-jmespath/README.md | 7 + vendor/github.com/jmespath/go-jmespath/api.go | 49 + .../go-jmespath/astnodetype_string.go | 16 + .../jmespath/go-jmespath/functions.go | 842 + .../jmespath/go-jmespath/interpreter.go | 418 + .../github.com/jmespath/go-jmespath/lexer.go | 420 + .../github.com/jmespath/go-jmespath/parser.go | 603 + 
.../jmespath/go-jmespath/toktype_string.go | 16 + .../github.com/jmespath/go-jmespath/util.go | 185 + .../github.com/spf13/cobra/.gitignore | 12 + vendor/github.com/spf13/cobra/.mailmap | 3 + vendor/github.com/spf13/cobra/.travis.yml | 24 + .../github.com/spf13/cobra/LICENSE.txt | 0 vendor/github.com/spf13/cobra/README.md | 898 + .../spf13/cobra/bash_completions.go | 641 + .../spf13/cobra/bash_completions.md | 61 +- vendor/github.com/spf13/cobra/cobra.go | 173 + .../github.com/spf13/cobra/command.go | 614 +- .../github.com/spf13/cobra/command_notwin.go | 5 + vendor/github.com/spf13/cobra/command_win.go | 26 + vendor/github.com/spf13/pflag/.travis.yml | 21 + .../github.com/spf13/pflag/LICENSE | 0 .../github.com/spf13/pflag/README.md | 49 +- .../github.com/spf13/pflag/bool.go | 15 +- vendor/github.com/spf13/pflag/count.go | 94 + .../github.com/spf13/pflag/duration.go | 8 +- .../github.com/spf13/pflag/flag.go | 456 +- .../github.com/spf13/pflag/float32.go | 15 +- .../github.com/spf13/pflag/float64.go | 15 +- vendor/github.com/spf13/pflag/golangflag.go | 104 + .../github.com/spf13/pflag/int.go | 15 +- .../github.com/spf13/pflag/int32.go | 15 +- .../github.com/spf13/pflag/int64.go | 15 +- .../github.com/spf13/pflag/int8.go | 15 +- .../github.com/spf13/pflag/int_slice.go | 41 +- .../github.com/spf13/pflag/ip.go | 13 +- .../github.com/spf13/pflag/ipmask.go | 10 +- vendor/github.com/spf13/pflag/ipnet.go | 100 + .../github.com/spf13/pflag/string.go | 12 +- vendor/github.com/spf13/pflag/string_array.go | 110 + .../github.com/spf13/pflag/string_slice.go | 74 +- .../github.com/spf13/pflag/uint.go | 15 +- .../github.com/spf13/pflag/uint16.go | 17 +- .../github.com/spf13/pflag/uint32.go | 19 +- .../github.com/spf13/pflag/uint64.go | 15 +- .../github.com/spf13/pflag/uint8.go | 15 +- 253 files changed, 44614 insertions(+), 26945 deletions(-) delete mode 100644 Godeps/_workspace/.gitignore delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convutil_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions_test.go delete mode 100644 
Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_pagination_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_error.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/build_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/restxml.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/unmarshal_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/functional_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/api.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go delete mode 100644 
Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go delete mode 100644 Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/README.md delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/bash_completions.go delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/bash_completions_test.go delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/cobra.go delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/cobra_test.go delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/command_test.go delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/md_docs.go delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/md_docs.md delete mode 100644 Godeps/_workspace/src/github.com/spf13/cobra/md_docs_test.go delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/.travis.yml delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/bool_test.go delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/example_test.go delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/export_test.go delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/int_slice_test.go delete mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/string_slice_test.go delete mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE delete mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/README.md delete mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go delete mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go delete mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go delete mode 100644 Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini create mode 100644 vendor/github.com/aws/aws-sdk-go/LICENSE.txt create mode 100644 vendor/github.com/aws/aws-sdk-go/NOTICE.txt rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/aws/awserr/error.go (66%) rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/aws/awserr/types.go (63%) rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/aws/awsutil/copy.go (85%) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go (57%) rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go (92%) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/client.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/logger.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/config.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/context.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go rename Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convutil.go => vendor/github.com/aws/aws-sdk-go/aws/convert_types.go (89%) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go (69%) rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/aws/credentials/credentials.go (84%) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go (86%) rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/aws/credentials/example.ini (69%) rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go (66%) rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go (60%) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/aws/logger.go (76%) create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go create mode 100644 
vendor/github.com/aws/aws-sdk-go/aws/request/validation.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/session.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/types.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url.go create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/aws/version.go (87%) create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go rename {Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal => vendor/github.com/aws/aws-sdk-go/private}/protocol/query/build.go (55%) rename {Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal => vendor/github.com/aws/aws-sdk-go/private}/protocol/query/queryutil/queryutil.go (90%) create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go rename {Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal => vendor/github.com/aws/aws-sdk-go/private}/protocol/rest/payload.go (89%) rename {Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal => vendor/github.com/aws/aws-sdk-go/private}/protocol/rest/unmarshal.go (63%) create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go rename {Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal => vendor/github.com/aws/aws-sdk-go/private}/protocol/xml/xmlutil/build.go (95%) rename {Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal => vendor/github.com/aws/aws-sdk-go/private}/protocol/xml/xmlutil/unmarshal.go (95%) rename {Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal => vendor/github.com/aws/aws-sdk-go/private}/protocol/xml/xmlutil/xml_to_struct.go (73%) create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/service/s3/content_md5.go (91%) create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/service.go rename {Godeps/_workspace/src => vendor}/github.com/aws/aws-sdk-go/service/s3/sse.go (80%) 
create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/api.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/doc.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/errors.go create mode 100644 vendor/github.com/aws/aws-sdk-go/service/sts/service.go create mode 100644 vendor/github.com/go-ini/ini/.gitignore create mode 100644 vendor/github.com/go-ini/ini/.travis.yml create mode 100644 vendor/github.com/go-ini/ini/LICENSE create mode 100644 vendor/github.com/go-ini/ini/Makefile create mode 100644 vendor/github.com/go-ini/ini/README.md create mode 100644 vendor/github.com/go-ini/ini/README_ZH.md create mode 100644 vendor/github.com/go-ini/ini/error.go create mode 100644 vendor/github.com/go-ini/ini/ini.go create mode 100644 vendor/github.com/go-ini/ini/key.go create mode 100644 vendor/github.com/go-ini/ini/parser.go create mode 100644 vendor/github.com/go-ini/ini/section.go create mode 100644 vendor/github.com/go-ini/ini/struct.go rename {Godeps/_workspace/src => vendor}/github.com/inconshreveable/mousetrap/LICENSE (100%) rename {Godeps/_workspace/src => vendor}/github.com/inconshreveable/mousetrap/README.md (100%) rename {Godeps/_workspace/src => vendor}/github.com/inconshreveable/mousetrap/trap_others.go (100%) rename {Godeps/_workspace/src => vendor}/github.com/inconshreveable/mousetrap/trap_windows.go (100%) rename {Godeps/_workspace/src => vendor}/github.com/inconshreveable/mousetrap/trap_windows_1.4.go (100%) create mode 100644 vendor/github.com/jmespath/go-jmespath/.gitignore create mode 100644 vendor/github.com/jmespath/go-jmespath/.travis.yml create mode 100644 vendor/github.com/jmespath/go-jmespath/LICENSE create mode 100644 vendor/github.com/jmespath/go-jmespath/Makefile create mode 100644 vendor/github.com/jmespath/go-jmespath/README.md create mode 100644 vendor/github.com/jmespath/go-jmespath/api.go create mode 100644 vendor/github.com/jmespath/go-jmespath/astnodetype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/functions.go create mode 100644 vendor/github.com/jmespath/go-jmespath/interpreter.go create mode 100644 vendor/github.com/jmespath/go-jmespath/lexer.go create mode 100644 vendor/github.com/jmespath/go-jmespath/parser.go create mode 100644 vendor/github.com/jmespath/go-jmespath/toktype_string.go create mode 100644 vendor/github.com/jmespath/go-jmespath/util.go rename {Godeps/_workspace/src => vendor}/github.com/spf13/cobra/.gitignore (56%) create mode 100644 vendor/github.com/spf13/cobra/.mailmap create mode 100644 vendor/github.com/spf13/cobra/.travis.yml rename {Godeps/_workspace/src => vendor}/github.com/spf13/cobra/LICENSE.txt (100%) create mode 100644 vendor/github.com/spf13/cobra/README.md create mode 100644 vendor/github.com/spf13/cobra/bash_completions.go rename {Godeps/_workspace/src => vendor}/github.com/spf13/cobra/bash_completions.md (73%) create mode 100644 vendor/github.com/spf13/cobra/cobra.go rename {Godeps/_workspace/src => vendor}/github.com/spf13/cobra/command.go (59%) create mode 100644 vendor/github.com/spf13/cobra/command_notwin.go create mode 100644 vendor/github.com/spf13/cobra/command_win.go create mode 100644 vendor/github.com/spf13/pflag/.travis.yml rename 
{Godeps/_workspace/src => vendor}/github.com/spf13/pflag/LICENSE (100%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/README.md (76%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/bool.go (84%) create mode 100644 vendor/github.com/spf13/pflag/count.go rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/duration.go (87%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/flag.go (56%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/float32.go (83%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/float64.go (83%) create mode 100644 vendor/github.com/spf13/pflag/golangflag.go rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/int.go (83%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/int32.go (83%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/int64.go (83%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/int8.go (83%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/int_slice.go (75%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/ip.go (85%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/ipmask.go (88%) create mode 100644 vendor/github.com/spf13/pflag/ipnet.go rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/string.go (84%) create mode 100644 vendor/github.com/spf13/pflag/string_array.go rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/string_slice.go (62%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/uint.go (83%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/uint16.go (83%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/uint32.go (83%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/uint64.go (83%) rename {Godeps/_workspace/src => vendor}/github.com/spf13/pflag/uint8.go (83%) diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index 626fbe8..f508737 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -1,62 +1,152 @@ { "ImportPath": "github.com/Luzifer/s3sync", - "GoVersion": "go1.4.2", + "GoVersion": "go1.8", + "GodepVersion": "v79", + "Packages": [ + "./..." 
+ ], "Deps": [ { "ImportPath": "github.com/aws/aws-sdk-go/aws", - "Comment": "v0.7.1-4-g1c75632", - "Rev": "1c75632eb9b77bb3d8e6416fbd8f3e98fc1367db" + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" }, { - "ImportPath": "github.com/aws/aws-sdk-go/internal/endpoints", - "Comment": "v0.7.1-4-g1c75632", - "Rev": "1c75632eb9b77bb3d8e6416fbd8f3e98fc1367db" + "ImportPath": "github.com/aws/aws-sdk-go/aws/awserr", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" }, { - "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/query", - "Comment": "v0.7.1-4-g1c75632", - "Rev": "1c75632eb9b77bb3d8e6416fbd8f3e98fc1367db" + "ImportPath": "github.com/aws/aws-sdk-go/aws/awsutil", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" }, { - "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/rest", - "Comment": "v0.7.1-4-g1c75632", - "Rev": "1c75632eb9b77bb3d8e6416fbd8f3e98fc1367db" + "ImportPath": "github.com/aws/aws-sdk-go/aws/client", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" }, { - "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restxml", - "Comment": "v0.7.1-4-g1c75632", - "Rev": "1c75632eb9b77bb3d8e6416fbd8f3e98fc1367db" + "ImportPath": "github.com/aws/aws-sdk-go/aws/client/metadata", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" }, { - "ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil", - "Comment": "v0.7.1-4-g1c75632", - "Rev": "1c75632eb9b77bb3d8e6416fbd8f3e98fc1367db" + "ImportPath": "github.com/aws/aws-sdk-go/aws/corehandlers", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" }, { - "ImportPath": "github.com/aws/aws-sdk-go/internal/signer/v4", - "Comment": "v0.7.1-4-g1c75632", - "Rev": "1c75632eb9b77bb3d8e6416fbd8f3e98fc1367db" + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/credentials/stscreds", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/defaults", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/ec2metadata", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/endpoints", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/request", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/session", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/aws/signer/v4", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": 
"github.com/aws/aws-sdk-go/private/protocol", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" }, { "ImportPath": "github.com/aws/aws-sdk-go/service/s3", - "Comment": "v0.7.1-4-g1c75632", - "Rev": "1c75632eb9b77bb3d8e6416fbd8f3e98fc1367db" + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/aws/aws-sdk-go/service/sts", + "Comment": "v1.8.30-1-g26d4122", + "Rev": "26d4122e877bd86f43d528ad7fbde237dbbd3fa9" + }, + { + "ImportPath": "github.com/go-ini/ini", + "Comment": "v1.24.0", + "Rev": "e3c2d47c61e5333f9aa2974695dd94396eb69c75" }, { "ImportPath": "github.com/inconshreveable/mousetrap", "Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" }, + { + "ImportPath": "github.com/jmespath/go-jmespath", + "Comment": "0.2.2-14-gbd40a43", + "Rev": "bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d" + }, { "ImportPath": "github.com/spf13/cobra", - "Rev": "385fc87e4343efec233811d3d933509e8975d11a" + "Rev": "9c28e4bbd74e5c3ed7aacbc552b2cab7cfdfe744" }, { "ImportPath": "github.com/spf13/pflag", - "Rev": "67cbc198fd11dab704b214c1e629a97af392c085" - }, - { - "ImportPath": "github.com/vaughan0/go-ini", - "Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1" + "Rev": "c7e63cf4530bcd3ba943729cee0efeff2ebea63f" } ] } diff --git a/Godeps/_workspace/.gitignore b/Godeps/_workspace/.gitignore deleted file mode 100644 index f037d68..0000000 --- a/Godeps/_workspace/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -/pkg -/bin diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go deleted file mode 100644 index 4f26241..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy_test.go +++ /dev/null @@ -1,201 +0,0 @@ -package awsutil_test - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "testing" - - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/stretchr/testify/assert" -) - -func ExampleCopy() { - type Foo struct { - A int - B []*string - } - - // Create the initial value - str1 := "hello" - str2 := "bye bye" - f1 := &Foo{A: 1, B: []*string{&str1, &str2}} - - // Do the copy - var f2 Foo - awsutil.Copy(&f2, f1) - - // Print the result - fmt.Println(awsutil.Prettify(f2)) - - // Output: - // { - // A: 1, - // B: ["hello","bye bye"] - // } -} - -func TestCopy(t *testing.T) { - type Foo struct { - A int - B []*string - C map[string]*int - } - - // Create the initial value - str1 := "hello" - str2 := "bye bye" - int1 := 1 - int2 := 2 - f1 := &Foo{ - A: 1, - B: []*string{&str1, &str2}, - C: map[string]*int{ - "A": &int1, - "B": &int2, - }, - } - - // Do the copy - var f2 Foo - 
awsutil.Copy(&f2, f1) - - // Values are equal - assert.Equal(t, f2.A, f1.A) - assert.Equal(t, f2.B, f1.B) - assert.Equal(t, f2.C, f1.C) - - // But pointers are not! - str3 := "nothello" - int3 := 57 - f2.A = 100 - f2.B[0] = &str3 - f2.C["B"] = &int3 - assert.NotEqual(t, f2.A, f1.A) - assert.NotEqual(t, f2.B, f1.B) - assert.NotEqual(t, f2.C, f1.C) -} - -func TestCopyIgnoreNilMembers(t *testing.T) { - type Foo struct { - A *string - B []string - C map[string]string - } - - f := &Foo{} - assert.Nil(t, f.A) - assert.Nil(t, f.B) - assert.Nil(t, f.C) - - var f2 Foo - awsutil.Copy(&f2, f) - assert.Nil(t, f2.A) - assert.Nil(t, f2.B) - assert.Nil(t, f2.C) - - fcopy := awsutil.CopyOf(f) - f3 := fcopy.(*Foo) - assert.Nil(t, f3.A) - assert.Nil(t, f3.B) - assert.Nil(t, f3.C) -} - -func TestCopyPrimitive(t *testing.T) { - str := "hello" - var s string - awsutil.Copy(&s, &str) - assert.Equal(t, "hello", s) -} - -func TestCopyNil(t *testing.T) { - var s string - awsutil.Copy(&s, nil) - assert.Equal(t, "", s) -} - -func TestCopyReader(t *testing.T) { - var buf io.Reader = bytes.NewReader([]byte("hello world")) - var r io.Reader - awsutil.Copy(&r, buf) - b, err := ioutil.ReadAll(r) - assert.NoError(t, err) - assert.Equal(t, []byte("hello world"), b) - - // empty bytes because this is not a deep copy - b, err = ioutil.ReadAll(buf) - assert.NoError(t, err) - assert.Equal(t, []byte(""), b) -} - -func TestCopyDifferentStructs(t *testing.T) { - type SrcFoo struct { - A int - B []*string - C map[string]*int - SrcUnique string - SameNameDiffType int - } - type DstFoo struct { - A int - B []*string - C map[string]*int - DstUnique int - SameNameDiffType string - } - - // Create the initial value - str1 := "hello" - str2 := "bye bye" - int1 := 1 - int2 := 2 - f1 := &SrcFoo{ - A: 1, - B: []*string{&str1, &str2}, - C: map[string]*int{ - "A": &int1, - "B": &int2, - }, - SrcUnique: "unique", - SameNameDiffType: 1, - } - - // Do the copy - var f2 DstFoo - awsutil.Copy(&f2, f1) - - // Values are equal - assert.Equal(t, f2.A, f1.A) - assert.Equal(t, f2.B, f1.B) - assert.Equal(t, f2.C, f1.C) - assert.Equal(t, "unique", f1.SrcUnique) - assert.Equal(t, 1, f1.SameNameDiffType) - assert.Equal(t, 0, f2.DstUnique) - assert.Equal(t, "", f2.SameNameDiffType) -} - -func ExampleCopyOf() { - type Foo struct { - A int - B []*string - } - - // Create the initial value - str1 := "hello" - str2 := "bye bye" - f1 := &Foo{A: 1, B: []*string{&str1, &str2}} - - // Do the copy - v := awsutil.CopyOf(f1) - var f2 *Foo = v.(*Foo) - - // Print the result - fmt.Println(awsutil.Prettify(f2)) - - // Output: - // { - // A: 1, - // B: ["hello","bye bye"] - // } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go deleted file mode 100644 index 0da6b06..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package awsutil_test - -import ( - "testing" - - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/stretchr/testify/assert" -) - -type Struct struct { - A []Struct - z []Struct - B *Struct - D *Struct - C string -} - -var data = Struct{ - A: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}}, - z: []Struct{{C: "value1"}, {C: "value2"}, {C: "value3"}}, - B: &Struct{B: &Struct{C: "terminal"}, D: &Struct{C: "terminal2"}}, - C: "initial", -} - -func TestValueAtPathSuccess(t *testing.T) { - assert.Equal(t, []interface{}{"initial"}, awsutil.ValuesAtPath(data, 
"C")) - assert.Equal(t, []interface{}{"value1"}, awsutil.ValuesAtPath(data, "A[0].C")) - assert.Equal(t, []interface{}{"value2"}, awsutil.ValuesAtPath(data, "A[1].C")) - assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtPath(data, "A[2].C")) - assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtAnyPath(data, "a[2].c")) - assert.Equal(t, []interface{}{"value3"}, awsutil.ValuesAtPath(data, "A[-1].C")) - assert.Equal(t, []interface{}{"value1", "value2", "value3"}, awsutil.ValuesAtPath(data, "A[].C")) - assert.Equal(t, []interface{}{"terminal"}, awsutil.ValuesAtPath(data, "B . B . C")) - assert.Equal(t, []interface{}{"terminal", "terminal2"}, awsutil.ValuesAtPath(data, "B.*.C")) - assert.Equal(t, []interface{}{"initial"}, awsutil.ValuesAtPath(data, "A.D.X || C")) -} - -func TestValueAtPathFailure(t *testing.T) { - assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, "C.x")) - assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, ".x")) - assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "X.Y.Z")) - assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "A[100].C")) - assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "A[3].C")) - assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(data, "B.B.C.Z")) - assert.Equal(t, []interface{}(nil), awsutil.ValuesAtPath(data, "z[-1].C")) - assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(nil, "A.B.C")) - assert.Equal(t, []interface{}{}, awsutil.ValuesAtPath(Struct{}, "A")) -} - -func TestSetValueAtPathSuccess(t *testing.T) { - var s Struct - awsutil.SetValueAtPath(&s, "C", "test1") - awsutil.SetValueAtPath(&s, "B.B.C", "test2") - awsutil.SetValueAtPath(&s, "B.D.C", "test3") - assert.Equal(t, "test1", s.C) - assert.Equal(t, "test2", s.B.B.C) - assert.Equal(t, "test3", s.B.D.C) - - awsutil.SetValueAtPath(&s, "B.*.C", "test0") - assert.Equal(t, "test0", s.B.B.C) - assert.Equal(t, "test0", s.B.D.C) - - var s2 Struct - awsutil.SetValueAtAnyPath(&s2, "b.b.c", "test0") - assert.Equal(t, "test0", s2.B.B.C) - awsutil.SetValueAtAnyPath(&s2, "A", []Struct{{}}) - assert.Equal(t, []Struct{{}}, s2.A) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go deleted file mode 100644 index c27bdd3..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config.go +++ /dev/null @@ -1,254 +0,0 @@ -package aws - -import ( - "net/http" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" -) - -// DefaultChainCredentials is a Credentials which will find the first available -// credentials Value from the list of Providers. -// -// This should be used in the default case. Once the type of credentials are -// known switching to the specific Credentials will be more efficient. -var DefaultChainCredentials = credentials.NewChainCredentials( - []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - &credentials.EC2RoleProvider{ExpiryWindow: 5 * time.Minute}, - }) - -// The default number of retries for a service. The value of -1 indicates that -// the service specific retry default will be used. -const DefaultRetries = -1 - -// DefaultConfig is the default all service configuration will be based off of. -// By default, all clients use this structure for initialization options unless -// a custom configuration object is passed in. -// -// You may modify this global structure to change all default configuration -// in the SDK. 
Note that configuration options are copied by value, so any -// modifications must happen before constructing a client. -var DefaultConfig = NewConfig(). - WithCredentials(DefaultChainCredentials). - WithRegion(os.Getenv("AWS_REGION")). - WithHTTPClient(http.DefaultClient). - WithMaxRetries(DefaultRetries). - WithLogger(NewDefaultLogger()). - WithLogLevel(LogOff) - -// A Config provides service configuration for service clients. By default, -// all clients will use the {DefaultConfig} structure. -type Config struct { - // The credentials object to use when signing requests. Defaults to - // {DefaultChainCredentials}. - Credentials *credentials.Credentials - - // An optional endpoint URL (hostname only or fully qualified URI) - // that overrides the default generated endpoint for a client. Set this - // to `""` to use the default generated endpoint. - // - // @note You must still provide a `Region` value when specifying an - // endpoint for a client. - Endpoint *string - - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // @see http://docs.aws.amazon.com/general/latest/gr/rande.html - // AWS Regions and Endpoints - Region *string - - // Set this to `true` to disable SSL when sending requests. Defaults - // to `false`. - DisableSSL *bool - - // The HTTP client to use when sending requests. Defaults to - // `http.DefaultClient`. - HTTPClient *http.Client - - // An integer value representing the logging level. The default log level - // is zero (LogOff), which represents no logging. To enable logging set - // to a LogLevel Value. - LogLevel *LogLevelType - - // The logger writer interface to write logging messages to. Defaults to - // standard out. - Logger Logger - - // The maximum number of times that a request will be retried for failures. - // Defaults to -1, which defers the max retry setting to the service specific - // configuration. - MaxRetries *int - - // Disables semantic parameter validation, which validates input for missing - // required fields and/or other semantic request input errors. - DisableParamValidation *bool - - // Disables the computation of request and response checksums, e.g., - // CRC32 checksums in Amazon DynamoDB. - DisableComputeChecksums *bool - - // Set this to `true` to force the request to use path-style addressing, - // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will - // use virtual hosted bucket addressing when possible - // (`http://BUCKET.s3.amazonaws.com/KEY`). - // - // @note This configuration option is specific to the Amazon S3 service. - // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html - // Amazon S3: Virtual Hosting of Buckets - S3ForcePathStyle *bool -} - -// NewConfig returns a new Config pointer that can be chained with builder methods to -// set multiple configuration values inline without using pointers. -// -// svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10)) -// -func NewConfig() *Config { - return &Config{} -} - -// WithCredentials sets a config Credentials value returning a Config pointer -// for chaining. -func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { - c.Credentials = creds - return c -} - -// WithEndpoint sets a config Endpoint value returning a Config pointer for -// chaining. 
-func (c *Config) WithEndpoint(endpoint string) *Config { - c.Endpoint = &endpoint - return c -} - -// WithRegion sets a config Region value returning a Config pointer for -// chaining. -func (c *Config) WithRegion(region string) *Config { - c.Region = ®ion - return c -} - -// WithDisableSSL sets a config DisableSSL value returning a Config pointer -// for chaining. -func (c *Config) WithDisableSSL(disable bool) *Config { - c.DisableSSL = &disable - return c -} - -// WithHTTPClient sets a config HTTPClient value returning a Config pointer -// for chaining. -func (c *Config) WithHTTPClient(client *http.Client) *Config { - c.HTTPClient = client - return c -} - -// WithMaxRetries sets a config MaxRetries value returning a Config pointer -// for chaining. -func (c *Config) WithMaxRetries(max int) *Config { - c.MaxRetries = &max - return c -} - -// WithDisableParamValidation sets a config DisableParamValidation value -// returning a Config pointer for chaining. -func (c *Config) WithDisableParamValidation(disable bool) *Config { - c.DisableParamValidation = &disable - return c -} - -// WithDisableComputeChecksums sets a config DisableComputeChecksums value -// returning a Config pointer for chaining. -func (c *Config) WithDisableComputeChecksums(disable bool) *Config { - c.DisableComputeChecksums = &disable - return c -} - -// WithLogLevel sets a config LogLevel value returning a Config pointer for -// chaining. -func (c *Config) WithLogLevel(level LogLevelType) *Config { - c.LogLevel = &level - return c -} - -// WithLogger sets a config Logger value returning a Config pointer for -// chaining. -func (c *Config) WithLogger(logger Logger) *Config { - c.Logger = logger - return c -} - -// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config -// pointer for chaining. -func (c *Config) WithS3ForcePathStyle(force bool) *Config { - c.S3ForcePathStyle = &force - return c -} - -// Merge returns a new Config with the other Config's attribute values merged into -// this Config. If the other Config's attribute is nil it will not be merged into -// the new Config to be returned. -func (c Config) Merge(other *Config) *Config { - if other == nil { - return &c - } - - dst := c - - if other.Credentials != nil { - dst.Credentials = other.Credentials - } - - if other.Endpoint != nil { - dst.Endpoint = other.Endpoint - } - - if other.Region != nil { - dst.Region = other.Region - } - - if other.DisableSSL != nil { - dst.DisableSSL = other.DisableSSL - } - - if other.HTTPClient != nil { - dst.HTTPClient = other.HTTPClient - } - - if other.LogLevel != nil { - dst.LogLevel = other.LogLevel - } - - if other.Logger != nil { - dst.Logger = other.Logger - } - - if other.MaxRetries != nil { - dst.MaxRetries = other.MaxRetries - } - - if other.DisableParamValidation != nil { - dst.DisableParamValidation = other.DisableParamValidation - } - - if other.DisableComputeChecksums != nil { - dst.DisableComputeChecksums = other.DisableComputeChecksums - } - - if other.S3ForcePathStyle != nil { - dst.S3ForcePathStyle = other.S3ForcePathStyle - } - - return &dst -} - -// Copy will return a shallow copy of the Config object. 
-func (c Config) Copy() *Config { - dst := c - return &dst -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go deleted file mode 100644 index ddf3f39..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/config_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package aws - -import ( - "net/http" - "reflect" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" -) - -var testCredentials = credentials.NewChainCredentials([]credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{ - Filename: "TestFilename", - Profile: "TestProfile"}, - &credentials.EC2RoleProvider{ExpiryWindow: 5 * time.Minute}, -}) - -var copyTestConfig = Config{ - Credentials: testCredentials, - Endpoint: String("CopyTestEndpoint"), - Region: String("COPY_TEST_AWS_REGION"), - DisableSSL: Bool(true), - HTTPClient: http.DefaultClient, - LogLevel: LogLevel(LogDebug), - Logger: NewDefaultLogger(), - MaxRetries: Int(DefaultRetries), - DisableParamValidation: Bool(true), - DisableComputeChecksums: Bool(true), - S3ForcePathStyle: Bool(true), -} - -func TestCopy(t *testing.T) { - want := copyTestConfig - got := copyTestConfig.Copy() - if !reflect.DeepEqual(*got, want) { - t.Errorf("Copy() = %+v", got) - t.Errorf(" want %+v", want) - } -} - -func TestCopyReturnsNewInstance(t *testing.T) { - want := copyTestConfig - got := copyTestConfig.Copy() - if got == &want { - t.Errorf("Copy() = %p; want different instance as source %p", got, &want) - } -} - -var mergeTestZeroValueConfig = Config{} - -var mergeTestConfig = Config{ - Credentials: testCredentials, - Endpoint: String("MergeTestEndpoint"), - Region: String("MERGE_TEST_AWS_REGION"), - DisableSSL: Bool(true), - HTTPClient: http.DefaultClient, - LogLevel: LogLevel(LogDebug), - Logger: NewDefaultLogger(), - MaxRetries: Int(10), - DisableParamValidation: Bool(true), - DisableComputeChecksums: Bool(true), - S3ForcePathStyle: Bool(true), -} - -var mergeTests = []struct { - cfg *Config - in *Config - want *Config -}{ - {&Config{}, nil, &Config{}}, - {&Config{}, &mergeTestZeroValueConfig, &Config{}}, - {&Config{}, &mergeTestConfig, &mergeTestConfig}, -} - -func TestMerge(t *testing.T) { - for i, tt := range mergeTests { - got := tt.cfg.Merge(tt.in) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Config %d %+v", i, tt.cfg) - t.Errorf(" Merge(%+v)", tt.in) - t.Errorf(" got %+v", got) - t.Errorf(" want %+v", tt.want) - } - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convutil_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convutil_test.go deleted file mode 100644 index 6d06cd3..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convutil_test.go +++ /dev/null @@ -1,438 +0,0 @@ -package aws_test - -import ( - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/stretchr/testify/assert" -) - -var testCasesStringSlice = [][]string{ - {"a", "b", "c", "d", "e"}, - {"a", "b", "", "", "e"}, -} - -func TestStringSlice(t *testing.T) { - for idx, in := range testCasesStringSlice { - if in == nil { - continue - } - out := aws.StringSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := aws.StringValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var 
testCasesStringValueSlice = [][]*string{ - {aws.String("a"), aws.String("b"), nil, aws.String("c")}, -} - -func TestStringValueSlice(t *testing.T) { - for idx, in := range testCasesStringValueSlice { - if in == nil { - continue - } - out := aws.StringValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := aws.StringSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesStringMap = []map[string]string{ - {"a": "1", "b": "2", "c": "3"}, -} - -func TestStringMap(t *testing.T) { - for idx, in := range testCasesStringMap { - if in == nil { - continue - } - out := aws.StringMap(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := aws.StringValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesBoolSlice = [][]bool{ - {true, true, false, false}, -} - -func TestBoolSlice(t *testing.T) { - for idx, in := range testCasesBoolSlice { - if in == nil { - continue - } - out := aws.BoolSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := aws.BoolValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesBoolValueSlice = [][]*bool{} - -func TestBoolValueSlice(t *testing.T) { - for idx, in := range testCasesBoolValueSlice { - if in == nil { - continue - } - out := aws.BoolValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := aws.BoolSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesBoolMap = []map[string]bool{ - {"a": true, "b": false, "c": true}, -} - -func TestBoolMap(t *testing.T) { - for idx, in := range testCasesBoolMap { - if in == nil { - continue - } - out := aws.BoolMap(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := aws.BoolValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesIntSlice = [][]int{ - {1, 2, 3, 4}, -} - -func TestIntSlice(t *testing.T) { - for idx, in := range testCasesIntSlice { - if in == nil { - continue - } - out := aws.IntSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), 
"Unexpected value at idx %d", idx) - } - - out2 := aws.IntValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesIntValueSlice = [][]*int{} - -func TestIntValueSlice(t *testing.T) { - for idx, in := range testCasesIntValueSlice { - if in == nil { - continue - } - out := aws.IntValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := aws.IntSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesIntMap = []map[string]int{ - {"a": 3, "b": 2, "c": 1}, -} - -func TestIntMap(t *testing.T) { - for idx, in := range testCasesIntMap { - if in == nil { - continue - } - out := aws.IntMap(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := aws.IntValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesInt64Slice = [][]int64{ - {1, 2, 3, 4}, -} - -func TestInt64Slice(t *testing.T) { - for idx, in := range testCasesInt64Slice { - if in == nil { - continue - } - out := aws.Int64Slice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := aws.Int64ValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesInt64ValueSlice = [][]*int64{} - -func TestInt64ValueSlice(t *testing.T) { - for idx, in := range testCasesInt64ValueSlice { - if in == nil { - continue - } - out := aws.Int64ValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := aws.Int64Slice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesInt64Map = []map[string]int64{ - {"a": 3, "b": 2, "c": 1}, -} - -func TestInt64Map(t *testing.T) { - for idx, in := range testCasesInt64Map { - if in == nil { - continue - } - out := aws.Int64Map(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := aws.Int64ValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesFloat64Slice = [][]float64{ - {1, 2, 3, 4}, -} - -func TestFloat64Slice(t *testing.T) { - for idx, in := range testCasesFloat64Slice { - if in == nil { - continue - } - out := aws.Float64Slice(in) - 
assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := aws.Float64ValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesFloat64ValueSlice = [][]*float64{} - -func TestFloat64ValueSlice(t *testing.T) { - for idx, in := range testCasesFloat64ValueSlice { - if in == nil { - continue - } - out := aws.Float64ValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := aws.Float64Slice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesFloat64Map = []map[string]float64{ - {"a": 3, "b": 2, "c": 1}, -} - -func TestFloat64Map(t *testing.T) { - for idx, in := range testCasesFloat64Map { - if in == nil { - continue - } - out := aws.Float64Map(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := aws.Float64ValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesTimeSlice = [][]time.Time{ - {time.Now(), time.Now().AddDate(100, 0, 0)}, -} - -func TestTimeSlice(t *testing.T) { - for idx, in := range testCasesTimeSlice { - if in == nil { - continue - } - out := aws.TimeSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := aws.TimeValueSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", idx) - } -} - -var testCasesTimeValueSlice = [][]*time.Time{} - -func TestTimeValueSlice(t *testing.T) { - for idx, in := range testCasesTimeValueSlice { - if in == nil { - continue - } - out := aws.TimeValueSlice(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - if in[i] == nil { - assert.Empty(t, out[i], "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, *(in[i]), out[i], "Unexpected value at idx %d", idx) - } - } - - out2 := aws.TimeSlice(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - for i := range out2 { - if in[i] == nil { - assert.Empty(t, *(out2[i]), "Unexpected value at idx %d", idx) - } else { - assert.Equal(t, in[i], out2[i], "Unexpected value at idx %d", idx) - } - } - } -} - -var testCasesTimeMap = []map[string]time.Time{ - {"a": time.Now().AddDate(-100, 0, 0), "b": time.Now()}, -} - -func TestTimeMap(t *testing.T) { - for idx, in := range testCasesTimeMap { - if in == nil { - continue - } - out := aws.TimeMap(in) - assert.Len(t, out, len(in), "Unexpected len at idx %d", idx) - for i := range out { - assert.Equal(t, in[i], *(out[i]), "Unexpected value at idx %d", idx) - } - - out2 := aws.TimeValueMap(out) - assert.Len(t, out2, len(in), "Unexpected len at idx %d", idx) - assert.Equal(t, in, out2, "Unexpected value at idx %d", 
idx) - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go deleted file mode 100644 index 4fba22f..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package credentials - -import ( - "testing" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/stretchr/testify/assert" -) - -func TestChainProviderGet(t *testing.T) { - p := &ChainProvider{ - Providers: []Provider{ - &stubProvider{err: awserr.New("FirstError", "first provider error", nil)}, - &stubProvider{err: awserr.New("SecondError", "second provider error", nil)}, - &stubProvider{ - creds: Value{ - AccessKeyID: "AKID", - SecretAccessKey: "SECRET", - SessionToken: "", - }, - }, - }, - } - - creds, err := p.Retrieve() - assert.Nil(t, err, "Expect no error") - assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") - assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") - assert.Empty(t, creds.SessionToken, "Expect session token to be empty") -} - -func TestChainProviderIsExpired(t *testing.T) { - stubProvider := &stubProvider{expired: true} - p := &ChainProvider{ - Providers: []Provider{ - stubProvider, - }, - } - - assert.True(t, p.IsExpired(), "Expect expired to be true before any Retrieve") - _, err := p.Retrieve() - assert.Nil(t, err, "Expect no error") - assert.False(t, p.IsExpired(), "Expect not expired after retrieve") - - stubProvider.expired = true - assert.True(t, p.IsExpired(), "Expect return of expired provider") - - _, err = p.Retrieve() - assert.False(t, p.IsExpired(), "Expect not expired after retrieve") -} - -func TestChainProviderWithNoProvider(t *testing.T) { - p := &ChainProvider{ - Providers: []Provider{}, - } - - assert.True(t, p.IsExpired(), "Expect expired with no providers") - _, err := p.Retrieve() - assert.Equal(t, ErrNoValidProvidersFoundInChain, err, "Expect no providers error returned") -} - -func TestChainProviderWithNoValidProvider(t *testing.T) { - p := &ChainProvider{ - Providers: []Provider{ - &stubProvider{err: awserr.New("FirstError", "first provider error", nil)}, - &stubProvider{err: awserr.New("SecondError", "second provider error", nil)}, - }, - } - - assert.True(t, p.IsExpired(), "Expect expired with no providers") - _, err := p.Retrieve() - assert.Equal(t, ErrNoValidProvidersFoundInChain, err, "Expect no providers error returned") -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go deleted file mode 100644 index 99c2b47..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package credentials - -import ( - "testing" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/stretchr/testify/assert" -) - -type stubProvider struct { - creds Value - expired bool - err error -} - -func (s *stubProvider) Retrieve() (Value, error) { - s.expired = false - return s.creds, s.err -} -func (s *stubProvider) IsExpired() bool { - return s.expired -} - -func TestCredentialsGet(t *testing.T) { - c := NewCredentials(&stubProvider{ - creds: Value{ - AccessKeyID: "AKID", - SecretAccessKey: "SECRET", - SessionToken: "", - }, - expired: true, - }) - - creds, err := c.Get() - assert.Nil(t, err, "Expected no error") - assert.Equal(t, 
"AKID", creds.AccessKeyID, "Expect access key ID to match") - assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") - assert.Empty(t, creds.SessionToken, "Expect session token to be empty") -} - -func TestCredentialsGetWithError(t *testing.T) { - c := NewCredentials(&stubProvider{err: awserr.New("provider error", "", nil), expired: true}) - - _, err := c.Get() - assert.Equal(t, "provider error", err.(awserr.Error).Code(), "Expected provider error") -} - -func TestCredentialsExpire(t *testing.T) { - stub := &stubProvider{} - c := NewCredentials(stub) - - stub.expired = false - assert.True(t, c.IsExpired(), "Expected to start out expired") - c.Expire() - assert.True(t, c.IsExpired(), "Expected to be expired") - - c.forceRefresh = false - assert.False(t, c.IsExpired(), "Expected not to be expired") - - stub.expired = true - assert.True(t, c.IsExpired(), "Expected to be expired") -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider.go deleted file mode 100644 index 0eecbe3..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider.go +++ /dev/null @@ -1,162 +0,0 @@ -package credentials - -import ( - "bufio" - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const metadataCredentialsEndpoint = "http://169.254.169.254/latest/meta-data/iam/security-credentials/" - -// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// Example how to configure the EC2RoleProvider with custom http Client, Endpoint -// or ExpiryWindow -// -// p := &credentials.EC2RoleProvider{ -// // Pass in a custom timeout to be used when requesting -// // IAM EC2 Role credentials. -// Client: &http.Client{ -// Timeout: 10 * time.Second, -// }, -// // Use default EC2 Role metadata endpoint, Alternate endpoints can be -// // specified setting Endpoint to something else. -// Endpoint: "", -// // Do not use early expiry of credentials. If a non zero value is -// // specified the credentials will be expired early -// ExpiryWindow: 0, -// } -type EC2RoleProvider struct { - Expiry - - // Endpoint must be fully quantified URL - Endpoint string - - // HTTP client to use when connecting to EC2 service - Client *http.Client - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewEC2RoleCredentials returns a pointer to a new Credentials object -// wrapping the EC2RoleProvider. -// -// Takes a custom http.Client which can be configured for custom handling of -// things such as timeout. -// -// Endpoint is the URL that the EC2RoleProvider will connect to when retrieving -// role and credentials. -// -// Window is the expiry window that will be subtracted from the expiry returned -// by the role credential request. This is done so that the credentials will -// expire sooner than their actual lifespan. 
-func NewEC2RoleCredentials(client *http.Client, endpoint string, window time.Duration) *Credentials { - return NewCredentials(&EC2RoleProvider{ - Endpoint: endpoint, - Client: client, - ExpiryWindow: window, - }) -} - -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) Retrieve() (Value, error) { - if m.Client == nil { - m.Client = http.DefaultClient - } - if m.Endpoint == "" { - m.Endpoint = metadataCredentialsEndpoint - } - - credsList, err := requestCredList(m.Client, m.Endpoint) - if err != nil { - return Value{}, err - } - - if len(credsList) == 0 { - return Value{}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) - } - credsName := credsList[0] - - roleCreds, err := requestCred(m.Client, m.Endpoint, credsName) - if err != nil { - return Value{}, err - } - - m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) - - return Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for deserializing credential -// request responses. -type ec2RoleCredRespBody struct { - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string -} - -// requestCredList requests a list of credentials from the EC2 service. -// If there are no credentials, or there is an error making or receiving the request -func requestCredList(client *http.Client, endpoint string) ([]string, error) { - resp, err := client.Get(endpoint) - if err != nil { - return nil, awserr.New("ListEC2Role", "failed to list EC2 Roles", err) - } - defer resp.Body.Close() - - credsList := []string{} - s := bufio.NewScanner(resp.Body) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, awserr.New("ReadEC2Role", "failed to read list of EC2 Roles", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific credentials from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response -// and error will be returned. 
-func requestCred(client *http.Client, endpoint, credsName string) (*ec2RoleCredRespBody, error) { - resp, err := client.Get(endpoint + credsName) - if err != nil { - return nil, awserr.New("GetEC2RoleCredentials", - fmt.Sprintf("failed to get %s EC2 Role credentials", credsName), - err) - } - defer resp.Body.Close() - - respCreds := &ec2RoleCredRespBody{} - if err := json.NewDecoder(resp.Body).Decode(respCreds); err != nil { - return nil, awserr.New("DecodeEC2RoleCredentials", - fmt.Sprintf("failed to decode %s EC2 Role credentials", credsName), - err) - } - - return respCreds, nil -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider_test.go deleted file mode 100644 index da1549a..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/ec2_role_provider_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package credentials - -import ( - "fmt" - "github.com/stretchr/testify/assert" - "net/http" - "net/http/httptest" - "testing" - "time" -) - -func initTestServer(expireOn string) *httptest.Server { - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.RequestURI == "/" { - fmt.Fprintln(w, "/creds") - } else { - fmt.Fprintf(w, `{ - "AccessKeyId" : "accessKey", - "SecretAccessKey" : "secret", - "Token" : "token", - "Expiration" : "%s" -}`, expireOn) - } - })) - - return server -} - -func TestEC2RoleProvider(t *testing.T) { - server := initTestServer("2014-12-16T01:51:37Z") - defer server.Close() - - p := &EC2RoleProvider{Client: http.DefaultClient, Endpoint: server.URL} - - creds, err := p.Retrieve() - assert.Nil(t, err, "Expect no error") - - assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") - assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") - assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") -} - -func TestEC2RoleProviderIsExpired(t *testing.T) { - server := initTestServer("2014-12-16T01:51:37Z") - defer server.Close() - - p := &EC2RoleProvider{Client: http.DefaultClient, Endpoint: server.URL} - p.CurrentTime = func() time.Time { - return time.Date(2014, 12, 15, 21, 26, 0, 0, time.UTC) - } - - assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.") - - _, err := p.Retrieve() - assert.Nil(t, err, "Expect no error") - - assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.") - - p.CurrentTime = func() time.Time { - return time.Date(3014, 12, 15, 21, 26, 0, 0, time.UTC) - } - - assert.True(t, p.IsExpired(), "Expect creds to be expired.") -} - -func TestEC2RoleProviderExpiryWindowIsExpired(t *testing.T) { - server := initTestServer("2014-12-16T01:51:37Z") - defer server.Close() - - p := &EC2RoleProvider{Client: http.DefaultClient, Endpoint: server.URL, ExpiryWindow: time.Hour * 1} - p.CurrentTime = func() time.Time { - return time.Date(2014, 12, 15, 0, 51, 37, 0, time.UTC) - } - - assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve.") - - _, err := p.Retrieve() - assert.Nil(t, err, "Expect no error") - - assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve.") - - p.CurrentTime = func() time.Time { - return time.Date(2014, 12, 16, 0, 55, 37, 0, time.UTC) - } - - assert.True(t, p.IsExpired(), "Expect creds to be expired.") -} - -func BenchmarkEC2RoleProvider(b *testing.B) { - server := 
initTestServer("2014-12-16T01:51:37Z") - defer server.Close() - - p := &EC2RoleProvider{Client: http.DefaultClient, Endpoint: server.URL} - _, err := p.Retrieve() - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - _, err := p.Retrieve() - if err != nil { - b.Fatal(err) - } - } - }) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go deleted file mode 100644 index 53f6ce2..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package credentials - -import ( - "github.com/stretchr/testify/assert" - "os" - "testing" -) - -func TestEnvProviderRetrieve(t *testing.T) { - os.Clearenv() - os.Setenv("AWS_ACCESS_KEY_ID", "access") - os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") - os.Setenv("AWS_SESSION_TOKEN", "token") - - e := EnvProvider{} - creds, err := e.Retrieve() - assert.Nil(t, err, "Expect no error") - - assert.Equal(t, "access", creds.AccessKeyID, "Expect access key ID to match") - assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") - assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") -} - -func TestEnvProviderIsExpired(t *testing.T) { - os.Clearenv() - os.Setenv("AWS_ACCESS_KEY_ID", "access") - os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") - os.Setenv("AWS_SESSION_TOKEN", "token") - - e := EnvProvider{} - - assert.True(t, e.IsExpired(), "Expect creds to be expired before retrieve.") - - _, err := e.Retrieve() - assert.Nil(t, err, "Expect no error") - - assert.False(t, e.IsExpired(), "Expect creds to not be expired after retrieve.") -} - -func TestEnvProviderNoAccessKeyID(t *testing.T) { - os.Clearenv() - os.Setenv("AWS_SECRET_ACCESS_KEY", "secret") - - e := EnvProvider{} - creds, err := e.Retrieve() - assert.Equal(t, ErrAccessKeyIDNotFound, err, "ErrAccessKeyIDNotFound expected, but was %#v error: %#v", creds, err) -} - -func TestEnvProviderNoSecretAccessKey(t *testing.T) { - os.Clearenv() - os.Setenv("AWS_ACCESS_KEY_ID", "access") - - e := EnvProvider{} - creds, err := e.Retrieve() - assert.Equal(t, ErrSecretAccessKeyNotFound, err, "ErrSecretAccessKeyNotFound expected, but was %#v error: %#v", creds, err) -} - -func TestEnvProviderAlternateNames(t *testing.T) { - os.Clearenv() - os.Setenv("AWS_ACCESS_KEY", "access") - os.Setenv("AWS_SECRET_KEY", "secret") - - e := EnvProvider{} - creds, err := e.Retrieve() - assert.Nil(t, err, "Expect no error") - - assert.Equal(t, "access", creds.AccessKeyID, "Expected access key ID") - assert.Equal(t, "secret", creds.SecretAccessKey, "Expected secret access key") - assert.Empty(t, creds.SessionToken, "Expected no token") -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go deleted file mode 100644 index 3621d56..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package credentials - -import ( - "github.com/stretchr/testify/assert" - "os" - "testing" -) - -func TestSharedCredentialsProvider(t *testing.T) { - os.Clearenv() - - p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} - creds, err := p.Retrieve() - assert.Nil(t, err, "Expect no error") - - 
assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") - assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") - assert.Equal(t, "token", creds.SessionToken, "Expect session token to match") -} - -func TestSharedCredentialsProviderIsExpired(t *testing.T) { - os.Clearenv() - - p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} - - assert.True(t, p.IsExpired(), "Expect creds to be expired before retrieve") - - _, err := p.Retrieve() - assert.Nil(t, err, "Expect no error") - - assert.False(t, p.IsExpired(), "Expect creds to not be expired after retrieve") -} - -func TestSharedCredentialsProviderWithAWS_PROFILE(t *testing.T) { - os.Clearenv() - os.Setenv("AWS_PROFILE", "no_token") - - p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} - creds, err := p.Retrieve() - assert.Nil(t, err, "Expect no error") - - assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") - assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") - assert.Empty(t, creds.SessionToken, "Expect no token") -} - -func TestSharedCredentialsProviderWithoutTokenFromProfile(t *testing.T) { - os.Clearenv() - - p := SharedCredentialsProvider{Filename: "example.ini", Profile: "no_token"} - creds, err := p.Retrieve() - assert.Nil(t, err, "Expect no error") - - assert.Equal(t, "accessKey", creds.AccessKeyID, "Expect access key ID to match") - assert.Equal(t, "secret", creds.SecretAccessKey, "Expect secret access key to match") - assert.Empty(t, creds.SessionToken, "Expect no token") -} - -func BenchmarkSharedCredentialsProvider(b *testing.B) { - os.Clearenv() - - p := SharedCredentialsProvider{Filename: "example.ini", Profile: ""} - _, err := p.Retrieve() - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - _, err := p.Retrieve() - if err != nil { - b.Fatal(err) - } - } - }) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go deleted file mode 100644 index ea01236..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package credentials - -import ( - "github.com/stretchr/testify/assert" - "testing" -) - -func TestStaticProviderGet(t *testing.T) { - s := StaticProvider{ - Value: Value{ - AccessKeyID: "AKID", - SecretAccessKey: "SECRET", - SessionToken: "", - }, - } - - creds, err := s.Retrieve() - assert.Nil(t, err, "Expect no error") - assert.Equal(t, "AKID", creds.AccessKeyID, "Expect access key ID to match") - assert.Equal(t, "SECRET", creds.SecretAccessKey, "Expect secret access key to match") - assert.Empty(t, creds.SessionToken, "Expect no session token") -} - -func TestStaticProviderIsExpired(t *testing.T) { - s := StaticProvider{ - Value: Value{ - AccessKeyID: "AKID", - SecretAccessKey: "SECRET", - SessionToken: "", - }, - } - - assert.False(t, s.IsExpired(), "Expect static credentials to never expire") -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go deleted file mode 100644 index 80890b9..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go +++ /dev/null @@ -1,120 +0,0 @@ -// Package 
stscreds are credential Providers to retrieve STS AWS credentials. -// -// STS provides multiple ways to retrieve credentials which can be used when making -// future AWS service API operation calls. -package stscreds - -import ( - "fmt" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/service/sts" - "time" -) - -// AssumeRoler represents the minimal subset of the STS client API used by this provider. -type AssumeRoler interface { - AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) -} - -// AssumeRoleProvider retrieves temporary credentials from the STS service, and -// keeps track of their expiration time. This provider must be used explicitly, -// as it is not included in the credentials chain. -// -// Example how to configure a service to use this provider: -// -// config := &aws.Config{ -// Credentials: stscreds.NewCredentials(nil, "arn-of-the-role-to-assume", 10*time.Second), -// }) -// // Use config for creating your AWS service. -// -// Example how to obtain customised credentials: -// -// provider := &stscreds.Provider{ -// // Extend the duration to 1 hour. -// Duration: time.Hour, -// // Custom role name. -// RoleSessionName: "custom-session-name", -// } -// creds := credentials.NewCredentials(provider) -// -type AssumeRoleProvider struct { - credentials.Expiry - - // Custom STS client. If not set the default STS client will be used. - Client AssumeRoler - - // Role to be assumed. - RoleARN string - - // Session name, if you wish to reuse the credentials elsewhere. - RoleSessionName string - - // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. - Duration time.Duration - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping the -// AssumeRoleProvider. The credentials will expire every 15 minutes and the -// role will be named after a nanosecond timestamp of this operation. -// -// The sts and roleARN parameters are used for building the "AssumeRole" call. -// Pass nil as sts to use the default client. -// -// Window is the expiry window that will be subtracted from the expiry returned -// by the role credential request. This is done so that the credentials will -// expire sooner than their actual lifespan. -func NewCredentials(client AssumeRoler, roleARN string, window time.Duration) *credentials.Credentials { - return credentials.NewCredentials(&AssumeRoleProvider{ - Client: client, - RoleARN: roleARN, - ExpiryWindow: window, - }) -} - -// Retrieve generates a new set of temporary credentials using STS. -func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { - - // Apply defaults where parameters are not set. - if p.Client == nil { - p.Client = sts.New(nil) - } - if p.RoleSessionName == "" { - // Try to work out a role name that will hopefully end up unique. - p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) - } - if p.Duration == 0 { - // Expire as often as AWS permits. 
- p.Duration = 15 * time.Minute - } - - roleOutput, err := p.Client.AssumeRole(&sts.AssumeRoleInput{ - DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), - RoleARN: aws.String(p.RoleARN), - RoleSessionName: aws.String(p.RoleSessionName), - }) - - if err != nil { - return credentials.Value{}, err - } - - // We will proactively generate new credentials before they expire. - p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: *roleOutput.Credentials.AccessKeyID, - SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, - SessionToken: *roleOutput.Credentials.SessionToken, - }, nil -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go deleted file mode 100644 index 98b7690..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package stscreds - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/sts" - "github.com/stretchr/testify/assert" - "testing" - "time" -) - -type stubSTS struct { -} - -func (s *stubSTS) AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) { - expiry := time.Now().Add(60 * time.Minute) - return &sts.AssumeRoleOutput{ - Credentials: &sts.Credentials{ - // Just reflect the role arn to the provider. - AccessKeyID: input.RoleARN, - SecretAccessKey: aws.String("assumedSecretAccessKey"), - SessionToken: aws.String("assumedSessionToken"), - Expiration: &expiry, - }, - }, nil -} - -func TestAssumeRoleProvider(t *testing.T) { - stub := &stubSTS{} - p := &AssumeRoleProvider{ - Client: stub, - RoleARN: "roleARN", - } - - creds, err := p.Retrieve() - assert.Nil(t, err, "Expect no error") - - assert.Equal(t, "roleARN", creds.AccessKeyID, "Expect access key ID to be reflected role ARN") - assert.Equal(t, "assumedSecretAccessKey", creds.SecretAccessKey, "Expect secret access key to match") - assert.Equal(t, "assumedSessionToken", creds.SessionToken, "Expect session token to match") -} - -func BenchmarkAssumeRoleProvider(b *testing.B) { - stub := &stubSTS{} - p := &AssumeRoleProvider{ - Client: stub, - RoleARN: "roleARN", - } - - b.ResetTimer() - b.RunParallel(func(pb *testing.PB) { - for pb.Next() { - _, err := p.Retrieve() - if err != nil { - b.Fatal(err) - } - } - }) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions.go deleted file mode 100644 index 45ae880..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions.go +++ /dev/null @@ -1,157 +0,0 @@ -package aws - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var sleepDelay = func(delay time.Duration) { - time.Sleep(delay) -} - -// Interface for matching types which also have a Len method. -type lener interface { - Len() int -} - -// BuildContentLength builds the content length of a request based on the body, -// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable -// to determine request body length and no "Content-Length" was specified it will panic. 
-func BuildContentLength(r *Request) { - if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { - length, _ := strconv.ParseInt(slength, 10, 64) - r.HTTPRequest.ContentLength = length - return - } - - var length int64 - switch body := r.Body.(type) { - case nil: - length = 0 - case lener: - length = int64(body.Len()) - case io.Seeker: - r.bodyStart, _ = body.Seek(0, 1) - end, _ := body.Seek(0, 2) - body.Seek(r.bodyStart, 0) // make sure to seek back to original location - length = end - r.bodyStart - default: - panic("Cannot get length of body, must provide `ContentLength`") - } - - r.HTTPRequest.ContentLength = length - r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) -} - -// UserAgentHandler is a request handler for injecting User agent into requests. -func UserAgentHandler(r *Request) { - r.HTTPRequest.Header.Set("User-Agent", SDKName+"/"+SDKVersion) -} - -var reStatusCode = regexp.MustCompile(`^(\d+)`) - -// SendHandler is a request handler to send service request using HTTP client. -func SendHandler(r *Request) { - var err error - r.HTTPResponse, err = r.Service.Config.HTTPClient.Do(r.HTTPRequest) - if err != nil { - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other url redirect errors will - // comeback in a similar method. - if e, ok := err.(*url.Error); ok { - if s := reStatusCode.FindStringSubmatch(e.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } - } - if r.HTTPRequest == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. - r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - } - // Catch all other request errors. - r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = Bool(true) // network errors are retryable - } -} - -// ValidateResponseHandler is a request handler to validate service response. -func ValidateResponseHandler(r *Request) { - if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { - // this may be replaced by an UnmarshalError handler - r.Error = awserr.New("UnknownError", "unknown error", nil) - } -} - -// AfterRetryHandler performs final checks to determine if the request should -// be retried and how long to delay. -func AfterRetryHandler(r *Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil { - r.Retryable = Bool(r.Service.ShouldRetry(r)) - } - - if r.WillRetry() { - r.RetryDelay = r.Service.RetryRules(r) - sleepDelay(r.RetryDelay) - - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - if isCodeExpiredCreds(err.Code()) { - r.Config.Credentials.Expire() - } - } - } - - r.RetryCount++ - r.Error = nil - } -} - -var ( - // ErrMissingRegion is an error that is returned if region configuration is - // not found. 
- // - // @readonly - ErrMissingRegion error = awserr.New("MissingRegion", "could not find region configuration", nil) - - // ErrMissingEndpoint is an error that is returned if an endpoint cannot be - // resolved for a service. - // - // @readonly - ErrMissingEndpoint error = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) -) - -// ValidateEndpointHandler is a request handler to validate a request had the -// appropriate Region and Endpoint set. Will set r.Error if the endpoint or -// region is not valid. -func ValidateEndpointHandler(r *Request) { - if r.Service.SigningRegion == "" && StringValue(r.Service.Config.Region) == "" { - r.Error = ErrMissingRegion - } else if r.Service.Endpoint == "" { - r.Error = ErrMissingEndpoint - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions_test.go deleted file mode 100644 index bc6a8f2..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handler_functions_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package aws - -import ( - "net/http" - "os" - "testing" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/stretchr/testify/assert" -) - -func TestValidateEndpointHandler(t *testing.T) { - os.Clearenv() - svc := NewService(NewConfig().WithRegion("us-west-2")) - svc.Handlers.Clear() - svc.Handlers.Validate.PushBack(ValidateEndpointHandler) - - req := NewRequest(svc, &Operation{Name: "Operation"}, nil, nil) - err := req.Build() - - assert.NoError(t, err) -} - -func TestValidateEndpointHandlerErrorRegion(t *testing.T) { - os.Clearenv() - svc := NewService(nil) - svc.Handlers.Clear() - svc.Handlers.Validate.PushBack(ValidateEndpointHandler) - - req := NewRequest(svc, &Operation{Name: "Operation"}, nil, nil) - err := req.Build() - - assert.Error(t, err) - assert.Equal(t, ErrMissingRegion, err) -} - -type mockCredsProvider struct { - expired bool - retrieveCalled bool -} - -func (m *mockCredsProvider) Retrieve() (credentials.Value, error) { - m.retrieveCalled = true - return credentials.Value{}, nil -} - -func (m *mockCredsProvider) IsExpired() bool { - return m.expired -} - -func TestAfterRetryRefreshCreds(t *testing.T) { - os.Clearenv() - credProvider := &mockCredsProvider{} - svc := NewService(&Config{Credentials: credentials.NewCredentials(credProvider), MaxRetries: Int(1)}) - - svc.Handlers.Clear() - svc.Handlers.ValidateResponse.PushBack(func(r *Request) { - r.Error = awserr.New("UnknownError", "", nil) - r.HTTPResponse = &http.Response{StatusCode: 400} - }) - svc.Handlers.UnmarshalError.PushBack(func(r *Request) { - r.Error = awserr.New("ExpiredTokenException", "", nil) - }) - svc.Handlers.AfterRetry.PushBack(func(r *Request) { - AfterRetryHandler(r) - }) - - assert.True(t, svc.Config.Credentials.IsExpired(), "Expect to start out expired") - assert.False(t, credProvider.retrieveCalled) - - req := NewRequest(svc, &Operation{Name: "Operation"}, nil, nil) - req.Send() - - assert.True(t, svc.Config.Credentials.IsExpired()) - assert.False(t, credProvider.retrieveCalled) - - _, err := svc.Config.Credentials.Get() - assert.NoError(t, err) - assert.True(t, credProvider.retrieveCalled) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers.go deleted file mode 100644 index 1968cb9..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers.go 
+++ /dev/null @@ -1,85 +0,0 @@ -package aws - -// A Handlers provides a collection of request handlers for various -// stages of handling requests. -type Handlers struct { - Validate HandlerList - Build HandlerList - Sign HandlerList - Send HandlerList - ValidateResponse HandlerList - Unmarshal HandlerList - UnmarshalMeta HandlerList - UnmarshalError HandlerList - Retry HandlerList - AfterRetry HandlerList -} - -// copy returns of this handler's lists. -func (h *Handlers) copy() Handlers { - return Handlers{ - Validate: h.Validate.copy(), - Build: h.Build.copy(), - Sign: h.Sign.copy(), - Send: h.Send.copy(), - ValidateResponse: h.ValidateResponse.copy(), - Unmarshal: h.Unmarshal.copy(), - UnmarshalError: h.UnmarshalError.copy(), - UnmarshalMeta: h.UnmarshalMeta.copy(), - Retry: h.Retry.copy(), - AfterRetry: h.AfterRetry.copy(), - } -} - -// Clear removes callback functions for all handlers -func (h *Handlers) Clear() { - h.Validate.Clear() - h.Build.Clear() - h.Send.Clear() - h.Sign.Clear() - h.Unmarshal.Clear() - h.UnmarshalMeta.Clear() - h.UnmarshalError.Clear() - h.ValidateResponse.Clear() - h.Retry.Clear() - h.AfterRetry.Clear() -} - -// A HandlerList manages zero or more handlers in a list. -type HandlerList struct { - list []func(*Request) -} - -// copy creates a copy of the handler list. -func (l *HandlerList) copy() HandlerList { - var n HandlerList - n.list = append([]func(*Request){}, l.list...) - return n -} - -// Clear clears the handler list. -func (l *HandlerList) Clear() { - l.list = []func(*Request){} -} - -// Len returns the number of handlers in the list. -func (l *HandlerList) Len() int { - return len(l.list) -} - -// PushBack pushes handlers f to the back of the handler list. -func (l *HandlerList) PushBack(f ...func(*Request)) { - l.list = append(l.list, f...) -} - -// PushFront pushes handlers f to the front of the handler list. -func (l *HandlerList) PushFront(f ...func(*Request)) { - l.list = append(f, l.list...) -} - -// Run executes all handlers in the list with a given request object. -func (l *HandlerList) Run(r *Request) { - for _, f := range l.list { - f(r) - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers_test.go deleted file mode 100644 index 944e1d3..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/handlers_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package aws - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestHandlerList(t *testing.T) { - s := "" - r := &Request{} - l := HandlerList{} - l.PushBack(func(r *Request) { - s += "a" - r.Data = s - }) - l.Run(r) - assert.Equal(t, "a", s) - assert.Equal(t, "a", r.Data) -} - -func TestMultipleHandlers(t *testing.T) { - r := &Request{} - l := HandlerList{} - l.PushBack(func(r *Request) { r.Data = nil }) - l.PushFront(func(r *Request) { r.Data = Bool(true) }) - l.Run(r) - if r.Data != nil { - t.Error("Expected handler to execute") - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator.go deleted file mode 100644 index b4e95ce..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator.go +++ /dev/null @@ -1,89 +0,0 @@ -package aws - -import ( - "fmt" - "reflect" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// ValidateParameters is a request handler to validate the input parameters. 
-// Validating parameters only has meaning if done prior to the request being sent. -func ValidateParameters(r *Request) { - if r.ParamsFilled() { - v := validator{errors: []string{}} - v.validateAny(reflect.ValueOf(r.Params), "") - - if count := len(v.errors); count > 0 { - format := "%d validation errors:\n- %s" - msg := fmt.Sprintf(format, count, strings.Join(v.errors, "\n- ")) - r.Error = awserr.New("InvalidParameter", msg, nil) - } - } -} - -// A validator validates values. Collects validations errors which occurs. -type validator struct { - errors []string -} - -// validateAny will validate any struct, slice or map type. All validations -// are also performed recursively for nested types. -func (v *validator) validateAny(value reflect.Value, path string) { - value = reflect.Indirect(value) - if !value.IsValid() { - return - } - - switch value.Kind() { - case reflect.Struct: - v.validateStruct(value, path) - case reflect.Slice: - for i := 0; i < value.Len(); i++ { - v.validateAny(value.Index(i), path+fmt.Sprintf("[%d]", i)) - } - case reflect.Map: - for _, n := range value.MapKeys() { - v.validateAny(value.MapIndex(n), path+fmt.Sprintf("[%q]", n.String())) - } - } -} - -// validateStruct will validate the struct value's fields. If the structure has -// nested types those types will be validated also. -func (v *validator) validateStruct(value reflect.Value, path string) { - prefix := "." - if path == "" { - prefix = "" - } - - for i := 0; i < value.Type().NumField(); i++ { - f := value.Type().Field(i) - if strings.ToLower(f.Name[0:1]) == f.Name[0:1] { - continue - } - fvalue := value.FieldByName(f.Name) - - notset := false - if f.Tag.Get("required") != "" { - switch fvalue.Kind() { - case reflect.Ptr, reflect.Slice, reflect.Map: - if fvalue.IsNil() { - notset = true - } - default: - if !fvalue.IsValid() { - notset = true - } - } - } - - if notset { - msg := "missing required parameter: " + path + prefix + f.Name - v.errors = append(v.errors, msg) - } else { - v.validateAny(fvalue, path+prefix+f.Name) - } - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator_test.go deleted file mode 100644 index e09acf8..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/param_validator_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package aws_test - -import ( - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/stretchr/testify/assert" -) - -var service = func() *aws.Service { - s := &aws.Service{ - Config: &aws.Config{}, - ServiceName: "mock-service", - APIVersion: "2015-01-01", - } - return s -}() - -type StructShape struct { - RequiredList []*ConditionalStructShape `required:"true"` - RequiredMap map[string]*ConditionalStructShape `required:"true"` - RequiredBool *bool `required:"true"` - OptionalStruct *ConditionalStructShape - - hiddenParameter *string - - metadataStructureShape -} - -type metadataStructureShape struct { - SDKShapeTraits bool -} - -type ConditionalStructShape struct { - Name *string `required:"true"` - SDKShapeTraits bool -} - -func TestNoErrors(t *testing.T) { - input := &StructShape{ - RequiredList: []*ConditionalStructShape{}, - RequiredMap: map[string]*ConditionalStructShape{ - "key1": {Name: aws.String("Name")}, - "key2": {Name: aws.String("Name")}, - }, - RequiredBool: aws.Bool(true), - OptionalStruct: &ConditionalStructShape{Name: aws.String("Name")}, - } - - req := aws.NewRequest(service, 
&aws.Operation{}, input, nil) - aws.ValidateParameters(req) - assert.NoError(t, req.Error) -} - -func TestMissingRequiredParameters(t *testing.T) { - input := &StructShape{} - req := aws.NewRequest(service, &aws.Operation{}, input, nil) - aws.ValidateParameters(req) - - assert.Error(t, req.Error) - assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code()) - assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList\n- missing required parameter: RequiredMap\n- missing required parameter: RequiredBool", req.Error.(awserr.Error).Message()) -} - -func TestNestedMissingRequiredParameters(t *testing.T) { - input := &StructShape{ - RequiredList: []*ConditionalStructShape{{}}, - RequiredMap: map[string]*ConditionalStructShape{ - "key1": {Name: aws.String("Name")}, - "key2": {}, - }, - RequiredBool: aws.Bool(true), - OptionalStruct: &ConditionalStructShape{}, - } - - req := aws.NewRequest(service, &aws.Operation{}, input, nil) - aws.ValidateParameters(req) - - assert.Error(t, req.Error) - assert.Equal(t, "InvalidParameter", req.Error.(awserr.Error).Code()) - assert.Equal(t, "3 validation errors:\n- missing required parameter: RequiredList[0].Name\n- missing required parameter: RequiredMap[\"key2\"].Name\n- missing required parameter: OptionalStruct.Name", req.Error.(awserr.Error).Message()) - -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request.go deleted file mode 100644 index f3248fc..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request.go +++ /dev/null @@ -1,312 +0,0 @@ -package aws - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -// A Request is the service request to be made. -type Request struct { - *Service - Handlers Handlers - Time time.Time - ExpireTime time.Duration - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - bodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount uint - Retryable *bool - RetryDelay time.Duration - - built bool -} - -// An Operation is the service API operation to be made. -type Operation struct { - Name string - HTTPMethod string - HTTPPath string - *Paginator -} - -// Paginator keeps track of pagination configuration for an API operation. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - -// NewRequest returns a new Request pointer for the service API -// operation and parameters. -// -// Params is any value of input parameters to be the request payload. -// Data is pointer value to an object which the request's response -// payload will be deserialized to. 
-func NewRequest(service *Service, operation *Operation, params interface{}, data interface{}) *Request { - method := operation.HTTPMethod - if method == "" { - method = "POST" - } - p := operation.HTTPPath - if p == "" { - p = "/" - } - - httpReq, _ := http.NewRequest(method, "", nil) - httpReq.URL, _ = url.Parse(service.Endpoint + p) - - r := &Request{ - Service: service, - Handlers: service.Handlers.copy(), - Time: time.Now(), - ExpireTime: 0, - Operation: operation, - HTTPRequest: httpReq, - Body: nil, - Params: params, - Error: nil, - Data: data, - } - r.SetBufferBody([]byte{}) - - return r -} - -// WillRetry returns if the request's can be retried. -func (r *Request) WillRetry() bool { - return r.Error != nil && BoolValue(r.Retryable) && r.RetryCount < r.Service.MaxRetries() -} - -// ParamsFilled returns if the request's parameters have been populated -// and the parameters are valid. False is returned if no parameters are -// provided or invalid. -func (r *Request) ParamsFilled() bool { - return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() -} - -// DataFilled returns true if the request's data for response deserialization -// target has been set and is a valid. False is returned if data is not -// set, or is invalid. -func (r *Request) DataFilled() bool { - return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() -} - -// SetBufferBody will set the request's body bytes that will be sent to -// the service API. -func (r *Request) SetBufferBody(buf []byte) { - r.SetReaderBody(bytes.NewReader(buf)) -} - -// SetStringBody sets the body of the request to be backed by a string. -func (r *Request) SetStringBody(s string) { - r.SetReaderBody(strings.NewReader(s)) -} - -// SetReaderBody will set the request's body reader. -func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.HTTPRequest.Body = ioutil.NopCloser(reader) - r.Body = reader -} - -// Presign returns the request's signed URL. Error will be returned -// if the signing fails. -func (r *Request) Presign(expireTime time.Duration) (string, error) { - r.ExpireTime = expireTime - r.Sign() - if r.Error != nil { - return "", r.Error - } - return r.HTTPRequest.URL.String(), nil -} - -// Build will build the request's object so it can be signed and sent -// to the service. Build will also validate all the request's parameters. -// Anny additional build Handlers set on this request will be run -// in the order they were set. -// -// The request will only be built once. Multiple calls to build will have -// no effect. -// -// If any Validate or Build errors occur the build will stop and the error -// which occurred will be returned. -func (r *Request) Build() error { - if !r.built { - r.Error = nil - r.Handlers.Validate.Run(r) - if r.Error != nil { - return r.Error - } - r.Handlers.Build.Run(r) - r.built = true - } - - return r.Error -} - -// Sign will sign the request retuning error if errors are encountered. -// -// Send will build the request prior to signing. All Sign Handlers will -// be executed in the order they were set. -func (r *Request) Sign() error { - r.Build() - if r.Error != nil { - return r.Error - } - - r.Handlers.Sign.Run(r) - return r.Error -} - -// Send will send the request returning error if errors are encountered. -// -// Send will sign the request prior to sending. All Send Handlers will -// be executed in the order they were set. 
-func (r *Request) Send() error { - for { - r.Sign() - if r.Error != nil { - return r.Error - } - - if BoolValue(r.Retryable) { - // Re-seek the body back to the original point in for a retry so that - // send will send the body's contents again in the upcoming request. - r.Body.Seek(r.bodyStart, 0) - } - r.Retryable = nil - - r.Handlers.Send.Run(r) - if r.Error != nil { - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - return r.Error - } - continue - } - - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - r.Handlers.UnmarshalError.Run(r) - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - return r.Error - } - continue - } - - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - return r.Error - } - continue - } - - break - } - - return nil -} - -// HasNextPage returns true if this request has more pages of data available. -func (r *Request) HasNextPage() bool { - return r.nextPageTokens() != nil -} - -// nextPageTokens returns the tokens to use when asking for the next page of -// data. -func (r *Request) nextPageTokens() []interface{} { - if r.Operation.Paginator == nil { - return nil - } - - if r.Operation.TruncationToken != "" { - tr := awsutil.ValuesAtAnyPath(r.Data, r.Operation.TruncationToken) - if tr == nil || len(tr) == 0 { - return nil - } - switch v := tr[0].(type) { - case bool: - if v == false { - return nil - } - } - } - - found := false - tokens := make([]interface{}, len(r.Operation.OutputTokens)) - - for i, outtok := range r.Operation.OutputTokens { - v := awsutil.ValuesAtAnyPath(r.Data, outtok) - if v != nil && len(v) > 0 { - found = true - tokens[i] = v[0] - } - } - - if found { - return tokens - } - return nil -} - -// NextPage returns a new Request that can be executed to return the next -// page of result data. Call .Send() on this request to execute it. -func (r *Request) NextPage() *Request { - tokens := r.nextPageTokens() - if tokens == nil { - return nil - } - - data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() - nr := NewRequest(r.Service, r.Operation, awsutil.CopyOf(r.Params), data) - for i, intok := range nr.Operation.InputTokens { - awsutil.SetValueAtAnyPath(nr.Params, intok, tokens[i]) - } - return nr -} - -// EachPage iterates over each page of a paginated request object. The fn -// parameter should be a function with the following sample signature: -// -// func(page *T, lastPage bool) bool { -// return true // return false to stop iterating -// } -// -// Where "T" is the structure type matching the output structure of the given -// operation. For example, a request object generated by -// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput -// as the structure "T". The lastPage value represents whether the page is -// the last page of data or not. The return value of this function should -// return true to keep iterating or false to stop. 
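As a concrete illustration of the callback contract just described, here is a small sketch that mirrors the ListTables pagination exercised by the tests later in this patch; the Limit value is a placeholder and error handling is kept minimal.

// Illustrative only: EachPage against the vendored DynamoDB client.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func listAllTables(db *dynamodb.DynamoDB) ([]string, error) {
	req, _ := db.ListTablesRequest(&dynamodb.ListTablesInput{Limit: aws.Int64(2)})

	var tables []string
	err := req.EachPage(func(page interface{}, lastPage bool) bool {
		out := page.(*dynamodb.ListTablesOutput) // the structure "T" from the comment above
		for _, name := range out.TableNames {
			tables = append(tables, *name)
		}
		return true // returning false stops the iteration early
	})
	return tables, err
}

func main() {
	tables, err := listAllTables(dynamodb.New(nil))
	fmt.Println(tables, err)
}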
-func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { - for page := r; page != nil; page = page.NextPage() { - page.Send() - shouldContinue := fn(page.Data, !page.HasNextPage()) - if page.Error != nil || !shouldContinue { - return page.Error - } - } - - return nil -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_pagination_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_pagination_test.go deleted file mode 100644 index 6b20c07..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_pagination_test.go +++ /dev/null @@ -1,305 +0,0 @@ -package aws_test - -import ( - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/internal/test/unit" - "github.com/aws/aws-sdk-go/service/dynamodb" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/stretchr/testify/assert" -) - -var _ = unit.Imported - -// Use DynamoDB methods for simplicity -func TestPagination(t *testing.T) { - db := dynamodb.New(nil) - tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false - - reqNum := 0 - resps := []*dynamodb.ListTablesOutput{ - {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, - {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, - {TableNames: []*string{aws.String("Table5")}}, - } - - db.Handlers.Send.Clear() // mock sending - db.Handlers.Unmarshal.Clear() - db.Handlers.UnmarshalMeta.Clear() - db.Handlers.ValidateResponse.Clear() - db.Handlers.Build.PushBack(func(r *aws.Request) { - in := r.Params.(*dynamodb.ListTablesInput) - if in == nil { - tokens = append(tokens, "") - } else if in.ExclusiveStartTableName != nil { - tokens = append(tokens, *in.ExclusiveStartTableName) - } - }) - db.Handlers.Unmarshal.PushBack(func(r *aws.Request) { - r.Data = resps[reqNum] - reqNum++ - }) - - params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} - err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool { - numPages++ - for _, t := range p.TableNames { - pages = append(pages, *t) - } - if last { - if gotToEnd { - assert.Fail(t, "last=true happened twice") - } - gotToEnd = true - } - return true - }) - - assert.Equal(t, []string{"Table2", "Table4"}, tokens) - assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages) - assert.Equal(t, 3, numPages) - assert.True(t, gotToEnd) - assert.Nil(t, err) - assert.Nil(t, params.ExclusiveStartTableName) -} - -// Use DynamoDB methods for simplicity -func TestPaginationEachPage(t *testing.T) { - db := dynamodb.New(nil) - tokens, pages, numPages, gotToEnd := []string{}, []string{}, 0, false - - reqNum := 0 - resps := []*dynamodb.ListTablesOutput{ - {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, - {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, - {TableNames: []*string{aws.String("Table5")}}, - } - - db.Handlers.Send.Clear() // mock sending - db.Handlers.Unmarshal.Clear() - db.Handlers.UnmarshalMeta.Clear() - db.Handlers.ValidateResponse.Clear() - db.Handlers.Build.PushBack(func(r *aws.Request) { - in := r.Params.(*dynamodb.ListTablesInput) - if in == nil { - tokens = append(tokens, "") - } else if in.ExclusiveStartTableName != nil { - tokens = append(tokens, *in.ExclusiveStartTableName) - } - }) - 
db.Handlers.Unmarshal.PushBack(func(r *aws.Request) { - r.Data = resps[reqNum] - reqNum++ - }) - - params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} - req, _ := db.ListTablesRequest(params) - err := req.EachPage(func(p interface{}, last bool) bool { - numPages++ - for _, t := range p.(*dynamodb.ListTablesOutput).TableNames { - pages = append(pages, *t) - } - if last { - if gotToEnd { - assert.Fail(t, "last=true happened twice") - } - gotToEnd = true - } - - return true - }) - - assert.Equal(t, []string{"Table2", "Table4"}, tokens) - assert.Equal(t, []string{"Table1", "Table2", "Table3", "Table4", "Table5"}, pages) - assert.Equal(t, 3, numPages) - assert.True(t, gotToEnd) - assert.Nil(t, err) -} - -// Use DynamoDB methods for simplicity -func TestPaginationEarlyExit(t *testing.T) { - db := dynamodb.New(nil) - numPages, gotToEnd := 0, false - - reqNum := 0 - resps := []*dynamodb.ListTablesOutput{ - {TableNames: []*string{aws.String("Table1"), aws.String("Table2")}, LastEvaluatedTableName: aws.String("Table2")}, - {TableNames: []*string{aws.String("Table3"), aws.String("Table4")}, LastEvaluatedTableName: aws.String("Table4")}, - {TableNames: []*string{aws.String("Table5")}}, - } - - db.Handlers.Send.Clear() // mock sending - db.Handlers.Unmarshal.Clear() - db.Handlers.UnmarshalMeta.Clear() - db.Handlers.ValidateResponse.Clear() - db.Handlers.Unmarshal.PushBack(func(r *aws.Request) { - r.Data = resps[reqNum] - reqNum++ - }) - - params := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} - err := db.ListTablesPages(params, func(p *dynamodb.ListTablesOutput, last bool) bool { - numPages++ - if numPages == 2 { - return false - } - if last { - if gotToEnd { - assert.Fail(t, "last=true happened twice") - } - gotToEnd = true - } - return true - }) - - assert.Equal(t, 2, numPages) - assert.False(t, gotToEnd) - assert.Nil(t, err) -} - -func TestSkipPagination(t *testing.T) { - client := s3.New(nil) - client.Handlers.Send.Clear() // mock sending - client.Handlers.Unmarshal.Clear() - client.Handlers.UnmarshalMeta.Clear() - client.Handlers.ValidateResponse.Clear() - client.Handlers.Unmarshal.PushBack(func(r *aws.Request) { - r.Data = &s3.HeadBucketOutput{} - }) - - req, _ := client.HeadBucketRequest(&s3.HeadBucketInput{Bucket: aws.String("bucket")}) - - numPages, gotToEnd := 0, false - req.EachPage(func(p interface{}, last bool) bool { - numPages++ - if last { - gotToEnd = true - } - return true - }) - assert.Equal(t, 1, numPages) - assert.True(t, gotToEnd) -} - -// Use S3 for simplicity -func TestPaginationTruncation(t *testing.T) { - count := 0 - client := s3.New(nil) - - reqNum := &count - resps := []*s3.ListObjectsOutput{ - {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key1")}}}, - {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key2")}}}, - {IsTruncated: aws.Bool(false), Contents: []*s3.Object{{Key: aws.String("Key3")}}}, - {IsTruncated: aws.Bool(true), Contents: []*s3.Object{{Key: aws.String("Key4")}}}, - } - - client.Handlers.Send.Clear() // mock sending - client.Handlers.Unmarshal.Clear() - client.Handlers.UnmarshalMeta.Clear() - client.Handlers.ValidateResponse.Clear() - client.Handlers.Unmarshal.PushBack(func(r *aws.Request) { - r.Data = resps[*reqNum] - *reqNum++ - }) - - params := &s3.ListObjectsInput{Bucket: aws.String("bucket")} - - results := []string{} - err := client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool { - results = append(results, *p.Contents[0].Key) - return true - }) - - assert.Equal(t, 
[]string{"Key1", "Key2", "Key3"}, results) - assert.Nil(t, err) - - // Try again without truncation token at all - count = 0 - resps[1].IsTruncated = nil - resps[2].IsTruncated = aws.Bool(true) - results = []string{} - err = client.ListObjectsPages(params, func(p *s3.ListObjectsOutput, last bool) bool { - results = append(results, *p.Contents[0].Key) - return true - }) - - assert.Equal(t, []string{"Key1", "Key2"}, results) - assert.Nil(t, err) - -} - -// Benchmarks -var benchResps = []*dynamodb.ListTablesOutput{ - {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, - {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, - {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, - {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, - {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, - {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, - {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, - {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, - {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, - {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, - {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, - {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, - {TableNames: []*string{aws.String("TABLE"), aws.String("NXT")}, LastEvaluatedTableName: aws.String("NXT")}, - {TableNames: []*string{aws.String("TABLE")}}, -} - -var benchDb = func() *dynamodb.DynamoDB { - db := dynamodb.New(nil) - db.Handlers.Send.Clear() // mock sending - db.Handlers.Unmarshal.Clear() - db.Handlers.UnmarshalMeta.Clear() - db.Handlers.ValidateResponse.Clear() - return db -} - -func BenchmarkCodegenIterator(b *testing.B) { - reqNum := 0 - db := benchDb() - db.Handlers.Unmarshal.PushBack(func(r *aws.Request) { - r.Data = benchResps[reqNum] - reqNum++ - }) - - input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} - iter := func(fn func(*dynamodb.ListTablesOutput, bool) bool) error { - page, _ := db.ListTablesRequest(input) - for ; page != nil; page = page.NextPage() { - page.Send() - out := page.Data.(*dynamodb.ListTablesOutput) - if result := fn(out, !page.HasNextPage()); page.Error != nil || !result { - return page.Error - } - } - return nil - } - - for i := 0; i < b.N; i++ { - reqNum = 0 - iter(func(p *dynamodb.ListTablesOutput, last bool) bool { - return true - }) - } -} - -func BenchmarkEachPageIterator(b *testing.B) { - reqNum := 0 - db := benchDb() - db.Handlers.Unmarshal.PushBack(func(r *aws.Request) { - r.Data = benchResps[reqNum] - reqNum++ - }) - - input := &dynamodb.ListTablesInput{Limit: aws.Int64(2)} - for i := 0; i < b.N; i++ { - reqNum = 0 - req, _ := db.ListTablesRequest(input) - req.EachPage(func(p interface{}, last bool) bool { - return true - }) - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_test.go deleted file mode 100644 index d5d945d..0000000 
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/request_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package aws - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/stretchr/testify/assert" -) - -type testData struct { - Data string -} - -func body(str string) io.ReadCloser { - return ioutil.NopCloser(bytes.NewReader([]byte(str))) -} - -func unmarshal(req *Request) { - defer req.HTTPResponse.Body.Close() - if req.Data != nil { - json.NewDecoder(req.HTTPResponse.Body).Decode(req.Data) - } - return -} - -func unmarshalError(req *Request) { - bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) - if err != nil { - req.Error = awserr.New("UnmarshaleError", req.HTTPResponse.Status, err) - return - } - if len(bodyBytes) == 0 { - req.Error = awserr.NewRequestFailure( - awserr.New("UnmarshaleError", req.HTTPResponse.Status, fmt.Errorf("empty body")), - req.HTTPResponse.StatusCode, - "", - ) - return - } - var jsonErr jsonErrorResponse - if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { - req.Error = awserr.New("UnmarshaleError", "JSON unmarshal", err) - return - } - req.Error = awserr.NewRequestFailure( - awserr.New(jsonErr.Code, jsonErr.Message, nil), - req.HTTPResponse.StatusCode, - "", - ) -} - -type jsonErrorResponse struct { - Code string `json:"__type"` - Message string `json:"message"` -} - -// test that retries occur for 5xx status codes -func TestRequestRecoverRetry5xx(t *testing.T) { - reqNum := 0 - reqs := []http.Response{ - {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, - {StatusCode: 501, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, - {StatusCode: 200, Body: body(`{"data":"valid"}`)}, - } - - s := NewService(NewConfig().WithMaxRetries(10)) - s.Handlers.Validate.Clear() - s.Handlers.Unmarshal.PushBack(unmarshal) - s.Handlers.UnmarshalError.PushBack(unmarshalError) - s.Handlers.Send.Clear() // mock sending - s.Handlers.Send.PushBack(func(r *Request) { - r.HTTPResponse = &reqs[reqNum] - reqNum++ - }) - out := &testData{} - r := NewRequest(s, &Operation{Name: "Operation"}, nil, out) - err := r.Send() - assert.Nil(t, err) - assert.Equal(t, 2, int(r.RetryCount)) - assert.Equal(t, "valid", out.Data) -} - -// test that retries occur for 4xx status codes with a response type that can be retried - see `shouldRetry` -func TestRequestRecoverRetry4xxRetryable(t *testing.T) { - reqNum := 0 - reqs := []http.Response{ - {StatusCode: 400, Body: body(`{"__type":"Throttling","message":"Rate exceeded."}`)}, - {StatusCode: 429, Body: body(`{"__type":"ProvisionedThroughputExceededException","message":"Rate exceeded."}`)}, - {StatusCode: 200, Body: body(`{"data":"valid"}`)}, - } - - s := NewService(NewConfig().WithMaxRetries(10)) - s.Handlers.Validate.Clear() - s.Handlers.Unmarshal.PushBack(unmarshal) - s.Handlers.UnmarshalError.PushBack(unmarshalError) - s.Handlers.Send.Clear() // mock sending - s.Handlers.Send.PushBack(func(r *Request) { - r.HTTPResponse = &reqs[reqNum] - reqNum++ - }) - out := &testData{} - r := NewRequest(s, &Operation{Name: "Operation"}, nil, out) - err := r.Send() - assert.Nil(t, err) - assert.Equal(t, 2, int(r.RetryCount)) - assert.Equal(t, "valid", out.Data) -} - -// test that retries don't occur for 4xx status codes with a response type that can't be retried -func TestRequest4xxUnretryable(t *testing.T) { - s := 
NewService(NewConfig().WithMaxRetries(10)) - s.Handlers.Validate.Clear() - s.Handlers.Unmarshal.PushBack(unmarshal) - s.Handlers.UnmarshalError.PushBack(unmarshalError) - s.Handlers.Send.Clear() // mock sending - s.Handlers.Send.PushBack(func(r *Request) { - r.HTTPResponse = &http.Response{StatusCode: 401, Body: body(`{"__type":"SignatureDoesNotMatch","message":"Signature does not match."}`)} - }) - out := &testData{} - r := NewRequest(s, &Operation{Name: "Operation"}, nil, out) - err := r.Send() - assert.NotNil(t, err) - if e, ok := err.(awserr.RequestFailure); ok { - assert.Equal(t, 401, e.StatusCode()) - } else { - assert.Fail(t, "Expected error to be a service failure") - } - assert.Equal(t, "SignatureDoesNotMatch", err.(awserr.Error).Code()) - assert.Equal(t, "Signature does not match.", err.(awserr.Error).Message()) - assert.Equal(t, 0, int(r.RetryCount)) -} - -func TestRequestExhaustRetries(t *testing.T) { - delays := []time.Duration{} - sleepDelay = func(delay time.Duration) { - delays = append(delays, delay) - } - - reqNum := 0 - reqs := []http.Response{ - {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, - {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, - {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, - {StatusCode: 500, Body: body(`{"__type":"UnknownError","message":"An error occurred."}`)}, - } - - s := NewService(NewConfig().WithMaxRetries(DefaultRetries)) - s.Handlers.Validate.Clear() - s.Handlers.Unmarshal.PushBack(unmarshal) - s.Handlers.UnmarshalError.PushBack(unmarshalError) - s.Handlers.Send.Clear() // mock sending - s.Handlers.Send.PushBack(func(r *Request) { - r.HTTPResponse = &reqs[reqNum] - reqNum++ - }) - r := NewRequest(s, &Operation{Name: "Operation"}, nil, nil) - err := r.Send() - assert.NotNil(t, err) - if e, ok := err.(awserr.RequestFailure); ok { - assert.Equal(t, 500, e.StatusCode()) - } else { - assert.Fail(t, "Expected error to be a service failure") - } - assert.Equal(t, "UnknownError", err.(awserr.Error).Code()) - assert.Equal(t, "An error occurred.", err.(awserr.Error).Message()) - assert.Equal(t, 3, int(r.RetryCount)) - - expectDelays := []struct{ min, max time.Duration }{{30, 59}, {60, 118}, {120, 236}} - for i, v := range delays { - min := expectDelays[i].min * time.Millisecond - max := expectDelays[i].max * time.Millisecond - assert.True(t, min <= v && v <= max, - "Expect delay to be within range, i:%d, v:%s, min:%s, max:%s", i, v, min, max) - } -} - -// test that the request is retried after the credentials are expired. 
-func TestRequestRecoverExpiredCreds(t *testing.T) { - reqNum := 0 - reqs := []http.Response{ - {StatusCode: 400, Body: body(`{"__type":"ExpiredTokenException","message":"expired token"}`)}, - {StatusCode: 200, Body: body(`{"data":"valid"}`)}, - } - - s := NewService(&Config{MaxRetries: Int(10), Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "")}) - s.Handlers.Validate.Clear() - s.Handlers.Unmarshal.PushBack(unmarshal) - s.Handlers.UnmarshalError.PushBack(unmarshalError) - - credExpiredBeforeRetry := false - credExpiredAfterRetry := false - - s.Handlers.AfterRetry.PushBack(func(r *Request) { - credExpiredAfterRetry = r.Config.Credentials.IsExpired() - }) - - s.Handlers.Sign.Clear() - s.Handlers.Sign.PushBack(func(r *Request) { - r.Config.Credentials.Get() - }) - s.Handlers.Send.Clear() // mock sending - s.Handlers.Send.PushBack(func(r *Request) { - r.HTTPResponse = &reqs[reqNum] - reqNum++ - }) - out := &testData{} - r := NewRequest(s, &Operation{Name: "Operation"}, nil, out) - err := r.Send() - assert.Nil(t, err) - - assert.False(t, credExpiredBeforeRetry, "Expect valid creds before retry check") - assert.True(t, credExpiredAfterRetry, "Expect expired creds after retry check") - assert.False(t, s.Config.Credentials.IsExpired(), "Expect valid creds after cred expired recovery") - - assert.Equal(t, 1, int(r.RetryCount)) - assert.Equal(t, "valid", out.Data) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service.go deleted file mode 100644 index 672f7de..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/service.go +++ /dev/null @@ -1,194 +0,0 @@ -package aws - -import ( - "fmt" - "math" - "math/rand" - "net/http" - "net/http/httputil" - "regexp" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/internal/endpoints" -) - -// A Service implements the base service request and response handling -// used by all services. -type Service struct { - Config *Config - Handlers Handlers - ServiceName string - APIVersion string - Endpoint string - SigningName string - SigningRegion string - JSONVersion string - TargetPrefix string - RetryRules func(*Request) time.Duration - ShouldRetry func(*Request) bool - DefaultMaxRetries uint -} - -var schemeRE = regexp.MustCompile("^([^:]+)://") - -// NewService will return a pointer to a new Server object initialized. -func NewService(config *Config) *Service { - svc := &Service{Config: config} - svc.Initialize() - return svc -} - -// Initialize initializes the service. -func (s *Service) Initialize() { - if s.Config == nil { - s.Config = &Config{} - } - if s.Config.HTTPClient == nil { - s.Config.HTTPClient = http.DefaultClient - } - - if s.RetryRules == nil { - s.RetryRules = retryRules - } - - if s.ShouldRetry == nil { - s.ShouldRetry = shouldRetry - } - - s.DefaultMaxRetries = 3 - s.Handlers.Validate.PushBack(ValidateEndpointHandler) - s.Handlers.Build.PushBack(UserAgentHandler) - s.Handlers.Sign.PushBack(BuildContentLength) - s.Handlers.Send.PushBack(SendHandler) - s.Handlers.AfterRetry.PushBack(AfterRetryHandler) - s.Handlers.ValidateResponse.PushBack(ValidateResponseHandler) - s.AddDebugHandlers() - s.buildEndpoint() - - if !BoolValue(s.Config.DisableParamValidation) { - s.Handlers.Validate.PushBack(ValidateParameters) - } -} - -// buildEndpoint builds the endpoint values the service will use to make requests with. 
-func (s *Service) buildEndpoint() { - if StringValue(s.Config.Endpoint) != "" { - s.Endpoint = *s.Config.Endpoint - } else { - s.Endpoint, s.SigningRegion = - endpoints.EndpointForRegion(s.ServiceName, StringValue(s.Config.Region)) - } - - if s.Endpoint != "" && !schemeRE.MatchString(s.Endpoint) { - scheme := "https" - if BoolValue(s.Config.DisableSSL) { - scheme = "http" - } - s.Endpoint = scheme + "://" + s.Endpoint - } -} - -// AddDebugHandlers injects debug logging handlers into the service to log request -// debug information. -func (s *Service) AddDebugHandlers() { - if !s.Config.LogLevel.AtLeast(LogDebug) { - return - } - - s.Handlers.Send.PushFront(logRequest) - s.Handlers.Send.PushBack(logResponse) -} - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -func logRequest(r *Request) { - logBody := r.Config.LogLevel.Matches(LogDebugWithHTTPBody) - dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ServiceName, r.Operation.Name, string(dumpedBody))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -func logResponse(r *Request) { - var msg = "no reponse data" - if r.HTTPResponse != nil { - logBody := r.Config.LogLevel.Matches(LogDebugWithHTTPBody) - dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) - msg = string(dumpedBody) - } else if r.Error != nil { - msg = r.Error.Error() - } - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ServiceName, r.Operation.Name, msg)) -} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API request. -func (s *Service) MaxRetries() uint { - if IntValue(s.Config.MaxRetries) < 0 { - return s.DefaultMaxRetries - } - return uint(IntValue(s.Config.MaxRetries)) -} - -var seededRand = rand.New(rand.NewSource(time.Now().UnixNano())) - -// retryRules returns the delay duration before retrying this request again -func retryRules(r *Request) time.Duration { - - delay := int(math.Pow(2, float64(r.RetryCount))) * (seededRand.Intn(30) + 30) - return time.Duration(delay) * time.Millisecond -} - -// retryableCodes is a collection of service response codes which are retry-able -// without any further action. -var retryableCodes = map[string]struct{}{ - "RequestError": {}, - "ProvisionedThroughputExceededException": {}, - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, -} - -// credsExpiredCodes is a collection of error codes which signify the credentials -// need to be refreshed. Expired tokens require refreshing of credentials, and -// resigning before the request can be retried. -var credsExpiredCodes = map[string]struct{}{ - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "RequestExpired": {}, // EC2 Only -} - -func isCodeRetryable(code string) bool { - if _, ok := retryableCodes[code]; ok { - return true - } - - return isCodeExpiredCreds(code) -} - -func isCodeExpiredCreds(code string) bool { - _, ok := credsExpiredCodes[code] - return ok -} - -// shouldRetry returns if the request should be retried. 
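Before the shouldRetry implementation that follows, a quick concrete check on the retryRules arithmetic above: each retry doubles a jittered 30-59ms base, which is exactly where the delay windows asserted earlier in TestRequestExhaustRetries (30-59ms, 60-118ms, 120-236ms) come from. A standalone, hedged sketch of the same calculation (retryDelay is my own helper name, not SDK API):

// Standalone reproduction of the removed retryRules formula, for illustration.
package main

import (
	"fmt"
	"math"
	"math/rand"
	"time"
)

func retryDelay(retryCount uint, rng *rand.Rand) time.Duration {
	// 2^retryCount * (random 30..59) milliseconds, matching retryRules above.
	delay := int(math.Pow(2, float64(retryCount))) * (rng.Intn(30) + 30)
	return time.Duration(delay) * time.Millisecond
}

func main() {
	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
	for retry := uint(0); retry < 3; retry++ {
		// Expected windows: retry 0: 30-59ms, retry 1: 60-118ms, retry 2: 120-236ms.
		fmt.Printf("retry %d: %s\n", retry, retryDelay(retry, rng))
	}
}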
-func shouldRetry(r *Request) bool { - if r.HTTPResponse.StatusCode >= 500 { - return true - } - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeRetryable(err.Code()) - } - } - return false -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go deleted file mode 100644 index 87905d7..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/types.go +++ /dev/null @@ -1,55 +0,0 @@ -package aws - -import ( - "io" -) - -// ReadSeekCloser wraps a io.Reader returning a ReaderSeakerCloser -func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { - return ReaderSeekerCloser{r} -} - -// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and -// io.Closer interfaces to the underlying object if they are available. -type ReaderSeekerCloser struct { - r io.Reader -} - -// Read reads from the reader up to size of p. The number of bytes read, and -// error if it occurred will be returned. -// -// If the reader is not an io.Reader zero bytes read, and nil error will be returned. -// -// Performs the same functionality as io.Reader Read -func (r ReaderSeekerCloser) Read(p []byte) (int, error) { - switch t := r.r.(type) { - case io.Reader: - return t.Read(p) - } - return 0, nil -} - -// Seek sets the offset for the next Read to offset, interpreted according to -// whence: 0 means relative to the origin of the file, 1 means relative to the -// current offset, and 2 means relative to the end. Seek returns the new offset -// and an error, if any. -// -// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. -func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { - switch t := r.r.(type) { - case io.Seeker: - return t.Seek(offset, whence) - } - return int64(0), nil -} - -// Close closes the ReaderSeekerCloser. -// -// If the ReaderSeekerCloser is not an io.Closer nothing will be done. -func (r ReaderSeekerCloser) Close() error { - switch t := r.r.(type) { - case io.Closer: - return t.Close() - } - return nil -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go deleted file mode 100644 index d040ccc..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.go +++ /dev/null @@ -1,31 +0,0 @@ -// Package endpoints validates regional endpoints for services. -package endpoints - -//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go -//go:generate gofmt -s -w endpoints_map.go - -import "strings" - -// EndpointForRegion returns an endpoint and its signing region for a service and region. -// if the service and region pair are not found endpoint and signingRegion will be empty. 
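To make that lookup order concrete before the implementation and the generated endpoint tables below: derived keys are tried from most to least specific, and "{region}"/"{service}" placeholders in the matched endpoint are substituted. A hedged sketch using a tiny hand-picked subset of the real table (sampleEndpoints and endpointFor are illustrative names, not SDK API); the three entries shown do appear in the endpoints.json removed below.

// Illustrative sketch of the derived-key lookup described above.
package main

import (
	"fmt"
	"strings"
)

var sampleEndpoints = map[string]string{
	"us-east-1/s3": "s3.amazonaws.com",
	"*/iam":        "iam.amazonaws.com",
	"*/*":          "{service}.{region}.amazonaws.com",
}

func endpointFor(svcName, region string) string {
	// Keys are tried most-specific first, mirroring EndpointForRegion.
	for _, key := range []string{
		region + "/" + svcName,
		region + "/*",
		"*/" + svcName,
		"*/*",
	} {
		if ep, ok := sampleEndpoints[key]; ok {
			ep = strings.Replace(ep, "{region}", region, -1)
			ep = strings.Replace(ep, "{service}", svcName, -1)
			return ep
		}
	}
	return ""
}

func main() {
	fmt.Println(endpointFor("s3", "us-east-1"))       // s3.amazonaws.com
	fmt.Println(endpointFor("iam", "eu-west-1"))      // iam.amazonaws.com
	fmt.Println(endpointFor("dynamodb", "us-west-2")) // dynamodb.us-west-2.amazonaws.com
}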
-func EndpointForRegion(svcName, region string) (endpoint, signingRegion string) { - derivedKeys := []string{ - region + "/" + svcName, - region + "/*", - "*/" + svcName, - "*/*", - } - - for _, key := range derivedKeys { - if val, ok := endpointsMap.Endpoints[key]; ok { - ep := val.Endpoint - ep = strings.Replace(ep, "{region}", region, -1) - ep = strings.Replace(ep, "{service}", svcName, -1) - - endpoint = ep - signingRegion = val.SigningRegion - return - } - } - return -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json deleted file mode 100644 index 4c58809..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints.json +++ /dev/null @@ -1,77 +0,0 @@ -{ - "version": 2, - "endpoints": { - "*/*": { - "endpoint": "{service}.{region}.amazonaws.com" - }, - "cn-north-1/*": { - "endpoint": "{service}.{region}.amazonaws.com.cn", - "signatureVersion": "v4" - }, - "us-gov-west-1/iam": { - "endpoint": "iam.us-gov.amazonaws.com" - }, - "us-gov-west-1/sts": { - "endpoint": "sts.us-gov-west-1.amazonaws.com" - }, - "us-gov-west-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "*/cloudfront": { - "endpoint": "cloudfront.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/cloudsearchdomain": { - "endpoint": "", - "signingRegion": "us-east-1" - }, - "*/iam": { - "endpoint": "iam.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/importexport": { - "endpoint": "importexport.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/route53": { - "endpoint": "route53.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/sts": { - "endpoint": "sts.amazonaws.com", - "signingRegion": "us-east-1" - }, - "us-east-1/sdb": { - "endpoint": "sdb.amazonaws.com", - "signingRegion": "us-east-1" - }, - "us-east-1/s3": { - "endpoint": "s3.amazonaws.com" - }, - "us-west-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "us-west-2/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "eu-west-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "ap-southeast-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "ap-southeast-2/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "ap-northeast-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "sa-east-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "eu-central-1/s3": { - "endpoint": "{service}.{region}.amazonaws.com", - "signatureVersion": "v4" - } - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go deleted file mode 100644 index 894c1a6..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_map.go +++ /dev/null @@ -1,89 +0,0 @@ -package endpoints - -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
- -type endpointStruct struct { - Version int - Endpoints map[string]endpointEntry -} - -type endpointEntry struct { - Endpoint string - SigningRegion string -} - -var endpointsMap = endpointStruct{ - Version: 2, - Endpoints: map[string]endpointEntry{ - "*/*": { - Endpoint: "{service}.{region}.amazonaws.com", - }, - "*/cloudfront": { - Endpoint: "cloudfront.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/cloudsearchdomain": { - Endpoint: "", - SigningRegion: "us-east-1", - }, - "*/iam": { - Endpoint: "iam.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/importexport": { - Endpoint: "importexport.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/route53": { - Endpoint: "route53.amazonaws.com", - SigningRegion: "us-east-1", - }, - "*/sts": { - Endpoint: "sts.amazonaws.com", - SigningRegion: "us-east-1", - }, - "ap-northeast-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "ap-southeast-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "ap-southeast-2/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "cn-north-1/*": { - Endpoint: "{service}.{region}.amazonaws.com.cn", - }, - "eu-central-1/s3": { - Endpoint: "{service}.{region}.amazonaws.com", - }, - "eu-west-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "sa-east-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "us-east-1/s3": { - Endpoint: "s3.amazonaws.com", - }, - "us-east-1/sdb": { - Endpoint: "sdb.amazonaws.com", - SigningRegion: "us-east-1", - }, - "us-gov-west-1/iam": { - Endpoint: "iam.us-gov.amazonaws.com", - }, - "us-gov-west-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "us-gov-west-1/sts": { - Endpoint: "sts.us-gov-west-1.amazonaws.com", - }, - "us-west-1/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - "us-west-2/s3": { - Endpoint: "s3-{region}.amazonaws.com", - }, - }, -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_test.go deleted file mode 100644 index 8af6587..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/endpoints/endpoints_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package endpoints - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestGlobalEndpoints(t *testing.T) { - region := "mock-region-1" - svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts"} - - for _, name := range svcs { - ep, sr := EndpointForRegion(name, region) - assert.Equal(t, name+".amazonaws.com", ep) - assert.Equal(t, "us-east-1", sr) - } -} - -func TestServicesInCN(t *testing.T) { - region := "cn-north-1" - svcs := []string{"cloudfront", "iam", "importexport", "route53", "sts", "s3"} - - for _, name := range svcs { - ep, _ := EndpointForRegion(name, region) - assert.Equal(t, name+"."+region+".amazonaws.com.cn", ep) - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build_test.go deleted file mode 100644 index 52bbf7e..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build_test.go +++ /dev/null @@ -1,1482 +0,0 @@ -package query_test - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "io" - "io/ioutil" - "net/http" - "net/url" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/internal/protocol/query" - "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil" - 
"github.com/aws/aws-sdk-go/internal/signer/v4" - "github.com/aws/aws-sdk-go/internal/util" - "github.com/stretchr/testify/assert" -) - -var _ bytes.Buffer // always import bytes -var _ http.Request -var _ json.Marshaler -var _ time.Time -var _ xmlutil.XMLNode -var _ xml.Attr -var _ = ioutil.Discard -var _ = util.Trim("") -var _ = url.Values{} -var _ = io.EOF - -type InputService1ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService1ProtocolTest client. -func NewInputService1ProtocolTest(config *aws.Config) *InputService1ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice1protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &InputService1ProtocolTest{service} -} - -// newRequest creates a new request for a InputService1ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService1TestCaseOperation1 = "OperationName" - -// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. -func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *aws.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService1TestCaseOperation1, - } - - if input == nil { - input = &InputService1TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService1TestShapeInputService1TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { - req, out := c.InputService1TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService1TestShapeInputService1TestCaseOperation1Output struct { - metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService1TestShapeInputShape struct { - Bar *string `type:"string"` - - Foo *string `type:"string"` - - metadataInputService1TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService1TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService2ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService2ProtocolTest client. 
-func NewInputService2ProtocolTest(config *aws.Config) *InputService2ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice2protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &InputService2ProtocolTest{service} -} - -// newRequest creates a new request for a InputService2ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService2TestCaseOperation1 = "OperationName" - -// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. -func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputShape) (req *aws.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService2TestCaseOperation1, - } - - if input == nil { - input = &InputService2TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService2TestShapeInputService2TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputShape) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { - req, out := c.InputService2TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService2TestShapeInputService2TestCaseOperation1Output struct { - metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService2TestShapeInputShape struct { - StructArg *InputService2TestShapeStructType `type:"structure"` - - metadataInputService2TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService2TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService2TestShapeStructType struct { - ScalarArg *string `type:"string"` - - metadataInputService2TestShapeStructType `json:"-" xml:"-"` -} - -type metadataInputService2TestShapeStructType struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService3ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService3ProtocolTest client. -func NewInputService3ProtocolTest(config *aws.Config) *InputService3ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice3protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &InputService3ProtocolTest{service} -} - -// newRequest creates a new request for a InputService3ProtocolTest operation and runs any -// custom request initialization. 
-func (c *InputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService3TestCaseOperation1 = "OperationName" - -// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. -func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *aws.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService3TestCaseOperation1, - } - - if input == nil { - input = &InputService3TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService3TestShapeInputService3TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { - req, out := c.InputService3TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -const opInputService3TestCaseOperation2 = "OperationName" - -// InputService3TestCaseOperation2Request generates a request for the InputService3TestCaseOperation2 operation. -func (c *InputService3ProtocolTest) InputService3TestCaseOperation2Request(input *InputService3TestShapeInputShape) (req *aws.Request, output *InputService3TestShapeInputService3TestCaseOperation2Output) { - op := &aws.Operation{ - Name: opInputService3TestCaseOperation2, - } - - if input == nil { - input = &InputService3TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService3TestShapeInputService3TestCaseOperation2Output{} - req.Data = output - return -} - -func (c *InputService3ProtocolTest) InputService3TestCaseOperation2(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation2Output, error) { - req, out := c.InputService3TestCaseOperation2Request(input) - err := req.Send() - return out, err -} - -type InputService3TestShapeInputService3TestCaseOperation1Output struct { - metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService3TestShapeInputService3TestCaseOperation2Output struct { - metadataInputService3TestShapeInputService3TestCaseOperation2Output `json:"-" xml:"-"` -} - -type metadataInputService3TestShapeInputService3TestCaseOperation2Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService3TestShapeInputShape struct { - ListArg []*string `type:"list"` - - metadataInputService3TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService3TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService4ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService4ProtocolTest client. 
-func NewInputService4ProtocolTest(config *aws.Config) *InputService4ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice4protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &InputService4ProtocolTest{service} -} - -// newRequest creates a new request for a InputService4ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService4TestCaseOperation1 = "OperationName" - -// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. -func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *aws.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService4TestCaseOperation1, - } - - if input == nil { - input = &InputService4TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService4TestShapeInputService4TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { - req, out := c.InputService4TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -const opInputService4TestCaseOperation2 = "OperationName" - -// InputService4TestCaseOperation2Request generates a request for the InputService4TestCaseOperation2 operation. 
-func (c *InputService4ProtocolTest) InputService4TestCaseOperation2Request(input *InputService4TestShapeInputShape) (req *aws.Request, output *InputService4TestShapeInputService4TestCaseOperation2Output) { - op := &aws.Operation{ - Name: opInputService4TestCaseOperation2, - } - - if input == nil { - input = &InputService4TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService4TestShapeInputService4TestCaseOperation2Output{} - req.Data = output - return -} - -func (c *InputService4ProtocolTest) InputService4TestCaseOperation2(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation2Output, error) { - req, out := c.InputService4TestCaseOperation2Request(input) - err := req.Send() - return out, err -} - -type InputService4TestShapeInputService4TestCaseOperation1Output struct { - metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService4TestShapeInputService4TestCaseOperation2Output struct { - metadataInputService4TestShapeInputService4TestCaseOperation2Output `json:"-" xml:"-"` -} - -type metadataInputService4TestShapeInputService4TestCaseOperation2Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService4TestShapeInputShape struct { - ListArg []*string `type:"list" flattened:"true"` - - ScalarArg *string `type:"string"` - - metadataInputService4TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService4TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService5ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService5ProtocolTest client. -func NewInputService5ProtocolTest(config *aws.Config) *InputService5ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice5protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &InputService5ProtocolTest{service} -} - -// newRequest creates a new request for a InputService5ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService5TestCaseOperation1 = "OperationName" - -// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. 
-func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputShape) (req *aws.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService5TestCaseOperation1, - } - - if input == nil { - input = &InputService5TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService5TestShapeInputService5TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { - req, out := c.InputService5TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -const opInputService5TestCaseOperation2 = "OperationName" - -// InputService5TestCaseOperation2Request generates a request for the InputService5TestCaseOperation2 operation. -func (c *InputService5ProtocolTest) InputService5TestCaseOperation2Request(input *InputService5TestShapeInputShape) (req *aws.Request, output *InputService5TestShapeInputService5TestCaseOperation2Output) { - op := &aws.Operation{ - Name: opInputService5TestCaseOperation2, - } - - if input == nil { - input = &InputService5TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService5TestShapeInputService5TestCaseOperation2Output{} - req.Data = output - return -} - -func (c *InputService5ProtocolTest) InputService5TestCaseOperation2(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation2Output, error) { - req, out := c.InputService5TestCaseOperation2Request(input) - err := req.Send() - return out, err -} - -type InputService5TestShapeInputService5TestCaseOperation1Output struct { - metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService5TestShapeInputService5TestCaseOperation2Output struct { - metadataInputService5TestShapeInputService5TestCaseOperation2Output `json:"-" xml:"-"` -} - -type metadataInputService5TestShapeInputService5TestCaseOperation2Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService5TestShapeInputShape struct { - MapArg map[string]*string `type:"map"` - - metadataInputService5TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService5TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService6ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService6ProtocolTest client. -func NewInputService6ProtocolTest(config *aws.Config) *InputService6ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice6protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &InputService6ProtocolTest{service} -} - -// newRequest creates a new request for a InputService6ProtocolTest operation and runs any -// custom request initialization. 
-func (c *InputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService6TestCaseOperation1 = "OperationName" - -// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. -func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputShape) (req *aws.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService6TestCaseOperation1, - } - - if input == nil { - input = &InputService6TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService6TestShapeInputService6TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputShape) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { - req, out := c.InputService6TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService6TestShapeInputService6TestCaseOperation1Output struct { - metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService6TestShapeInputShape struct { - MapArg map[string]*string `locationNameKey:"TheKey" locationNameValue:"TheValue" type:"map"` - - metadataInputService6TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService6TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService7ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService7ProtocolTest client. -func NewInputService7ProtocolTest(config *aws.Config) *InputService7ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice7protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &InputService7ProtocolTest{service} -} - -// newRequest creates a new request for a InputService7ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService7TestCaseOperation1 = "OperationName" - -// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. 
-func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputShape) (req *aws.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService7TestCaseOperation1, - } - - if input == nil { - input = &InputService7TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService7TestShapeInputService7TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputShape) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { - req, out := c.InputService7TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService7TestShapeInputService7TestCaseOperation1Output struct { - metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService7TestShapeInputShape struct { - BlobArg []byte `type:"blob"` - - metadataInputService7TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService7TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService8ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService8ProtocolTest client. -func NewInputService8ProtocolTest(config *aws.Config) *InputService8ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice8protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &InputService8ProtocolTest{service} -} - -// newRequest creates a new request for a InputService8ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService8TestCaseOperation1 = "OperationName" - -// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. 
-func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService8TestCaseOperation1, - } - - if input == nil { - input = &InputService8TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService8TestShapeInputService8TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputShape) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { - req, out := c.InputService8TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService8TestShapeInputService8TestCaseOperation1Output struct { - metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService8TestShapeInputShape struct { - TimeArg *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - metadataInputService8TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService8TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService9ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService9ProtocolTest client. -func NewInputService9ProtocolTest(config *aws.Config) *InputService9ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice9protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &InputService9ProtocolTest{service} -} - -// newRequest creates a new request for a InputService9ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService9TestCaseOperation1 = "OperationName" - -// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation. -func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService9TestCaseOperation1, - } - - if input == nil { - input = &InputService9TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService9TestShapeInputService9TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { - req, out := c.InputService9TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -const opInputService9TestCaseOperation2 = "OperationName" - -// InputService9TestCaseOperation2Request generates a request for the InputService9TestCaseOperation2 operation. 
-func (c *InputService9ProtocolTest) InputService9TestCaseOperation2Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation2Output) { - op := &aws.Operation{ - Name: opInputService9TestCaseOperation2, - } - - if input == nil { - input = &InputService9TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService9TestShapeInputService9TestCaseOperation2Output{} - req.Data = output - return -} - -func (c *InputService9ProtocolTest) InputService9TestCaseOperation2(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation2Output, error) { - req, out := c.InputService9TestCaseOperation2Request(input) - err := req.Send() - return out, err -} - -const opInputService9TestCaseOperation3 = "OperationName" - -// InputService9TestCaseOperation3Request generates a request for the InputService9TestCaseOperation3 operation. -func (c *InputService9ProtocolTest) InputService9TestCaseOperation3Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation3Output) { - op := &aws.Operation{ - Name: opInputService9TestCaseOperation3, - } - - if input == nil { - input = &InputService9TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService9TestShapeInputService9TestCaseOperation3Output{} - req.Data = output - return -} - -func (c *InputService9ProtocolTest) InputService9TestCaseOperation3(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation3Output, error) { - req, out := c.InputService9TestCaseOperation3Request(input) - err := req.Send() - return out, err -} - -const opInputService9TestCaseOperation4 = "OperationName" - -// InputService9TestCaseOperation4Request generates a request for the InputService9TestCaseOperation4 operation. -func (c *InputService9ProtocolTest) InputService9TestCaseOperation4Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation4Output) { - op := &aws.Operation{ - Name: opInputService9TestCaseOperation4, - } - - if input == nil { - input = &InputService9TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService9TestShapeInputService9TestCaseOperation4Output{} - req.Data = output - return -} - -func (c *InputService9ProtocolTest) InputService9TestCaseOperation4(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation4Output, error) { - req, out := c.InputService9TestCaseOperation4Request(input) - err := req.Send() - return out, err -} - -const opInputService9TestCaseOperation5 = "OperationName" - -// InputService9TestCaseOperation5Request generates a request for the InputService9TestCaseOperation5 operation. 
-func (c *InputService9ProtocolTest) InputService9TestCaseOperation5Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation5Output) { - op := &aws.Operation{ - Name: opInputService9TestCaseOperation5, - } - - if input == nil { - input = &InputService9TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService9TestShapeInputService9TestCaseOperation5Output{} - req.Data = output - return -} - -func (c *InputService9ProtocolTest) InputService9TestCaseOperation5(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation5Output, error) { - req, out := c.InputService9TestCaseOperation5Request(input) - err := req.Send() - return out, err -} - -const opInputService9TestCaseOperation6 = "OperationName" - -// InputService9TestCaseOperation6Request generates a request for the InputService9TestCaseOperation6 operation. -func (c *InputService9ProtocolTest) InputService9TestCaseOperation6Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation6Output) { - op := &aws.Operation{ - Name: opInputService9TestCaseOperation6, - } - - if input == nil { - input = &InputService9TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService9TestShapeInputService9TestCaseOperation6Output{} - req.Data = output - return -} - -func (c *InputService9ProtocolTest) InputService9TestCaseOperation6(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation6Output, error) { - req, out := c.InputService9TestCaseOperation6Request(input) - err := req.Send() - return out, err -} - -type InputService9TestShapeInputService9TestCaseOperation1Output struct { - metadataInputService9TestShapeInputService9TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService9TestShapeInputService9TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService9TestShapeInputService9TestCaseOperation2Output struct { - metadataInputService9TestShapeInputService9TestCaseOperation2Output `json:"-" xml:"-"` -} - -type metadataInputService9TestShapeInputService9TestCaseOperation2Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService9TestShapeInputService9TestCaseOperation3Output struct { - metadataInputService9TestShapeInputService9TestCaseOperation3Output `json:"-" xml:"-"` -} - -type metadataInputService9TestShapeInputService9TestCaseOperation3Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService9TestShapeInputService9TestCaseOperation4Output struct { - metadataInputService9TestShapeInputService9TestCaseOperation4Output `json:"-" xml:"-"` -} - -type metadataInputService9TestShapeInputService9TestCaseOperation4Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService9TestShapeInputService9TestCaseOperation5Output struct { - metadataInputService9TestShapeInputService9TestCaseOperation5Output `json:"-" xml:"-"` -} - -type metadataInputService9TestShapeInputService9TestCaseOperation5Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService9TestShapeInputService9TestCaseOperation6Output struct { - metadataInputService9TestShapeInputService9TestCaseOperation6Output `json:"-" xml:"-"` -} - -type metadataInputService9TestShapeInputService9TestCaseOperation6Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type 
InputService9TestShapeInputShape struct { - RecursiveStruct *InputService9TestShapeRecursiveStructType `type:"structure"` - - metadataInputService9TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService9TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService9TestShapeRecursiveStructType struct { - NoRecurse *string `type:"string"` - - RecursiveList []*InputService9TestShapeRecursiveStructType `type:"list"` - - RecursiveMap map[string]*InputService9TestShapeRecursiveStructType `type:"map"` - - RecursiveStruct *InputService9TestShapeRecursiveStructType `type:"structure"` - - metadataInputService9TestShapeRecursiveStructType `json:"-" xml:"-"` -} - -type metadataInputService9TestShapeRecursiveStructType struct { - SDKShapeTraits bool `type:"structure"` -} - -// -// Tests begin here -// - -func TestInputService1ProtocolTestScalarMembersCase1(t *testing.T) { - svc := NewInputService1ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService1TestShapeInputShape{ - Bar: aws.String("val2"), - Foo: aws.String("val1"), - } - req, _ := svc.InputService1TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&Bar=val2&Foo=val1&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService2ProtocolTestNestedStructureMembersCase1(t *testing.T) { - svc := NewInputService2ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService2TestShapeInputShape{ - StructArg: &InputService2TestShapeStructType{ - ScalarArg: aws.String("foo"), - }, - } - req, _ := svc.InputService2TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&StructArg.ScalarArg=foo&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService3ProtocolTestListTypesCase1(t *testing.T) { - svc := NewInputService3ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService3TestShapeInputShape{ - ListArg: []*string{ - aws.String("foo"), - aws.String("bar"), - aws.String("baz"), - }, - } - req, _ := svc.InputService3TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&ListArg.member.1=foo&ListArg.member.2=bar&ListArg.member.3=baz&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService3ProtocolTestListTypesCase2(t *testing.T) { - svc := NewInputService3ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService3TestShapeInputShape{ - ListArg: []*string{}, - } - req, _ := svc.InputService3TestCaseOperation2Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, 
util.Trim(`Action=OperationName&ListArg=&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService4ProtocolTestFlattenedListCase1(t *testing.T) { - svc := NewInputService4ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService4TestShapeInputShape{ - ListArg: []*string{ - aws.String("a"), - aws.String("b"), - aws.String("c"), - }, - ScalarArg: aws.String("foo"), - } - req, _ := svc.InputService4TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&ListArg.1=a&ListArg.2=b&ListArg.3=c&ScalarArg=foo&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService4ProtocolTestFlattenedListCase2(t *testing.T) { - svc := NewInputService4ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService4TestShapeInputShape{ - ListArg: []*string{}, - ScalarArg: aws.String("foo"), - } - req, _ := svc.InputService4TestCaseOperation2Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&ListArg=&ScalarArg=foo&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService5ProtocolTestSerializeMapTypeCase1(t *testing.T) { - svc := NewInputService5ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService5TestShapeInputShape{ - MapArg: map[string]*string{ - "key1": aws.String("val1"), - "key2": aws.String("val2"), - }, - } - req, _ := svc.InputService5TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&MapArg.entry.1.key=key1&MapArg.entry.1.value=val1&MapArg.entry.2.key=key2&MapArg.entry.2.value=val2&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService5ProtocolTestSerializeMapTypeCase2(t *testing.T) { - svc := NewInputService5ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService5TestShapeInputShape{ - MapArg: map[string]*string{}, - } - req, _ := svc.InputService5TestCaseOperation2Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&MapArg=&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService6ProtocolTestSerializeMapTypeWithLocationNameCase1(t *testing.T) { - svc := NewInputService6ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService6TestShapeInputShape{ - MapArg: map[string]*string{ - "key1": aws.String("val1"), - "key2": aws.String("val2"), - }, - } - req, _ := 
svc.InputService6TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&MapArg.entry.1.TheKey=key1&MapArg.entry.1.TheValue=val1&MapArg.entry.2.TheKey=key2&MapArg.entry.2.TheValue=val2&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService7ProtocolTestBase64EncodedBlobsCase1(t *testing.T) { - svc := NewInputService7ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService7TestShapeInputShape{ - BlobArg: []byte("foo"), - } - req, _ := svc.InputService7TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&BlobArg=Zm9v&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService8ProtocolTestTimestampValuesCase1(t *testing.T) { - svc := NewInputService8ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService8TestShapeInputShape{ - TimeArg: aws.Time(time.Unix(1422172800, 0)), - } - req, _ := svc.InputService8TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&TimeArg=2015-01-25T08%3A00%3A00Z&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService9ProtocolTestRecursiveShapesCase1(t *testing.T) { - svc := NewInputService9ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService9TestShapeInputShape{ - RecursiveStruct: &InputService9TestShapeRecursiveStructType{ - NoRecurse: aws.String("foo"), - }, - } - req, _ := svc.InputService9TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService9ProtocolTestRecursiveShapesCase2(t *testing.T) { - svc := NewInputService9ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService9TestShapeInputShape{ - RecursiveStruct: &InputService9TestShapeRecursiveStructType{ - RecursiveStruct: &InputService9TestShapeRecursiveStructType{ - NoRecurse: aws.String("foo"), - }, - }, - } - req, _ := svc.InputService9TestCaseOperation2Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func 
TestInputService9ProtocolTestRecursiveShapesCase3(t *testing.T) { - svc := NewInputService9ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService9TestShapeInputShape{ - RecursiveStruct: &InputService9TestShapeRecursiveStructType{ - RecursiveStruct: &InputService9TestShapeRecursiveStructType{ - RecursiveStruct: &InputService9TestShapeRecursiveStructType{ - RecursiveStruct: &InputService9TestShapeRecursiveStructType{ - NoRecurse: aws.String("foo"), - }, - }, - }, - }, - } - req, _ := svc.InputService9TestCaseOperation3Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveStruct.RecursiveStruct.RecursiveStruct.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService9ProtocolTestRecursiveShapesCase4(t *testing.T) { - svc := NewInputService9ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService9TestShapeInputShape{ - RecursiveStruct: &InputService9TestShapeRecursiveStructType{ - RecursiveList: []*InputService9TestShapeRecursiveStructType{ - { - NoRecurse: aws.String("foo"), - }, - { - NoRecurse: aws.String("bar"), - }, - }, - }, - } - req, _ := svc.InputService9TestCaseOperation4Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.NoRecurse=bar&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService9ProtocolTestRecursiveShapesCase5(t *testing.T) { - svc := NewInputService9ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService9TestShapeInputShape{ - RecursiveStruct: &InputService9TestShapeRecursiveStructType{ - RecursiveList: []*InputService9TestShapeRecursiveStructType{ - { - NoRecurse: aws.String("foo"), - }, - { - RecursiveStruct: &InputService9TestShapeRecursiveStructType{ - NoRecurse: aws.String("bar"), - }, - }, - }, - }, - } - req, _ := svc.InputService9TestCaseOperation5Request(input) - r := req.HTTPRequest - - // build request - query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveList.member.1.NoRecurse=foo&RecursiveStruct.RecursiveList.member.2.RecursiveStruct.NoRecurse=bar&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService9ProtocolTestRecursiveShapesCase6(t *testing.T) { - svc := NewInputService9ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService9TestShapeInputShape{ - RecursiveStruct: &InputService9TestShapeRecursiveStructType{ - RecursiveMap: map[string]*InputService9TestShapeRecursiveStructType{ - "bar": { - NoRecurse: aws.String("bar"), - }, - "foo": { - NoRecurse: aws.String("foo"), - }, - }, - }, - } - req, _ := svc.InputService9TestCaseOperation6Request(input) - r := req.HTTPRequest - - // build request - 
query.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body, _ := ioutil.ReadAll(r.Body) - assert.Equal(t, util.Trim(`Action=OperationName&RecursiveStruct.RecursiveMap.entry.1.key=bar&RecursiveStruct.RecursiveMap.entry.1.value.NoRecurse=bar&RecursiveStruct.RecursiveMap.entry.2.key=foo&RecursiveStruct.RecursiveMap.entry.2.value.NoRecurse=foo&Version=2014-01-01`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go deleted file mode 100644 index e8cfa92..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal.go +++ /dev/null @@ -1,29 +0,0 @@ -package query - -//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/query.json unmarshal_test.go - -import ( - "encoding/xml" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil" -) - -// Unmarshal unmarshals a response for an AWS Query service. -func Unmarshal(r *aws.Request) { - defer r.HTTPResponse.Body.Close() - if r.DataFilled() { - decoder := xml.NewDecoder(r.HTTPResponse.Body) - err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") - if err != nil { - r.Error = awserr.New("SerializationError", "failed decoding Query response", err) - return - } - } -} - -// UnmarshalMeta unmarshals header response values for an AWS Query service. -func UnmarshalMeta(r *aws.Request) { - // TODO implement unmarshaling of request IDs -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_error.go deleted file mode 100644 index d88ee33..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_error.go +++ /dev/null @@ -1,33 +0,0 @@ -package query - -import ( - "encoding/xml" - "io" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" -) - -type xmlErrorResponse struct { - XMLName xml.Name `xml:"ErrorResponse"` - Code string `xml:"Error>Code"` - Message string `xml:"Error>Message"` - RequestID string `xml:"RequestId"` -} - -// UnmarshalError unmarshals an error response for an AWS Query service. 
-func UnmarshalError(r *aws.Request) { - defer r.HTTPResponse.Body.Close() - - resp := &xmlErrorResponse{} - err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) - if err != nil && err != io.EOF { - r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err) - } else { - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - resp.RequestID, - ) - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_test.go deleted file mode 100644 index a44060c..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/unmarshal_test.go +++ /dev/null @@ -1,1418 +0,0 @@ -package query_test - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "io" - "io/ioutil" - "net/http" - "net/url" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/internal/protocol/query" - "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil" - "github.com/aws/aws-sdk-go/internal/signer/v4" - "github.com/aws/aws-sdk-go/internal/util" - "github.com/stretchr/testify/assert" -) - -var _ bytes.Buffer // always import bytes -var _ http.Request -var _ json.Marshaler -var _ time.Time -var _ xmlutil.XMLNode -var _ xml.Attr -var _ = ioutil.Discard -var _ = util.Trim("") -var _ = url.Values{} -var _ = io.EOF - -type OutputService1ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService1ProtocolTest client. -func NewOutputService1ProtocolTest(config *aws.Config) *OutputService1ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice1protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService1ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService1TestCaseOperation1 = "OperationName" - -// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. 
-func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *aws.Request, output *OutputService1TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService1TestCaseOperation1, - } - - if input == nil { - input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService1TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputShape, error) { - req, out := c.OutputService1TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { - metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService1TestShapeOutputShape struct { - Char *string `type:"character"` - - Double *float64 `type:"double"` - - FalseBool *bool `type:"boolean"` - - Float *float64 `type:"float"` - - Long *int64 `type:"long"` - - Num *int64 `locationName:"FooNum" type:"integer"` - - Str *string `type:"string"` - - Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - TrueBool *bool `type:"boolean"` - - metadataOutputService1TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService1TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService2ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService2ProtocolTest client. -func NewOutputService2ProtocolTest(config *aws.Config) *OutputService2ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice2protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService2ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService2TestCaseOperation1 = "OperationName" - -// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. 
-func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *aws.Request, output *OutputService2TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService2TestCaseOperation1, - } - - if input == nil { - input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService2TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputShape, error) { - req, out := c.OutputService2TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { - metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService2TestShapeOutputShape struct { - Num *int64 `type:"integer"` - - Str *string `type:"string"` - - metadataOutputService2TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService2TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService3ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService3ProtocolTest client. -func NewOutputService3ProtocolTest(config *aws.Config) *OutputService3ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice3protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService3ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService3TestCaseOperation1 = "OperationName" - -// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. 
-func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *aws.Request, output *OutputService3TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService3TestCaseOperation1, - } - - if input == nil { - input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService3TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputShape, error) { - req, out := c.OutputService3TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { - metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService3TestShapeOutputShape struct { - Blob []byte `type:"blob"` - - metadataOutputService3TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService3TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService4ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService4ProtocolTest client. -func NewOutputService4ProtocolTest(config *aws.Config) *OutputService4ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice4protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService4ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService4TestCaseOperation1 = "OperationName" - -// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. 
-func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *aws.Request, output *OutputService4TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService4TestCaseOperation1, - } - - if input == nil { - input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService4TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputShape, error) { - req, out := c.OutputService4TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { - metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService4TestShapeOutputShape struct { - ListMember []*string `type:"list"` - - metadataOutputService4TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService4TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService5ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService5ProtocolTest client. -func NewOutputService5ProtocolTest(config *aws.Config) *OutputService5ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice5protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService5ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService5TestCaseOperation1 = "OperationName" - -// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. 
-func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *aws.Request, output *OutputService5TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService5TestCaseOperation1, - } - - if input == nil { - input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService5TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputShape, error) { - req, out := c.OutputService5TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { - metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService5TestShapeOutputShape struct { - ListMember []*string `locationNameList:"item" type:"list"` - - metadataOutputService5TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService5TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService6ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService6ProtocolTest client. -func NewOutputService6ProtocolTest(config *aws.Config) *OutputService6ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice6protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService6ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService6TestCaseOperation1 = "OperationName" - -// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. 
-func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *aws.Request, output *OutputService6TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService6TestCaseOperation1, - } - - if input == nil { - input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService6TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputShape, error) { - req, out := c.OutputService6TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { - metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService6TestShapeOutputShape struct { - ListMember []*string `type:"list" flattened:"true"` - - metadataOutputService6TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService6TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService7ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService7ProtocolTest client. -func NewOutputService7ProtocolTest(config *aws.Config) *OutputService7ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice7protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService7ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService7TestCaseOperation1 = "OperationName" - -// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. 
-func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *aws.Request, output *OutputService7TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService7TestCaseOperation1, - } - - if input == nil { - input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService7TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputShape, error) { - req, out := c.OutputService7TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { - metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService7TestShapeOutputShape struct { - ListMember []*string `type:"list" flattened:"true"` - - metadataOutputService7TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService7TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService8ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService8ProtocolTest client. -func NewOutputService8ProtocolTest(config *aws.Config) *OutputService8ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice8protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService8ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService8TestCaseOperation1 = "OperationName" - -// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. 
-func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *aws.Request, output *OutputService8TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService8TestCaseOperation1, - } - - if input == nil { - input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService8TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputShape, error) { - req, out := c.OutputService8TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { - metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService8TestShapeOutputShape struct { - List []*OutputService8TestShapeStructureShape `type:"list"` - - metadataOutputService8TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService8TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService8TestShapeStructureShape struct { - Bar *string `type:"string"` - - Baz *string `type:"string"` - - Foo *string `type:"string"` - - metadataOutputService8TestShapeStructureShape `json:"-" xml:"-"` -} - -type metadataOutputService8TestShapeStructureShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService9ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService9ProtocolTest client. -func NewOutputService9ProtocolTest(config *aws.Config) *OutputService9ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice9protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService9ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService9TestCaseOperation1 = "OperationName" - -// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. 
-func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *aws.Request, output *OutputService9TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService9TestCaseOperation1, - } - - if input == nil { - input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService9TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputShape, error) { - req, out := c.OutputService9TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { - metadataOutputService9TestShapeOutputService9TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService9TestShapeOutputService9TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService9TestShapeOutputShape struct { - List []*OutputService9TestShapeStructureShape `type:"list" flattened:"true"` - - metadataOutputService9TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService9TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService9TestShapeStructureShape struct { - Bar *string `type:"string"` - - Baz *string `type:"string"` - - Foo *string `type:"string"` - - metadataOutputService9TestShapeStructureShape `json:"-" xml:"-"` -} - -type metadataOutputService9TestShapeStructureShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService10ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService10ProtocolTest client. -func NewOutputService10ProtocolTest(config *aws.Config) *OutputService10ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice10protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService10ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService10ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService10TestCaseOperation1 = "OperationName" - -// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation. 
-func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *aws.Request, output *OutputService10TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService10TestCaseOperation1, - } - - if input == nil { - input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService10TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputShape, error) { - req, out := c.OutputService10TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { - metadataOutputService10TestShapeOutputService10TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService10TestShapeOutputService10TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService10TestShapeOutputShape struct { - List []*string `locationNameList:"NamedList" type:"list" flattened:"true"` - - metadataOutputService10TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService10TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService11ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService11ProtocolTest client. -func NewOutputService11ProtocolTest(config *aws.Config) *OutputService11ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice11protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService11ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService11ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService11TestCaseOperation1 = "OperationName" - -// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation. 
-func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *aws.Request, output *OutputService11TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService11TestCaseOperation1, - } - - if input == nil { - input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService11TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputShape, error) { - req, out := c.OutputService11TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { - metadataOutputService11TestShapeOutputService11TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService11TestShapeOutputService11TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService11TestShapeOutputShape struct { - Map map[string]*OutputService11TestShapeStructType `type:"map"` - - metadataOutputService11TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService11TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService11TestShapeStructType struct { - Foo *string `locationName:"foo" type:"string"` - - metadataOutputService11TestShapeStructType `json:"-" xml:"-"` -} - -type metadataOutputService11TestShapeStructType struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService12ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService12ProtocolTest client. -func NewOutputService12ProtocolTest(config *aws.Config) *OutputService12ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice12protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService12ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService12ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService12ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService12TestCaseOperation1 = "OperationName" - -// OutputService12TestCaseOperation1Request generates a request for the OutputService12TestCaseOperation1 operation. 
-func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *aws.Request, output *OutputService12TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService12TestCaseOperation1, - } - - if input == nil { - input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService12TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputShape, error) { - req, out := c.OutputService12TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService12TestShapeOutputService12TestCaseOperation1Input struct { - metadataOutputService12TestShapeOutputService12TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService12TestShapeOutputService12TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService12TestShapeOutputShape struct { - Map map[string]*string `type:"map" flattened:"true"` - - metadataOutputService12TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService12TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService13ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService13ProtocolTest client. -func NewOutputService13ProtocolTest(config *aws.Config) *OutputService13ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice13protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService13ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService13ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService13ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService13TestCaseOperation1 = "OperationName" - -// OutputService13TestCaseOperation1Request generates a request for the OutputService13TestCaseOperation1 operation. 
-func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1Request(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (req *aws.Request, output *OutputService13TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService13TestCaseOperation1, - } - - if input == nil { - input = &OutputService13TestShapeOutputService13TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService13TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService13ProtocolTest) OutputService13TestCaseOperation1(input *OutputService13TestShapeOutputService13TestCaseOperation1Input) (*OutputService13TestShapeOutputShape, error) { - req, out := c.OutputService13TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService13TestShapeOutputService13TestCaseOperation1Input struct { - metadataOutputService13TestShapeOutputService13TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService13TestShapeOutputService13TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService13TestShapeOutputShape struct { - Map map[string]*string `locationName:"Attribute" locationNameKey:"Name" locationNameValue:"Value" type:"map" flattened:"true"` - - metadataOutputService13TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService13TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService14ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService14ProtocolTest client. -func NewOutputService14ProtocolTest(config *aws.Config) *OutputService14ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice14protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(query.Build) - service.Handlers.Unmarshal.PushBack(query.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(query.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(query.UnmarshalError) - - return &OutputService14ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService14ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService14ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService14TestCaseOperation1 = "OperationName" - -// OutputService14TestCaseOperation1Request generates a request for the OutputService14TestCaseOperation1 operation. 
-func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1Request(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (req *aws.Request, output *OutputService14TestShapeOutputShape) {
-	op := &aws.Operation{
-		Name: opOutputService14TestCaseOperation1,
-	}
-
-	if input == nil {
-		input = &OutputService14TestShapeOutputService14TestCaseOperation1Input{}
-	}
-
-	req = c.newRequest(op, input, output)
-	output = &OutputService14TestShapeOutputShape{}
-	req.Data = output
-	return
-}
-
-func (c *OutputService14ProtocolTest) OutputService14TestCaseOperation1(input *OutputService14TestShapeOutputService14TestCaseOperation1Input) (*OutputService14TestShapeOutputShape, error) {
-	req, out := c.OutputService14TestCaseOperation1Request(input)
-	err := req.Send()
-	return out, err
-}
-
-type OutputService14TestShapeOutputService14TestCaseOperation1Input struct {
-	metadataOutputService14TestShapeOutputService14TestCaseOperation1Input `json:"-" xml:"-"`
-}
-
-type metadataOutputService14TestShapeOutputService14TestCaseOperation1Input struct {
-	SDKShapeTraits bool `type:"structure"`
-}
-
-type OutputService14TestShapeOutputShape struct {
-	Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map" flattened:"true"`
-
-	metadataOutputService14TestShapeOutputShape `json:"-" xml:"-"`
-}
-
-type metadataOutputService14TestShapeOutputShape struct {
-	SDKShapeTraits bool `type:"structure"`
-}
-
-//
-// Tests begin here
-//
-
-func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
-	svc := NewOutputService1ProtocolTest(nil)
-
-	buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Zrequest-id"))
-	req, out := svc.OutputService1TestCaseOperation1Request(nil)
-	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
-
-	// set headers
-
-	// unmarshal response
-	query.UnmarshalMeta(req)
-	query.Unmarshal(req)
-	assert.NoError(t, req.Error)
-
-	// assert response
-	assert.NotNil(t, out) // ensure out variable is used
-	assert.Equal(t, "a", *out.Char)
-	assert.Equal(t, 1.3, *out.Double)
-	assert.Equal(t, false, *out.FalseBool)
-	assert.Equal(t, 1.2, *out.Float)
-	assert.Equal(t, int64(200), *out.Long)
-	assert.Equal(t, int64(123), *out.Num)
-	assert.Equal(t, "myname", *out.Str)
-	assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String())
-	assert.Equal(t, true, *out.TrueBool)
-
-}
-
-func TestOutputService2ProtocolTestNotAllMembersInResponseCase1(t *testing.T) {
-	svc := NewOutputService2ProtocolTest(nil)
-
-	buf := bytes.NewReader([]byte("mynamerequest-id"))
-	req, out := svc.OutputService2TestCaseOperation1Request(nil)
-	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
-
-	// set headers
-
-	// unmarshal response
-	query.UnmarshalMeta(req)
-	query.Unmarshal(req)
-	assert.NoError(t, req.Error)
-
-	// assert response
-	assert.NotNil(t, out) // ensure out variable is used
-	assert.Equal(t, "myname", *out.Str)
-
-}
-
-func TestOutputService3ProtocolTestBlobCase1(t *testing.T) {
-	svc := NewOutputService3ProtocolTest(nil)
-
-	buf := bytes.NewReader([]byte("dmFsdWU=requestid"))
-	req, out := svc.OutputService3TestCaseOperation1Request(nil)
-	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
-
-	// set headers
-
-	// unmarshal response
-	query.UnmarshalMeta(req)
-	query.Unmarshal(req)
-	assert.NoError(t, req.Error)
-
-	// assert response
assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "value", string(out.Blob)) - -} - -func TestOutputService4ProtocolTestListsCase1(t *testing.T) { - svc := NewOutputService4ProtocolTest(nil) - - buf := bytes.NewReader([]byte("abc123requestid")) - req, out := svc.OutputService4TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - query.UnmarshalMeta(req) - query.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) - assert.Equal(t, "123", *out.ListMember[1]) - -} - -func TestOutputService5ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { - svc := NewOutputService5ProtocolTest(nil) - - buf := bytes.NewReader([]byte("abc123requestid")) - req, out := svc.OutputService5TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - query.UnmarshalMeta(req) - query.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) - assert.Equal(t, "123", *out.ListMember[1]) - -} - -func TestOutputService6ProtocolTestFlattenedListCase1(t *testing.T) { - svc := NewOutputService6ProtocolTest(nil) - - buf := bytes.NewReader([]byte("abc123requestid")) - req, out := svc.OutputService6TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - query.UnmarshalMeta(req) - query.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) - assert.Equal(t, "123", *out.ListMember[1]) - -} - -func TestOutputService7ProtocolTestFlattenedSingleElementListCase1(t *testing.T) { - svc := NewOutputService7ProtocolTest(nil) - - buf := bytes.NewReader([]byte("abcrequestid")) - req, out := svc.OutputService7TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - query.UnmarshalMeta(req) - query.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) - -} - -func TestOutputService8ProtocolTestListOfStructuresCase1(t *testing.T) { - svc := NewOutputService8ProtocolTest(nil) - - buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) - req, out := svc.OutputService8TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - query.UnmarshalMeta(req) - query.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "firstbar", *out.List[0].Bar) - assert.Equal(t, "firstbaz", *out.List[0].Baz) - assert.Equal(t, "firstfoo", *out.List[0].Foo) - assert.Equal(t, "secondbar", *out.List[1].Bar) - assert.Equal(t, "secondbaz", *out.List[1].Baz) - assert.Equal(t, "secondfoo", *out.List[1].Foo) - -} - -func 
TestOutputService9ProtocolTestFlattenedListOfStructuresCase1(t *testing.T) { - svc := NewOutputService9ProtocolTest(nil) - - buf := bytes.NewReader([]byte("firstfoofirstbarfirstbazsecondfoosecondbarsecondbazrequestid")) - req, out := svc.OutputService9TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - query.UnmarshalMeta(req) - query.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "firstbar", *out.List[0].Bar) - assert.Equal(t, "firstbaz", *out.List[0].Baz) - assert.Equal(t, "firstfoo", *out.List[0].Foo) - assert.Equal(t, "secondbar", *out.List[1].Bar) - assert.Equal(t, "secondbaz", *out.List[1].Baz) - assert.Equal(t, "secondfoo", *out.List[1].Foo) - -} - -func TestOutputService10ProtocolTestFlattenedListWithLocationNameCase1(t *testing.T) { - svc := NewOutputService10ProtocolTest(nil) - - buf := bytes.NewReader([]byte("abrequestid")) - req, out := svc.OutputService10TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - query.UnmarshalMeta(req) - query.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "a", *out.List[0]) - assert.Equal(t, "b", *out.List[1]) - -} - -func TestOutputService11ProtocolTestNormalMapCase1(t *testing.T) { - svc := NewOutputService11ProtocolTest(nil) - - buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) - req, out := svc.OutputService11TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - query.UnmarshalMeta(req) - query.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "bam", *out.Map["baz"].Foo) - assert.Equal(t, "bar", *out.Map["qux"].Foo) - -} - -func TestOutputService12ProtocolTestFlattenedMapCase1(t *testing.T) { - svc := NewOutputService12ProtocolTest(nil) - - buf := bytes.NewReader([]byte("quxbarbazbamrequestid")) - req, out := svc.OutputService12TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - query.UnmarshalMeta(req) - query.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "bam", *out.Map["baz"]) - assert.Equal(t, "bar", *out.Map["qux"]) - -} - -func TestOutputService13ProtocolTestFlattenedMapInShapeDefinitionCase1(t *testing.T) { - svc := NewOutputService13ProtocolTest(nil) - - buf := bytes.NewReader([]byte("quxbarrequestid")) - req, out := svc.OutputService13TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - query.UnmarshalMeta(req) - query.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "bar", *out.Map["qux"]) - -} - -func TestOutputService14ProtocolTestNamedMapCase1(t *testing.T) { - svc := NewOutputService14ProtocolTest(nil) - - buf := 
bytes.NewReader([]byte("quxbarbazbamrequestid"))
-	req, out := svc.OutputService14TestCaseOperation1Request(nil)
-	req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
-
-	// set headers
-
-	// unmarshal response
-	query.UnmarshalMeta(req)
-	query.Unmarshal(req)
-	assert.NoError(t, req.Error)
-
-	// assert response
-	assert.NotNil(t, out) // ensure out variable is used
-	assert.Equal(t, "bam", *out.Map["baz"])
-	assert.Equal(t, "bar", *out.Map["qux"])
-
-}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go
deleted file mode 100644
index d8af4a0..0000000
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/build.go
+++ /dev/null
@@ -1,208 +0,0 @@
-// Package rest provides RESTful serialisation of AWS requests and responses.
-package rest
-
-import (
-	"bytes"
-	"encoding/base64"
-	"fmt"
-	"io"
-	"net/url"
-	"reflect"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/awserr"
-)
-
-// RFC822 returns an RFC822 formatted timestamp for AWS protocols
-const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT"
-
-// Whether the byte value can be sent without escaping in AWS URLs
-var noEscape [256]bool
-
-func init() {
-	for i := 0; i < len(noEscape); i++ {
-		// AWS expects every character except these to be escaped
-		noEscape[i] = (i >= 'A' && i <= 'Z') ||
-			(i >= 'a' && i <= 'z') ||
-			(i >= '0' && i <= '9') ||
-			i == '-' ||
-			i == '.' ||
-			i == '_' ||
-			i == '~'
-	}
-}
-
-// Build builds the REST component of a service request.
-func Build(r *aws.Request) {
-	if r.ParamsFilled() {
-		v := reflect.ValueOf(r.Params).Elem()
-		buildLocationElements(r, v)
-		buildBody(r, v)
-	}
-}
-
-func buildLocationElements(r *aws.Request, v reflect.Value) {
-	query := r.HTTPRequest.URL.Query()
-
-	for i := 0; i < v.NumField(); i++ {
-		m := v.Field(i)
-		if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) {
-			continue
-		}
-
-		if m.IsValid() {
-			field := v.Type().Field(i)
-			name := field.Tag.Get("locationName")
-			if name == "" {
-				name = field.Name
-			}
-			if m.Kind() == reflect.Ptr {
-				m = m.Elem()
-			}
-			if !m.IsValid() {
-				continue
-			}
-
-			switch field.Tag.Get("location") {
-			case "headers": // header maps
-				buildHeaderMap(r, m, field.Tag.Get("locationName"))
-			case "header":
-				buildHeader(r, m, name)
-			case "uri":
-				buildURI(r, m, name)
-			case "querystring":
-				buildQueryString(r, m, name, query)
-			}
-		}
-		if r.Error != nil {
-			return
-		}
-	}
-
-	r.HTTPRequest.URL.RawQuery = query.Encode()
-	updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path)
-}
-
-func buildBody(r *aws.Request, v reflect.Value) {
-	if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok {
-		if payloadName := field.Tag.Get("payload"); payloadName != "" {
-			pfield, _ := v.Type().FieldByName(payloadName)
-			if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" {
-				payload := reflect.Indirect(v.FieldByName(payloadName))
-				if payload.IsValid() && payload.Interface() != nil {
-					switch reader := payload.Interface().(type) {
-					case io.ReadSeeker:
-						r.SetReaderBody(reader)
-					case []byte:
-						r.SetBufferBody(reader)
-					case string:
-						r.SetStringBody(reader)
-					default:
-						r.Error = awserr.New("SerializationError",
-							"failed to encode REST request",
-							fmt.Errorf("unknown payload type %s", payload.Type()))
-					}
-				}
-			}
-		}
-	}
-}
-
-func buildHeader(r *aws.Request, v reflect.Value, name string) {
-	str, err := convertType(v)
-	if err != nil {
-		r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
-	} else if str != nil {
-		r.HTTPRequest.Header.Add(name, *str)
-	}
-}
-
-func buildHeaderMap(r *aws.Request, v reflect.Value, prefix string) {
-	for _, key := range v.MapKeys() {
-		str, err := convertType(v.MapIndex(key))
-		if err != nil {
-			r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
-		} else if str != nil {
-			r.HTTPRequest.Header.Add(prefix+key.String(), *str)
-		}
-	}
-}
-
-func buildURI(r *aws.Request, v reflect.Value, name string) {
-	value, err := convertType(v)
-	if err != nil {
-		r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
-	} else if value != nil {
-		uri := r.HTTPRequest.URL.Path
-		uri = strings.Replace(uri, "{"+name+"}", EscapePath(*value, true), -1)
-		uri = strings.Replace(uri, "{"+name+"+}", EscapePath(*value, false), -1)
-		r.HTTPRequest.URL.Path = uri
-	}
-}
-
-func buildQueryString(r *aws.Request, v reflect.Value, name string, query url.Values) {
-	str, err := convertType(v)
-	if err != nil {
-		r.Error = awserr.New("SerializationError", "failed to encode REST request", err)
-	} else if str != nil {
-		query.Set(name, *str)
-	}
-}
-
-func updatePath(url *url.URL, urlPath string) {
-	scheme, query := url.Scheme, url.RawQuery
-
-	// get formatted URL minus scheme so we can build this into Opaque
-	url.Scheme, url.Path, url.RawQuery = "", "", ""
-	s := url.String()
-	url.Scheme = scheme
-	url.RawQuery = query
-
-	// build opaque URI
-	url.Opaque = s + urlPath
-}
-
-// EscapePath escapes part of a URL path in Amazon style
-func EscapePath(path string, encodeSep bool) string {
-	var buf bytes.Buffer
-	for i := 0; i < len(path); i++ {
-		c := path[i]
-		if noEscape[c] || (c == '/' && !encodeSep) {
-			buf.WriteByte(c)
-		} else {
-			buf.WriteByte('%')
-			buf.WriteString(strings.ToUpper(strconv.FormatUint(uint64(c), 16)))
-		}
-	}
-	return buf.String()
-}
-
-func convertType(v reflect.Value) (*string, error) {
-	v = reflect.Indirect(v)
-	if !v.IsValid() {
-		return nil, nil
-	}
-
-	var str string
-	switch value := v.Interface().(type) {
-	case string:
-		str = value
-	case []byte:
-		str = base64.StdEncoding.EncodeToString(value)
-	case bool:
-		str = strconv.FormatBool(value)
-	case int64:
-		str = strconv.FormatInt(value, 10)
-	case float64:
-		str = strconv.FormatFloat(value, 'f', -1, 64)
-	case time.Time:
-		str = value.UTC().Format(RFC822)
-	default:
-		err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type())
-		return nil, err
-	}
-	return &str, nil
-}
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/build_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/build_test.go
deleted file mode 100644
index 98ca09f..0000000
--- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/build_test.go
+++ /dev/null
@@ -1,2717 +0,0 @@
-package restxml_test
-
-import (
-	"bytes"
-	"encoding/json"
-	"encoding/xml"
-	"io"
-	"io/ioutil"
-	"net/http"
-	"net/url"
-	"testing"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/internal/protocol/restxml"
-	"github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil"
-	"github.com/aws/aws-sdk-go/internal/signer/v4"
-	"github.com/aws/aws-sdk-go/internal/util"
-	"github.com/stretchr/testify/assert"
-)
-
-var _ bytes.Buffer // always import bytes
-var _ http.Request
-var _ json.Marshaler
-var _ time.Time
-var _ xmlutil.XMLNode
-var _ xml.Attr
-var _ = ioutil.Discard -var _ = util.Trim("") -var _ = url.Values{} -var _ = io.EOF - -type InputService1ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService1ProtocolTest client. -func NewInputService1ProtocolTest(config *aws.Config) *InputService1ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice1protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService1ProtocolTest{service} -} - -// newRequest creates a new request for a InputService1ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService1TestCaseOperation1 = "OperationName" - -// InputService1TestCaseOperation1Request generates a request for the InputService1TestCaseOperation1 operation. -func (c *InputService1ProtocolTest) InputService1TestCaseOperation1Request(input *InputService1TestShapeInputShape) (req *aws.Request, output *InputService1TestShapeInputService1TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService1TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/2014-01-01/hostedzone", - } - - if input == nil { - input = &InputService1TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService1TestShapeInputService1TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService1ProtocolTest) InputService1TestCaseOperation1(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation1Output, error) { - req, out := c.InputService1TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -const opInputService1TestCaseOperation2 = "OperationName" - -// InputService1TestCaseOperation2Request generates a request for the InputService1TestCaseOperation2 operation. 
-func (c *InputService1ProtocolTest) InputService1TestCaseOperation2Request(input *InputService1TestShapeInputShape) (req *aws.Request, output *InputService1TestShapeInputService1TestCaseOperation2Output) { - op := &aws.Operation{ - Name: opInputService1TestCaseOperation2, - HTTPMethod: "PUT", - HTTPPath: "/2014-01-01/hostedzone", - } - - if input == nil { - input = &InputService1TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService1TestShapeInputService1TestCaseOperation2Output{} - req.Data = output - return -} - -func (c *InputService1ProtocolTest) InputService1TestCaseOperation2(input *InputService1TestShapeInputShape) (*InputService1TestShapeInputService1TestCaseOperation2Output, error) { - req, out := c.InputService1TestCaseOperation2Request(input) - err := req.Send() - return out, err -} - -type InputService1TestShapeInputService1TestCaseOperation1Output struct { - metadataInputService1TestShapeInputService1TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService1TestShapeInputService1TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService1TestShapeInputService1TestCaseOperation2Output struct { - metadataInputService1TestShapeInputService1TestCaseOperation2Output `json:"-" xml:"-"` -} - -type metadataInputService1TestShapeInputService1TestCaseOperation2Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService1TestShapeInputShape struct { - Description *string `type:"string"` - - Name *string `type:"string"` - - metadataInputService1TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService1TestShapeInputShape struct { - SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` -} - -type InputService2ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService2ProtocolTest client. -func NewInputService2ProtocolTest(config *aws.Config) *InputService2ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice2protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService2ProtocolTest{service} -} - -// newRequest creates a new request for a InputService2ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService2TestCaseOperation1 = "OperationName" - -// InputService2TestCaseOperation1Request generates a request for the InputService2TestCaseOperation1 operation. 
-func (c *InputService2ProtocolTest) InputService2TestCaseOperation1Request(input *InputService2TestShapeInputShape) (req *aws.Request, output *InputService2TestShapeInputService2TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService2TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/2014-01-01/hostedzone", - } - - if input == nil { - input = &InputService2TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService2TestShapeInputService2TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService2ProtocolTest) InputService2TestCaseOperation1(input *InputService2TestShapeInputShape) (*InputService2TestShapeInputService2TestCaseOperation1Output, error) { - req, out := c.InputService2TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService2TestShapeInputService2TestCaseOperation1Output struct { - metadataInputService2TestShapeInputService2TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService2TestShapeInputService2TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService2TestShapeInputShape struct { - First *bool `type:"boolean"` - - Fourth *int64 `type:"integer"` - - Second *bool `type:"boolean"` - - Third *float64 `type:"float"` - - metadataInputService2TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService2TestShapeInputShape struct { - SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` -} - -type InputService3ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService3ProtocolTest client. -func NewInputService3ProtocolTest(config *aws.Config) *InputService3ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice3protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService3ProtocolTest{service} -} - -// newRequest creates a new request for a InputService3ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService3TestCaseOperation1 = "OperationName" - -// InputService3TestCaseOperation1Request generates a request for the InputService3TestCaseOperation1 operation. 
-func (c *InputService3ProtocolTest) InputService3TestCaseOperation1Request(input *InputService3TestShapeInputShape) (req *aws.Request, output *InputService3TestShapeInputService3TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService3TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/2014-01-01/hostedzone", - } - - if input == nil { - input = &InputService3TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService3TestShapeInputService3TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService3ProtocolTest) InputService3TestCaseOperation1(input *InputService3TestShapeInputShape) (*InputService3TestShapeInputService3TestCaseOperation1Output, error) { - req, out := c.InputService3TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService3TestShapeInputService3TestCaseOperation1Output struct { - metadataInputService3TestShapeInputService3TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService3TestShapeInputService3TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService3TestShapeInputShape struct { - Description *string `type:"string"` - - SubStructure *InputService3TestShapeSubStructure `type:"structure"` - - metadataInputService3TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService3TestShapeInputShape struct { - SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` -} - -type InputService3TestShapeSubStructure struct { - Bar *string `type:"string"` - - Foo *string `type:"string"` - - metadataInputService3TestShapeSubStructure `json:"-" xml:"-"` -} - -type metadataInputService3TestShapeSubStructure struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService4ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService4ProtocolTest client. -func NewInputService4ProtocolTest(config *aws.Config) *InputService4ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice4protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService4ProtocolTest{service} -} - -// newRequest creates a new request for a InputService4ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService4TestCaseOperation1 = "OperationName" - -// InputService4TestCaseOperation1Request generates a request for the InputService4TestCaseOperation1 operation. 
-func (c *InputService4ProtocolTest) InputService4TestCaseOperation1Request(input *InputService4TestShapeInputShape) (req *aws.Request, output *InputService4TestShapeInputService4TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService4TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/2014-01-01/hostedzone", - } - - if input == nil { - input = &InputService4TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService4TestShapeInputService4TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService4ProtocolTest) InputService4TestCaseOperation1(input *InputService4TestShapeInputShape) (*InputService4TestShapeInputService4TestCaseOperation1Output, error) { - req, out := c.InputService4TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService4TestShapeInputService4TestCaseOperation1Output struct { - metadataInputService4TestShapeInputService4TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService4TestShapeInputService4TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService4TestShapeInputShape struct { - Description *string `type:"string"` - - SubStructure *InputService4TestShapeSubStructure `type:"structure"` - - metadataInputService4TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService4TestShapeInputShape struct { - SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` -} - -type InputService4TestShapeSubStructure struct { - Bar *string `type:"string"` - - Foo *string `type:"string"` - - metadataInputService4TestShapeSubStructure `json:"-" xml:"-"` -} - -type metadataInputService4TestShapeSubStructure struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService5ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService5ProtocolTest client. -func NewInputService5ProtocolTest(config *aws.Config) *InputService5ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice5protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService5ProtocolTest{service} -} - -// newRequest creates a new request for a InputService5ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService5TestCaseOperation1 = "OperationName" - -// InputService5TestCaseOperation1Request generates a request for the InputService5TestCaseOperation1 operation. 
-func (c *InputService5ProtocolTest) InputService5TestCaseOperation1Request(input *InputService5TestShapeInputShape) (req *aws.Request, output *InputService5TestShapeInputService5TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService5TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/2014-01-01/hostedzone", - } - - if input == nil { - input = &InputService5TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService5TestShapeInputService5TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService5ProtocolTest) InputService5TestCaseOperation1(input *InputService5TestShapeInputShape) (*InputService5TestShapeInputService5TestCaseOperation1Output, error) { - req, out := c.InputService5TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService5TestShapeInputService5TestCaseOperation1Output struct { - metadataInputService5TestShapeInputService5TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService5TestShapeInputService5TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService5TestShapeInputShape struct { - ListParam []*string `type:"list"` - - metadataInputService5TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService5TestShapeInputShape struct { - SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` -} - -type InputService6ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService6ProtocolTest client. -func NewInputService6ProtocolTest(config *aws.Config) *InputService6ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice6protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService6ProtocolTest{service} -} - -// newRequest creates a new request for a InputService6ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService6TestCaseOperation1 = "OperationName" - -// InputService6TestCaseOperation1Request generates a request for the InputService6TestCaseOperation1 operation. 
-func (c *InputService6ProtocolTest) InputService6TestCaseOperation1Request(input *InputService6TestShapeInputShape) (req *aws.Request, output *InputService6TestShapeInputService6TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService6TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/2014-01-01/hostedzone", - } - - if input == nil { - input = &InputService6TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService6TestShapeInputService6TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService6ProtocolTest) InputService6TestCaseOperation1(input *InputService6TestShapeInputShape) (*InputService6TestShapeInputService6TestCaseOperation1Output, error) { - req, out := c.InputService6TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService6TestShapeInputService6TestCaseOperation1Output struct { - metadataInputService6TestShapeInputService6TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService6TestShapeInputService6TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService6TestShapeInputShape struct { - ListParam []*string `locationName:"AlternateName" locationNameList:"NotMember" type:"list"` - - metadataInputService6TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService6TestShapeInputShape struct { - SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` -} - -type InputService7ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService7ProtocolTest client. -func NewInputService7ProtocolTest(config *aws.Config) *InputService7ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice7protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService7ProtocolTest{service} -} - -// newRequest creates a new request for a InputService7ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService7TestCaseOperation1 = "OperationName" - -// InputService7TestCaseOperation1Request generates a request for the InputService7TestCaseOperation1 operation. 
-func (c *InputService7ProtocolTest) InputService7TestCaseOperation1Request(input *InputService7TestShapeInputShape) (req *aws.Request, output *InputService7TestShapeInputService7TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService7TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/2014-01-01/hostedzone", - } - - if input == nil { - input = &InputService7TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService7TestShapeInputService7TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService7ProtocolTest) InputService7TestCaseOperation1(input *InputService7TestShapeInputShape) (*InputService7TestShapeInputService7TestCaseOperation1Output, error) { - req, out := c.InputService7TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService7TestShapeInputService7TestCaseOperation1Output struct { - metadataInputService7TestShapeInputService7TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService7TestShapeInputService7TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService7TestShapeInputShape struct { - ListParam []*string `type:"list" flattened:"true"` - - metadataInputService7TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService7TestShapeInputShape struct { - SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` -} - -type InputService8ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService8ProtocolTest client. -func NewInputService8ProtocolTest(config *aws.Config) *InputService8ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice8protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService8ProtocolTest{service} -} - -// newRequest creates a new request for a InputService8ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService8TestCaseOperation1 = "OperationName" - -// InputService8TestCaseOperation1Request generates a request for the InputService8TestCaseOperation1 operation. 
-func (c *InputService8ProtocolTest) InputService8TestCaseOperation1Request(input *InputService8TestShapeInputShape) (req *aws.Request, output *InputService8TestShapeInputService8TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService8TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/2014-01-01/hostedzone", - } - - if input == nil { - input = &InputService8TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService8TestShapeInputService8TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService8ProtocolTest) InputService8TestCaseOperation1(input *InputService8TestShapeInputShape) (*InputService8TestShapeInputService8TestCaseOperation1Output, error) { - req, out := c.InputService8TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService8TestShapeInputService8TestCaseOperation1Output struct { - metadataInputService8TestShapeInputService8TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService8TestShapeInputService8TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService8TestShapeInputShape struct { - ListParam []*string `locationName:"item" type:"list" flattened:"true"` - - metadataInputService8TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService8TestShapeInputShape struct { - SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` -} - -type InputService9ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService9ProtocolTest client. -func NewInputService9ProtocolTest(config *aws.Config) *InputService9ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice9protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService9ProtocolTest{service} -} - -// newRequest creates a new request for a InputService9ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService9TestCaseOperation1 = "OperationName" - -// InputService9TestCaseOperation1Request generates a request for the InputService9TestCaseOperation1 operation. 
-func (c *InputService9ProtocolTest) InputService9TestCaseOperation1Request(input *InputService9TestShapeInputShape) (req *aws.Request, output *InputService9TestShapeInputService9TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService9TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/2014-01-01/hostedzone", - } - - if input == nil { - input = &InputService9TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService9TestShapeInputService9TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService9ProtocolTest) InputService9TestCaseOperation1(input *InputService9TestShapeInputShape) (*InputService9TestShapeInputService9TestCaseOperation1Output, error) { - req, out := c.InputService9TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService9TestShapeInputService9TestCaseOperation1Output struct { - metadataInputService9TestShapeInputService9TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService9TestShapeInputService9TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService9TestShapeInputShape struct { - ListParam []*InputService9TestShapeSingleFieldStruct `locationName:"item" type:"list" flattened:"true"` - - metadataInputService9TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService9TestShapeInputShape struct { - SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` -} - -type InputService9TestShapeSingleFieldStruct struct { - Element *string `locationName:"value" type:"string"` - - metadataInputService9TestShapeSingleFieldStruct `json:"-" xml:"-"` -} - -type metadataInputService9TestShapeSingleFieldStruct struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService10ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService10ProtocolTest client. -func NewInputService10ProtocolTest(config *aws.Config) *InputService10ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice10protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService10ProtocolTest{service} -} - -// newRequest creates a new request for a InputService10ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService10ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService10TestCaseOperation1 = "OperationName" - -// InputService10TestCaseOperation1Request generates a request for the InputService10TestCaseOperation1 operation. 
-func (c *InputService10ProtocolTest) InputService10TestCaseOperation1Request(input *InputService10TestShapeInputShape) (req *aws.Request, output *InputService10TestShapeInputService10TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService10TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/2014-01-01/hostedzone", - } - - if input == nil { - input = &InputService10TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService10TestShapeInputService10TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService10ProtocolTest) InputService10TestCaseOperation1(input *InputService10TestShapeInputShape) (*InputService10TestShapeInputService10TestCaseOperation1Output, error) { - req, out := c.InputService10TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService10TestShapeInputService10TestCaseOperation1Output struct { - metadataInputService10TestShapeInputService10TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService10TestShapeInputService10TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService10TestShapeInputShape struct { - StructureParam *InputService10TestShapeStructureShape `type:"structure"` - - metadataInputService10TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService10TestShapeInputShape struct { - SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` -} - -type InputService10TestShapeStructureShape struct { - B []byte `locationName:"b" type:"blob"` - - T *time.Time `locationName:"t" type:"timestamp" timestampFormat:"iso8601"` - - metadataInputService10TestShapeStructureShape `json:"-" xml:"-"` -} - -type metadataInputService10TestShapeStructureShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService11ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService11ProtocolTest client. -func NewInputService11ProtocolTest(config *aws.Config) *InputService11ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice11protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService11ProtocolTest{service} -} - -// newRequest creates a new request for a InputService11ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService11ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService11TestCaseOperation1 = "OperationName" - -// InputService11TestCaseOperation1Request generates a request for the InputService11TestCaseOperation1 operation. 
-func (c *InputService11ProtocolTest) InputService11TestCaseOperation1Request(input *InputService11TestShapeInputShape) (req *aws.Request, output *InputService11TestShapeInputService11TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService11TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &InputService11TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService11TestShapeInputService11TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService11ProtocolTest) InputService11TestCaseOperation1(input *InputService11TestShapeInputShape) (*InputService11TestShapeInputService11TestCaseOperation1Output, error) { - req, out := c.InputService11TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService11TestShapeInputService11TestCaseOperation1Output struct { - metadataInputService11TestShapeInputService11TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService11TestShapeInputService11TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService11TestShapeInputShape struct { - Foo map[string]*string `location:"headers" locationName:"x-foo-" type:"map"` - - metadataInputService11TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService11TestShapeInputShape struct { - SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` -} - -type InputService12ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService12ProtocolTest client. -func NewInputService12ProtocolTest(config *aws.Config) *InputService12ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice12protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService12ProtocolTest{service} -} - -// newRequest creates a new request for a InputService12ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService12ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService12TestCaseOperation1 = "OperationName" - -// InputService12TestCaseOperation1Request generates a request for the InputService12TestCaseOperation1 operation. 
-func (c *InputService12ProtocolTest) InputService12TestCaseOperation1Request(input *InputService12TestShapeInputShape) (req *aws.Request, output *InputService12TestShapeInputService12TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService12TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &InputService12TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService12TestShapeInputService12TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService12ProtocolTest) InputService12TestCaseOperation1(input *InputService12TestShapeInputShape) (*InputService12TestShapeInputService12TestCaseOperation1Output, error) { - req, out := c.InputService12TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService12TestShapeInputService12TestCaseOperation1Output struct { - metadataInputService12TestShapeInputService12TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService12TestShapeInputService12TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService12TestShapeInputShape struct { - Foo *string `locationName:"foo" type:"string"` - - metadataInputService12TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService12TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure" payload:"Foo"` -} - -type InputService13ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService13ProtocolTest client. -func NewInputService13ProtocolTest(config *aws.Config) *InputService13ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice13protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService13ProtocolTest{service} -} - -// newRequest creates a new request for a InputService13ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService13ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService13TestCaseOperation1 = "OperationName" - -// InputService13TestCaseOperation1Request generates a request for the InputService13TestCaseOperation1 operation. 
-func (c *InputService13ProtocolTest) InputService13TestCaseOperation1Request(input *InputService13TestShapeInputShape) (req *aws.Request, output *InputService13TestShapeInputService13TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService13TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &InputService13TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService13TestShapeInputService13TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService13ProtocolTest) InputService13TestCaseOperation1(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation1Output, error) { - req, out := c.InputService13TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -const opInputService13TestCaseOperation2 = "OperationName" - -// InputService13TestCaseOperation2Request generates a request for the InputService13TestCaseOperation2 operation. -func (c *InputService13ProtocolTest) InputService13TestCaseOperation2Request(input *InputService13TestShapeInputShape) (req *aws.Request, output *InputService13TestShapeInputService13TestCaseOperation2Output) { - op := &aws.Operation{ - Name: opInputService13TestCaseOperation2, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &InputService13TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService13TestShapeInputService13TestCaseOperation2Output{} - req.Data = output - return -} - -func (c *InputService13ProtocolTest) InputService13TestCaseOperation2(input *InputService13TestShapeInputShape) (*InputService13TestShapeInputService13TestCaseOperation2Output, error) { - req, out := c.InputService13TestCaseOperation2Request(input) - err := req.Send() - return out, err -} - -type InputService13TestShapeInputService13TestCaseOperation1Output struct { - metadataInputService13TestShapeInputService13TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService13TestShapeInputService13TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService13TestShapeInputService13TestCaseOperation2Output struct { - metadataInputService13TestShapeInputService13TestCaseOperation2Output `json:"-" xml:"-"` -} - -type metadataInputService13TestShapeInputService13TestCaseOperation2Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService13TestShapeInputShape struct { - Foo []byte `locationName:"foo" type:"blob"` - - metadataInputService13TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService13TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure" payload:"Foo"` -} - -type InputService14ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService14ProtocolTest client. 
-func NewInputService14ProtocolTest(config *aws.Config) *InputService14ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice14protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService14ProtocolTest{service} -} - -// newRequest creates a new request for a InputService14ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService14ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService14TestCaseOperation1 = "OperationName" - -// InputService14TestCaseOperation1Request generates a request for the InputService14TestCaseOperation1 operation. -func (c *InputService14ProtocolTest) InputService14TestCaseOperation1Request(input *InputService14TestShapeInputShape) (req *aws.Request, output *InputService14TestShapeInputService14TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService14TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &InputService14TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService14TestShapeInputService14TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService14ProtocolTest) InputService14TestCaseOperation1(input *InputService14TestShapeInputShape) (*InputService14TestShapeInputService14TestCaseOperation1Output, error) { - req, out := c.InputService14TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -const opInputService14TestCaseOperation2 = "OperationName" - -// InputService14TestCaseOperation2Request generates a request for the InputService14TestCaseOperation2 operation. -func (c *InputService14ProtocolTest) InputService14TestCaseOperation2Request(input *InputService14TestShapeInputShape) (req *aws.Request, output *InputService14TestShapeInputService14TestCaseOperation2Output) { - op := &aws.Operation{ - Name: opInputService14TestCaseOperation2, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &InputService14TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService14TestShapeInputService14TestCaseOperation2Output{} - req.Data = output - return -} - -func (c *InputService14ProtocolTest) InputService14TestCaseOperation2(input *InputService14TestShapeInputShape) (*InputService14TestShapeInputService14TestCaseOperation2Output, error) { - req, out := c.InputService14TestCaseOperation2Request(input) - err := req.Send() - return out, err -} - -const opInputService14TestCaseOperation3 = "OperationName" - -// InputService14TestCaseOperation3Request generates a request for the InputService14TestCaseOperation3 operation. 
-func (c *InputService14ProtocolTest) InputService14TestCaseOperation3Request(input *InputService14TestShapeInputShape) (req *aws.Request, output *InputService14TestShapeInputService14TestCaseOperation3Output) { - op := &aws.Operation{ - Name: opInputService14TestCaseOperation3, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &InputService14TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService14TestShapeInputService14TestCaseOperation3Output{} - req.Data = output - return -} - -func (c *InputService14ProtocolTest) InputService14TestCaseOperation3(input *InputService14TestShapeInputShape) (*InputService14TestShapeInputService14TestCaseOperation3Output, error) { - req, out := c.InputService14TestCaseOperation3Request(input) - err := req.Send() - return out, err -} - -type InputService14TestShapeFooShape struct { - Baz *string `locationName:"baz" type:"string"` - - metadataInputService14TestShapeFooShape `json:"-" xml:"-"` -} - -type metadataInputService14TestShapeFooShape struct { - SDKShapeTraits bool `locationName:"foo" type:"structure"` -} - -type InputService14TestShapeInputService14TestCaseOperation1Output struct { - metadataInputService14TestShapeInputService14TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService14TestShapeInputService14TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService14TestShapeInputService14TestCaseOperation2Output struct { - metadataInputService14TestShapeInputService14TestCaseOperation2Output `json:"-" xml:"-"` -} - -type metadataInputService14TestShapeInputService14TestCaseOperation2Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService14TestShapeInputService14TestCaseOperation3Output struct { - metadataInputService14TestShapeInputService14TestCaseOperation3Output `json:"-" xml:"-"` -} - -type metadataInputService14TestShapeInputService14TestCaseOperation3Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService14TestShapeInputShape struct { - Foo *InputService14TestShapeFooShape `locationName:"foo" type:"structure"` - - metadataInputService14TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService14TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure" payload:"Foo"` -} - -type InputService15ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService15ProtocolTest client. -func NewInputService15ProtocolTest(config *aws.Config) *InputService15ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice15protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService15ProtocolTest{service} -} - -// newRequest creates a new request for a InputService15ProtocolTest operation and runs any -// custom request initialization. 
-func (c *InputService15ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService15TestCaseOperation1 = "OperationName" - -// InputService15TestCaseOperation1Request generates a request for the InputService15TestCaseOperation1 operation. -func (c *InputService15ProtocolTest) InputService15TestCaseOperation1Request(input *InputService15TestShapeInputShape) (req *aws.Request, output *InputService15TestShapeInputService15TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService15TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &InputService15TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService15TestShapeInputService15TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService15ProtocolTest) InputService15TestCaseOperation1(input *InputService15TestShapeInputShape) (*InputService15TestShapeInputService15TestCaseOperation1Output, error) { - req, out := c.InputService15TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService15TestShapeGrant struct { - Grantee *InputService15TestShapeGrantee `type:"structure"` - - metadataInputService15TestShapeGrant `json:"-" xml:"-"` -} - -type metadataInputService15TestShapeGrant struct { - SDKShapeTraits bool `locationName:"Grant" type:"structure"` -} - -type InputService15TestShapeGrantee struct { - EmailAddress *string `type:"string"` - - Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true"` - - metadataInputService15TestShapeGrantee `json:"-" xml:"-"` -} - -type metadataInputService15TestShapeGrantee struct { - SDKShapeTraits bool `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` -} - -type InputService15TestShapeInputService15TestCaseOperation1Output struct { - metadataInputService15TestShapeInputService15TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService15TestShapeInputService15TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService15TestShapeInputShape struct { - Grant *InputService15TestShapeGrant `locationName:"Grant" type:"structure"` - - metadataInputService15TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService15TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure" payload:"Grant"` -} - -type InputService16ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService16ProtocolTest client. -func NewInputService16ProtocolTest(config *aws.Config) *InputService16ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice16protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService16ProtocolTest{service} -} - -// newRequest creates a new request for a InputService16ProtocolTest operation and runs any -// custom request initialization. 
-func (c *InputService16ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService16TestCaseOperation1 = "OperationName" - -// InputService16TestCaseOperation1Request generates a request for the InputService16TestCaseOperation1 operation. -func (c *InputService16ProtocolTest) InputService16TestCaseOperation1Request(input *InputService16TestShapeInputShape) (req *aws.Request, output *InputService16TestShapeInputService16TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService16TestCaseOperation1, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &InputService16TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService16TestShapeInputService16TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService16ProtocolTest) InputService16TestCaseOperation1(input *InputService16TestShapeInputShape) (*InputService16TestShapeInputService16TestCaseOperation1Output, error) { - req, out := c.InputService16TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService16TestShapeInputService16TestCaseOperation1Output struct { - metadataInputService16TestShapeInputService16TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService16TestShapeInputService16TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService16TestShapeInputShape struct { - Bucket *string `location:"uri" type:"string"` - - Key *string `location:"uri" type:"string"` - - metadataInputService16TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService16TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService17ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService17ProtocolTest client. -func NewInputService17ProtocolTest(config *aws.Config) *InputService17ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice17protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService17ProtocolTest{service} -} - -// newRequest creates a new request for a InputService17ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService17ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService17TestCaseOperation1 = "OperationName" - -// InputService17TestCaseOperation1Request generates a request for the InputService17TestCaseOperation1 operation. 
-func (c *InputService17ProtocolTest) InputService17TestCaseOperation1Request(input *InputService17TestShapeInputShape) (req *aws.Request, output *InputService17TestShapeInputService17TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService17TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/path", - } - - if input == nil { - input = &InputService17TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService17TestShapeInputService17TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService17ProtocolTest) InputService17TestCaseOperation1(input *InputService17TestShapeInputShape) (*InputService17TestShapeInputService17TestCaseOperation1Output, error) { - req, out := c.InputService17TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -const opInputService17TestCaseOperation2 = "OperationName" - -// InputService17TestCaseOperation2Request generates a request for the InputService17TestCaseOperation2 operation. -func (c *InputService17ProtocolTest) InputService17TestCaseOperation2Request(input *InputService17TestShapeInputShape) (req *aws.Request, output *InputService17TestShapeInputService17TestCaseOperation2Output) { - op := &aws.Operation{ - Name: opInputService17TestCaseOperation2, - HTTPMethod: "POST", - HTTPPath: "/path?abc=mno", - } - - if input == nil { - input = &InputService17TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService17TestShapeInputService17TestCaseOperation2Output{} - req.Data = output - return -} - -func (c *InputService17ProtocolTest) InputService17TestCaseOperation2(input *InputService17TestShapeInputShape) (*InputService17TestShapeInputService17TestCaseOperation2Output, error) { - req, out := c.InputService17TestCaseOperation2Request(input) - err := req.Send() - return out, err -} - -type InputService17TestShapeInputService17TestCaseOperation1Output struct { - metadataInputService17TestShapeInputService17TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService17TestShapeInputService17TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService17TestShapeInputService17TestCaseOperation2Output struct { - metadataInputService17TestShapeInputService17TestCaseOperation2Output `json:"-" xml:"-"` -} - -type metadataInputService17TestShapeInputService17TestCaseOperation2Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService17TestShapeInputShape struct { - Foo *string `location:"querystring" locationName:"param-name" type:"string"` - - metadataInputService17TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService17TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService18ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService18ProtocolTest client. 
-func NewInputService18ProtocolTest(config *aws.Config) *InputService18ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice18protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService18ProtocolTest{service} -} - -// newRequest creates a new request for a InputService18ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService18ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService18TestCaseOperation1 = "OperationName" - -// InputService18TestCaseOperation1Request generates a request for the InputService18TestCaseOperation1 operation. -func (c *InputService18ProtocolTest) InputService18TestCaseOperation1Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService18TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/path", - } - - if input == nil { - input = &InputService18TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService18TestShapeInputService18TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService18ProtocolTest) InputService18TestCaseOperation1(input *InputService18TestShapeInputShape) (*InputService18TestShapeInputService18TestCaseOperation1Output, error) { - req, out := c.InputService18TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -const opInputService18TestCaseOperation2 = "OperationName" - -// InputService18TestCaseOperation2Request generates a request for the InputService18TestCaseOperation2 operation. -func (c *InputService18ProtocolTest) InputService18TestCaseOperation2Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation2Output) { - op := &aws.Operation{ - Name: opInputService18TestCaseOperation2, - HTTPMethod: "POST", - HTTPPath: "/path", - } - - if input == nil { - input = &InputService18TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService18TestShapeInputService18TestCaseOperation2Output{} - req.Data = output - return -} - -func (c *InputService18ProtocolTest) InputService18TestCaseOperation2(input *InputService18TestShapeInputShape) (*InputService18TestShapeInputService18TestCaseOperation2Output, error) { - req, out := c.InputService18TestCaseOperation2Request(input) - err := req.Send() - return out, err -} - -const opInputService18TestCaseOperation3 = "OperationName" - -// InputService18TestCaseOperation3Request generates a request for the InputService18TestCaseOperation3 operation. 
-func (c *InputService18ProtocolTest) InputService18TestCaseOperation3Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation3Output) { - op := &aws.Operation{ - Name: opInputService18TestCaseOperation3, - HTTPMethod: "POST", - HTTPPath: "/path", - } - - if input == nil { - input = &InputService18TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService18TestShapeInputService18TestCaseOperation3Output{} - req.Data = output - return -} - -func (c *InputService18ProtocolTest) InputService18TestCaseOperation3(input *InputService18TestShapeInputShape) (*InputService18TestShapeInputService18TestCaseOperation3Output, error) { - req, out := c.InputService18TestCaseOperation3Request(input) - err := req.Send() - return out, err -} - -const opInputService18TestCaseOperation4 = "OperationName" - -// InputService18TestCaseOperation4Request generates a request for the InputService18TestCaseOperation4 operation. -func (c *InputService18ProtocolTest) InputService18TestCaseOperation4Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation4Output) { - op := &aws.Operation{ - Name: opInputService18TestCaseOperation4, - HTTPMethod: "POST", - HTTPPath: "/path", - } - - if input == nil { - input = &InputService18TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService18TestShapeInputService18TestCaseOperation4Output{} - req.Data = output - return -} - -func (c *InputService18ProtocolTest) InputService18TestCaseOperation4(input *InputService18TestShapeInputShape) (*InputService18TestShapeInputService18TestCaseOperation4Output, error) { - req, out := c.InputService18TestCaseOperation4Request(input) - err := req.Send() - return out, err -} - -const opInputService18TestCaseOperation5 = "OperationName" - -// InputService18TestCaseOperation5Request generates a request for the InputService18TestCaseOperation5 operation. -func (c *InputService18ProtocolTest) InputService18TestCaseOperation5Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation5Output) { - op := &aws.Operation{ - Name: opInputService18TestCaseOperation5, - HTTPMethod: "POST", - HTTPPath: "/path", - } - - if input == nil { - input = &InputService18TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService18TestShapeInputService18TestCaseOperation5Output{} - req.Data = output - return -} - -func (c *InputService18ProtocolTest) InputService18TestCaseOperation5(input *InputService18TestShapeInputShape) (*InputService18TestShapeInputService18TestCaseOperation5Output, error) { - req, out := c.InputService18TestCaseOperation5Request(input) - err := req.Send() - return out, err -} - -const opInputService18TestCaseOperation6 = "OperationName" - -// InputService18TestCaseOperation6Request generates a request for the InputService18TestCaseOperation6 operation. 
-func (c *InputService18ProtocolTest) InputService18TestCaseOperation6Request(input *InputService18TestShapeInputShape) (req *aws.Request, output *InputService18TestShapeInputService18TestCaseOperation6Output) { - op := &aws.Operation{ - Name: opInputService18TestCaseOperation6, - HTTPMethod: "POST", - HTTPPath: "/path", - } - - if input == nil { - input = &InputService18TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService18TestShapeInputService18TestCaseOperation6Output{} - req.Data = output - return -} - -func (c *InputService18ProtocolTest) InputService18TestCaseOperation6(input *InputService18TestShapeInputShape) (*InputService18TestShapeInputService18TestCaseOperation6Output, error) { - req, out := c.InputService18TestCaseOperation6Request(input) - err := req.Send() - return out, err -} - -type InputService18TestShapeInputService18TestCaseOperation1Output struct { - metadataInputService18TestShapeInputService18TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService18TestShapeInputService18TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService18TestShapeInputService18TestCaseOperation2Output struct { - metadataInputService18TestShapeInputService18TestCaseOperation2Output `json:"-" xml:"-"` -} - -type metadataInputService18TestShapeInputService18TestCaseOperation2Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService18TestShapeInputService18TestCaseOperation3Output struct { - metadataInputService18TestShapeInputService18TestCaseOperation3Output `json:"-" xml:"-"` -} - -type metadataInputService18TestShapeInputService18TestCaseOperation3Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService18TestShapeInputService18TestCaseOperation4Output struct { - metadataInputService18TestShapeInputService18TestCaseOperation4Output `json:"-" xml:"-"` -} - -type metadataInputService18TestShapeInputService18TestCaseOperation4Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService18TestShapeInputService18TestCaseOperation5Output struct { - metadataInputService18TestShapeInputService18TestCaseOperation5Output `json:"-" xml:"-"` -} - -type metadataInputService18TestShapeInputService18TestCaseOperation5Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService18TestShapeInputService18TestCaseOperation6Output struct { - metadataInputService18TestShapeInputService18TestCaseOperation6Output `json:"-" xml:"-"` -} - -type metadataInputService18TestShapeInputService18TestCaseOperation6Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService18TestShapeInputShape struct { - RecursiveStruct *InputService18TestShapeRecursiveStructType `type:"structure"` - - metadataInputService18TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService18TestShapeInputShape struct { - SDKShapeTraits bool `locationName:"OperationRequest" type:"structure" xmlURI:"https://foo/"` -} - -type InputService18TestShapeRecursiveStructType struct { - NoRecurse *string `type:"string"` - - RecursiveList []*InputService18TestShapeRecursiveStructType `type:"list"` - - RecursiveMap map[string]*InputService18TestShapeRecursiveStructType `type:"map"` - - RecursiveStruct *InputService18TestShapeRecursiveStructType `type:"structure"` - - metadataInputService18TestShapeRecursiveStructType `json:"-" xml:"-"` -} - -type metadataInputService18TestShapeRecursiveStructType struct { - SDKShapeTraits bool `type:"structure"` -} 
- -type InputService19ProtocolTest struct { - *aws.Service -} - -// New returns a new InputService19ProtocolTest client. -func NewInputService19ProtocolTest(config *aws.Config) *InputService19ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "inputservice19protocoltest", - APIVersion: "2014-01-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &InputService19ProtocolTest{service} -} - -// newRequest creates a new request for a InputService19ProtocolTest operation and runs any -// custom request initialization. -func (c *InputService19ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opInputService19TestCaseOperation1 = "OperationName" - -// InputService19TestCaseOperation1Request generates a request for the InputService19TestCaseOperation1 operation. -func (c *InputService19ProtocolTest) InputService19TestCaseOperation1Request(input *InputService19TestShapeInputShape) (req *aws.Request, output *InputService19TestShapeInputService19TestCaseOperation1Output) { - op := &aws.Operation{ - Name: opInputService19TestCaseOperation1, - HTTPMethod: "POST", - HTTPPath: "/path", - } - - if input == nil { - input = &InputService19TestShapeInputShape{} - } - - req = c.newRequest(op, input, output) - output = &InputService19TestShapeInputService19TestCaseOperation1Output{} - req.Data = output - return -} - -func (c *InputService19ProtocolTest) InputService19TestCaseOperation1(input *InputService19TestShapeInputShape) (*InputService19TestShapeInputService19TestCaseOperation1Output, error) { - req, out := c.InputService19TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type InputService19TestShapeInputService19TestCaseOperation1Output struct { - metadataInputService19TestShapeInputService19TestCaseOperation1Output `json:"-" xml:"-"` -} - -type metadataInputService19TestShapeInputService19TestCaseOperation1Output struct { - SDKShapeTraits bool `type:"structure"` -} - -type InputService19TestShapeInputShape struct { - TimeArgInHeader *time.Time `location:"header" locationName:"x-amz-timearg" type:"timestamp" timestampFormat:"rfc822"` - - metadataInputService19TestShapeInputShape `json:"-" xml:"-"` -} - -type metadataInputService19TestShapeInputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -// -// Tests begin here -// - -func TestInputService1ProtocolTestBasicXMLSerializationCase1(t *testing.T) { - svc := NewInputService1ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService1TestShapeInputShape{ - Description: aws.String("bar"), - Name: aws.String("foo"), - } - req, _ := svc.InputService1TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`barfoo`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String()) - - // assert headers - -} - -func TestInputService1ProtocolTestBasicXMLSerializationCase2(t *testing.T) { - svc := NewInputService1ProtocolTest(nil) - 
svc.Endpoint = "https://test" - - input := &InputService1TestShapeInputShape{ - Description: aws.String("bar"), - Name: aws.String("foo"), - } - req, _ := svc.InputService1TestCaseOperation2Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`barfoo`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String()) - - // assert headers - -} - -func TestInputService2ProtocolTestSerializeOtherScalarTypesCase1(t *testing.T) { - svc := NewInputService2ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService2TestShapeInputShape{ - First: aws.Bool(true), - Fourth: aws.Int64(3), - Second: aws.Bool(false), - Third: aws.Float64(1.2), - } - req, _ := svc.InputService2TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`true3false1.2`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String()) - - // assert headers - -} - -func TestInputService3ProtocolTestNestedStructuresCase1(t *testing.T) { - svc := NewInputService3ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService3TestShapeInputShape{ - Description: aws.String("baz"), - SubStructure: &InputService3TestShapeSubStructure{ - Bar: aws.String("b"), - Foo: aws.String("a"), - }, - } - req, _ := svc.InputService3TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`bazba`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String()) - - // assert headers - -} - -func TestInputService4ProtocolTestNestedStructuresCase1(t *testing.T) { - svc := NewInputService4ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService4TestShapeInputShape{ - Description: aws.String("baz"), - SubStructure: &InputService4TestShapeSubStructure{}, - } - req, _ := svc.InputService4TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`baz`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String()) - - // assert headers - -} - -func TestInputService5ProtocolTestNonFlattenedListsCase1(t *testing.T) { - svc := NewInputService5ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService5TestShapeInputShape{ - ListParam: []*string{ - aws.String("one"), - aws.String("two"), - aws.String("three"), - }, - } - req, _ := svc.InputService5TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`onetwothree`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String()) - - // assert headers - -} - -func TestInputService6ProtocolTestNonFlattenedListsWithLocationNameCase1(t 
*testing.T) { - svc := NewInputService6ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService6TestShapeInputShape{ - ListParam: []*string{ - aws.String("one"), - aws.String("two"), - aws.String("three"), - }, - } - req, _ := svc.InputService6TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`onetwothree`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String()) - - // assert headers - -} - -func TestInputService7ProtocolTestFlattenedListsCase1(t *testing.T) { - svc := NewInputService7ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService7TestShapeInputShape{ - ListParam: []*string{ - aws.String("one"), - aws.String("two"), - aws.String("three"), - }, - } - req, _ := svc.InputService7TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`onetwothree`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String()) - - // assert headers - -} - -func TestInputService8ProtocolTestFlattenedListsWithLocationNameCase1(t *testing.T) { - svc := NewInputService8ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService8TestShapeInputShape{ - ListParam: []*string{ - aws.String("one"), - aws.String("two"), - aws.String("three"), - }, - } - req, _ := svc.InputService8TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`onetwothree`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String()) - - // assert headers - -} - -func TestInputService9ProtocolTestListOfStructuresCase1(t *testing.T) { - svc := NewInputService9ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService9TestShapeInputShape{ - ListParam: []*InputService9TestShapeSingleFieldStruct{ - { - Element: aws.String("one"), - }, - { - Element: aws.String("two"), - }, - { - Element: aws.String("three"), - }, - }, - } - req, _ := svc.InputService9TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`onetwothree`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String()) - - // assert headers - -} - -func TestInputService10ProtocolTestBlobAndTimestampShapesCase1(t *testing.T) { - svc := NewInputService10ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService10TestShapeInputShape{ - StructureParam: &InputService10TestShapeStructureShape{ - B: []byte("foo"), - T: aws.Time(time.Unix(1422172800, 0)), - }, - } - req, _ := svc.InputService10TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`Zm9v2015-01-25T08:00:00Z`), 
util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/2014-01-01/hostedzone", r.URL.String()) - - // assert headers - -} - -func TestInputService11ProtocolTestHeaderMapsCase1(t *testing.T) { - svc := NewInputService11ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService11TestShapeInputShape{ - Foo: map[string]*string{ - "a": aws.String("b"), - "c": aws.String("d"), - }, - } - req, _ := svc.InputService11TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - assert.Equal(t, "b", r.Header.Get("x-foo-a")) - assert.Equal(t, "d", r.Header.Get("x-foo-c")) - -} - -func TestInputService12ProtocolTestStringPayloadCase1(t *testing.T) { - svc := NewInputService12ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService12TestShapeInputShape{ - Foo: aws.String("bar"), - } - req, _ := svc.InputService12TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`bar`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService13ProtocolTestBlobPayloadCase1(t *testing.T) { - svc := NewInputService13ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService13TestShapeInputShape{ - Foo: []byte("bar"), - } - req, _ := svc.InputService13TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`bar`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService13ProtocolTestBlobPayloadCase2(t *testing.T) { - svc := NewInputService13ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService13TestShapeInputShape{} - req, _ := svc.InputService13TestCaseOperation2Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService14ProtocolTestStructurePayloadCase1(t *testing.T) { - svc := NewInputService14ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService14TestShapeInputShape{ - Foo: &InputService14TestShapeFooShape{ - Baz: aws.String("bar"), - }, - } - req, _ := svc.InputService14TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`bar`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService14ProtocolTestStructurePayloadCase2(t *testing.T) { - svc := NewInputService14ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService14TestShapeInputShape{ - Foo: &InputService14TestShapeFooShape{}, - } - req, _ := svc.InputService14TestCaseOperation2Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert 
body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(``), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService14ProtocolTestStructurePayloadCase3(t *testing.T) { - svc := NewInputService14ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService14TestShapeInputShape{} - req, _ := svc.InputService14TestCaseOperation3Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService15ProtocolTestXMLAttributeCase1(t *testing.T) { - svc := NewInputService15ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService15TestShapeInputShape{ - Grant: &InputService15TestShapeGrant{ - Grantee: &InputService15TestShapeGrantee{ - EmailAddress: aws.String("foo@example.com"), - Type: aws.String("CanonicalUser"), - }, - }, - } - req, _ := svc.InputService15TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`foo@example.com`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/", r.URL.String()) - - // assert headers - -} - -func TestInputService16ProtocolTestGreedyKeysCase1(t *testing.T) { - svc := NewInputService16ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService16TestShapeInputShape{ - Bucket: aws.String("my/bucket"), - Key: aws.String("testing /123"), - } - req, _ := svc.InputService16TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert URL - assert.Equal(t, "https://test/my%2Fbucket/testing%20/123", r.URL.String()) - - // assert headers - -} - -func TestInputService17ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase1(t *testing.T) { - svc := NewInputService17ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService17TestShapeInputShape{} - req, _ := svc.InputService17TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert URL - assert.Equal(t, "https://test/path", r.URL.String()) - - // assert headers - -} - -func TestInputService17ProtocolTestOmitsNullQueryParamsButSerializesEmptyStringsCase2(t *testing.T) { - svc := NewInputService17ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService17TestShapeInputShape{ - Foo: aws.String(""), - } - req, _ := svc.InputService17TestCaseOperation2Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert URL - assert.Equal(t, "https://test/path?abc=mno&param-name=", r.URL.String()) - - // assert headers - -} - -func TestInputService18ProtocolTestRecursiveShapesCase1(t *testing.T) { - svc := NewInputService18ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService18TestShapeInputShape{ - RecursiveStruct: &InputService18TestShapeRecursiveStructType{ - NoRecurse: aws.String("foo"), - }, - } - req, _ := svc.InputService18TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := 
util.SortXML(r.Body) - assert.Equal(t, util.Trim(`foo`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/path", r.URL.String()) - - // assert headers - -} - -func TestInputService18ProtocolTestRecursiveShapesCase2(t *testing.T) { - svc := NewInputService18ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService18TestShapeInputShape{ - RecursiveStruct: &InputService18TestShapeRecursiveStructType{ - RecursiveStruct: &InputService18TestShapeRecursiveStructType{ - NoRecurse: aws.String("foo"), - }, - }, - } - req, _ := svc.InputService18TestCaseOperation2Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`foo`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/path", r.URL.String()) - - // assert headers - -} - -func TestInputService18ProtocolTestRecursiveShapesCase3(t *testing.T) { - svc := NewInputService18ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService18TestShapeInputShape{ - RecursiveStruct: &InputService18TestShapeRecursiveStructType{ - RecursiveStruct: &InputService18TestShapeRecursiveStructType{ - RecursiveStruct: &InputService18TestShapeRecursiveStructType{ - RecursiveStruct: &InputService18TestShapeRecursiveStructType{ - NoRecurse: aws.String("foo"), - }, - }, - }, - }, - } - req, _ := svc.InputService18TestCaseOperation3Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`foo`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/path", r.URL.String()) - - // assert headers - -} - -func TestInputService18ProtocolTestRecursiveShapesCase4(t *testing.T) { - svc := NewInputService18ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService18TestShapeInputShape{ - RecursiveStruct: &InputService18TestShapeRecursiveStructType{ - RecursiveList: []*InputService18TestShapeRecursiveStructType{ - { - NoRecurse: aws.String("foo"), - }, - { - NoRecurse: aws.String("bar"), - }, - }, - }, - } - req, _ := svc.InputService18TestCaseOperation4Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`foobar`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/path", r.URL.String()) - - // assert headers - -} - -func TestInputService18ProtocolTestRecursiveShapesCase5(t *testing.T) { - svc := NewInputService18ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService18TestShapeInputShape{ - RecursiveStruct: &InputService18TestShapeRecursiveStructType{ - RecursiveList: []*InputService18TestShapeRecursiveStructType{ - { - NoRecurse: aws.String("foo"), - }, - { - RecursiveStruct: &InputService18TestShapeRecursiveStructType{ - NoRecurse: aws.String("bar"), - }, - }, - }, - }, - } - req, _ := svc.InputService18TestCaseOperation5Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`foobar`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/path", r.URL.String()) - 
- // assert headers - -} - -func TestInputService18ProtocolTestRecursiveShapesCase6(t *testing.T) { - svc := NewInputService18ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService18TestShapeInputShape{ - RecursiveStruct: &InputService18TestShapeRecursiveStructType{ - RecursiveMap: map[string]*InputService18TestShapeRecursiveStructType{ - "bar": { - NoRecurse: aws.String("bar"), - }, - "foo": { - NoRecurse: aws.String("foo"), - }, - }, - }, - } - req, _ := svc.InputService18TestCaseOperation6Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert body - assert.NotNil(t, r.Body) - body := util.SortXML(r.Body) - assert.Equal(t, util.Trim(`barbarfoofoo`), util.Trim(string(body))) - - // assert URL - assert.Equal(t, "https://test/path", r.URL.String()) - - // assert headers - -} - -func TestInputService19ProtocolTestTimestampInHeaderCase1(t *testing.T) { - svc := NewInputService19ProtocolTest(nil) - svc.Endpoint = "https://test" - - input := &InputService19TestShapeInputShape{ - TimeArgInHeader: aws.Time(time.Unix(1422172800, 0)), - } - req, _ := svc.InputService19TestCaseOperation1Request(input) - r := req.HTTPRequest - - // build request - restxml.Build(req) - assert.NoError(t, req.Error) - - // assert URL - assert.Equal(t, "https://test/path", r.URL.String()) - - // assert headers - assert.Equal(t, "Sun, 25 Jan 2015 08:00:00 GMT", r.Header.Get("x-amz-timearg")) - -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/restxml.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/restxml.go deleted file mode 100644 index d6cbff4..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/restxml.go +++ /dev/null @@ -1,55 +0,0 @@ -// Package restxml provides RESTful XML serialisation of AWS -// requests and responses. -package restxml - -//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/rest-xml.json build_test.go -//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/output/rest-xml.json unmarshal_test.go - -import ( - "bytes" - "encoding/xml" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/internal/protocol/query" - "github.com/aws/aws-sdk-go/internal/protocol/rest" - "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil" -) - -// Build builds a request payload for the REST XML protocol. -func Build(r *aws.Request) { - rest.Build(r) - - if t := rest.PayloadType(r.Params); t == "structure" || t == "" { - var buf bytes.Buffer - err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to enode rest XML request", err) - return - } - r.SetBufferBody(buf.Bytes()) - } -} - -// Unmarshal unmarshals a payload response for the REST XML protocol. -func Unmarshal(r *aws.Request) { - if t := rest.PayloadType(r.Data); t == "structure" || t == "" { - defer r.HTTPResponse.Body.Close() - decoder := xml.NewDecoder(r.HTTPResponse.Body) - err := xmlutil.UnmarshalXML(r.Data, decoder, "") - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) - return - } - } -} - -// UnmarshalMeta unmarshals response headers for the REST XML protocol. -func UnmarshalMeta(r *aws.Request) { - rest.Unmarshal(r) -} - -// UnmarshalError unmarshals a response error for the REST XML protocol. 
-func UnmarshalError(r *aws.Request) { - query.UnmarshalError(r) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/unmarshal_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/unmarshal_test.go deleted file mode 100644 index b232580..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/restxml/unmarshal_test.go +++ /dev/null @@ -1,1310 +0,0 @@ -package restxml_test - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "io" - "io/ioutil" - "net/http" - "net/url" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/internal/protocol/restxml" - "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil" - "github.com/aws/aws-sdk-go/internal/signer/v4" - "github.com/aws/aws-sdk-go/internal/util" - "github.com/stretchr/testify/assert" -) - -var _ bytes.Buffer // always import bytes -var _ http.Request -var _ json.Marshaler -var _ time.Time -var _ xmlutil.XMLNode -var _ xml.Attr -var _ = ioutil.Discard -var _ = util.Trim("") -var _ = url.Values{} -var _ = io.EOF - -type OutputService1ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService1ProtocolTest client. -func NewOutputService1ProtocolTest(config *aws.Config) *OutputService1ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice1protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &OutputService1ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService1ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService1TestCaseOperation1 = "OperationName" - -// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation. -func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *aws.Request, output *OutputService1TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService1TestCaseOperation1, - } - - if input == nil { - input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService1TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputShape, error) { - req, out := c.OutputService1TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -const opOutputService1TestCaseOperation2 = "OperationName" - -// OutputService1TestCaseOperation2Request generates a request for the OutputService1TestCaseOperation2 operation. 
-func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2Request(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (req *aws.Request, output *OutputService1TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService1TestCaseOperation2, - } - - if input == nil { - input = &OutputService1TestShapeOutputService1TestCaseOperation2Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService1TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation2(input *OutputService1TestShapeOutputService1TestCaseOperation2Input) (*OutputService1TestShapeOutputShape, error) { - req, out := c.OutputService1TestCaseOperation2Request(input) - err := req.Send() - return out, err -} - -type OutputService1TestShapeOutputService1TestCaseOperation1Input struct { - metadataOutputService1TestShapeOutputService1TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService1TestShapeOutputService1TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService1TestShapeOutputService1TestCaseOperation2Input struct { - metadataOutputService1TestShapeOutputService1TestCaseOperation2Input `json:"-" xml:"-"` -} - -type metadataOutputService1TestShapeOutputService1TestCaseOperation2Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService1TestShapeOutputShape struct { - Char *string `type:"character"` - - Double *float64 `type:"double"` - - FalseBool *bool `type:"boolean"` - - Float *float64 `type:"float"` - - ImaHeader *string `location:"header" type:"string"` - - ImaHeaderLocation *string `location:"header" locationName:"X-Foo" type:"string"` - - Long *int64 `type:"long"` - - Num *int64 `locationName:"FooNum" type:"integer"` - - Str *string `type:"string"` - - Timestamp *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - TrueBool *bool `type:"boolean"` - - metadataOutputService1TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService1TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService2ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService2ProtocolTest client. -func NewOutputService2ProtocolTest(config *aws.Config) *OutputService2ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice2protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &OutputService2ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService2ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService2TestCaseOperation1 = "OperationName" - -// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation. 
-func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *aws.Request, output *OutputService2TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService2TestCaseOperation1, - } - - if input == nil { - input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService2TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputShape, error) { - req, out := c.OutputService2TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService2TestShapeOutputService2TestCaseOperation1Input struct { - metadataOutputService2TestShapeOutputService2TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService2TestShapeOutputService2TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService2TestShapeOutputShape struct { - Blob []byte `type:"blob"` - - metadataOutputService2TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService2TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService3ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService3ProtocolTest client. -func NewOutputService3ProtocolTest(config *aws.Config) *OutputService3ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice3protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &OutputService3ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService3ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService3TestCaseOperation1 = "OperationName" - -// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation. 
-func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *aws.Request, output *OutputService3TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService3TestCaseOperation1, - } - - if input == nil { - input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService3TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputShape, error) { - req, out := c.OutputService3TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService3TestShapeOutputService3TestCaseOperation1Input struct { - metadataOutputService3TestShapeOutputService3TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService3TestShapeOutputService3TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService3TestShapeOutputShape struct { - ListMember []*string `type:"list"` - - metadataOutputService3TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService3TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService4ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService4ProtocolTest client. -func NewOutputService4ProtocolTest(config *aws.Config) *OutputService4ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice4protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &OutputService4ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService4ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService4TestCaseOperation1 = "OperationName" - -// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation. 
-func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *aws.Request, output *OutputService4TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService4TestCaseOperation1, - } - - if input == nil { - input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService4TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputShape, error) { - req, out := c.OutputService4TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService4TestShapeOutputService4TestCaseOperation1Input struct { - metadataOutputService4TestShapeOutputService4TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService4TestShapeOutputService4TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService4TestShapeOutputShape struct { - ListMember []*string `locationNameList:"item" type:"list"` - - metadataOutputService4TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService4TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService5ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService5ProtocolTest client. -func NewOutputService5ProtocolTest(config *aws.Config) *OutputService5ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice5protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &OutputService5ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService5ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService5TestCaseOperation1 = "OperationName" - -// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation. 
-func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *aws.Request, output *OutputService5TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService5TestCaseOperation1, - } - - if input == nil { - input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService5TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputShape, error) { - req, out := c.OutputService5TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService5TestShapeOutputService5TestCaseOperation1Input struct { - metadataOutputService5TestShapeOutputService5TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService5TestShapeOutputService5TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService5TestShapeOutputShape struct { - ListMember []*string `type:"list" flattened:"true"` - - metadataOutputService5TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService5TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService6ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService6ProtocolTest client. -func NewOutputService6ProtocolTest(config *aws.Config) *OutputService6ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice6protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &OutputService6ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService6ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService6TestCaseOperation1 = "OperationName" - -// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation. 
-func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *aws.Request, output *OutputService6TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService6TestCaseOperation1, - } - - if input == nil { - input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService6TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputShape, error) { - req, out := c.OutputService6TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService6TestShapeOutputService6TestCaseOperation1Input struct { - metadataOutputService6TestShapeOutputService6TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService6TestShapeOutputService6TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService6TestShapeOutputShape struct { - Map map[string]*OutputService6TestShapeSingleStructure `type:"map"` - - metadataOutputService6TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService6TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService6TestShapeSingleStructure struct { - Foo *string `locationName:"foo" type:"string"` - - metadataOutputService6TestShapeSingleStructure `json:"-" xml:"-"` -} - -type metadataOutputService6TestShapeSingleStructure struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService7ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService7ProtocolTest client. -func NewOutputService7ProtocolTest(config *aws.Config) *OutputService7ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice7protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &OutputService7ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService7ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService7ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService7TestCaseOperation1 = "OperationName" - -// OutputService7TestCaseOperation1Request generates a request for the OutputService7TestCaseOperation1 operation. 
-func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1Request(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (req *aws.Request, output *OutputService7TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService7TestCaseOperation1, - } - - if input == nil { - input = &OutputService7TestShapeOutputService7TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService7TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService7ProtocolTest) OutputService7TestCaseOperation1(input *OutputService7TestShapeOutputService7TestCaseOperation1Input) (*OutputService7TestShapeOutputShape, error) { - req, out := c.OutputService7TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService7TestShapeOutputService7TestCaseOperation1Input struct { - metadataOutputService7TestShapeOutputService7TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService7TestShapeOutputService7TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService7TestShapeOutputShape struct { - Map map[string]*string `type:"map" flattened:"true"` - - metadataOutputService7TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService7TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService8ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService8ProtocolTest client. -func NewOutputService8ProtocolTest(config *aws.Config) *OutputService8ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice8protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &OutputService8ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService8ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService8ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService8TestCaseOperation1 = "OperationName" - -// OutputService8TestCaseOperation1Request generates a request for the OutputService8TestCaseOperation1 operation. 
-func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1Request(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (req *aws.Request, output *OutputService8TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService8TestCaseOperation1, - } - - if input == nil { - input = &OutputService8TestShapeOutputService8TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService8TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService8ProtocolTest) OutputService8TestCaseOperation1(input *OutputService8TestShapeOutputService8TestCaseOperation1Input) (*OutputService8TestShapeOutputShape, error) { - req, out := c.OutputService8TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService8TestShapeOutputService8TestCaseOperation1Input struct { - metadataOutputService8TestShapeOutputService8TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService8TestShapeOutputService8TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService8TestShapeOutputShape struct { - Map map[string]*string `locationNameKey:"foo" locationNameValue:"bar" type:"map"` - - metadataOutputService8TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService8TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService9ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService9ProtocolTest client. -func NewOutputService9ProtocolTest(config *aws.Config) *OutputService9ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice9protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &OutputService9ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService9ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService9ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService9TestCaseOperation1 = "OperationName" - -// OutputService9TestCaseOperation1Request generates a request for the OutputService9TestCaseOperation1 operation. 
-func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1Request(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (req *aws.Request, output *OutputService9TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService9TestCaseOperation1, - } - - if input == nil { - input = &OutputService9TestShapeOutputService9TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService9TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService9ProtocolTest) OutputService9TestCaseOperation1(input *OutputService9TestShapeOutputService9TestCaseOperation1Input) (*OutputService9TestShapeOutputShape, error) { - req, out := c.OutputService9TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService9TestShapeOutputService9TestCaseOperation1Input struct { - metadataOutputService9TestShapeOutputService9TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService9TestShapeOutputService9TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService9TestShapeOutputShape struct { - Data *OutputService9TestShapeSingleStructure `type:"structure"` - - Header *string `location:"header" locationName:"X-Foo" type:"string"` - - metadataOutputService9TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService9TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure" payload:"Data"` -} - -type OutputService9TestShapeSingleStructure struct { - Foo *string `type:"string"` - - metadataOutputService9TestShapeSingleStructure `json:"-" xml:"-"` -} - -type metadataOutputService9TestShapeSingleStructure struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService10ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService10ProtocolTest client. -func NewOutputService10ProtocolTest(config *aws.Config) *OutputService10ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice10protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &OutputService10ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService10ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService10ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService10TestCaseOperation1 = "OperationName" - -// OutputService10TestCaseOperation1Request generates a request for the OutputService10TestCaseOperation1 operation. 
-func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1Request(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (req *aws.Request, output *OutputService10TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService10TestCaseOperation1, - } - - if input == nil { - input = &OutputService10TestShapeOutputService10TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService10TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService10ProtocolTest) OutputService10TestCaseOperation1(input *OutputService10TestShapeOutputService10TestCaseOperation1Input) (*OutputService10TestShapeOutputShape, error) { - req, out := c.OutputService10TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService10TestShapeOutputService10TestCaseOperation1Input struct { - metadataOutputService10TestShapeOutputService10TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService10TestShapeOutputService10TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService10TestShapeOutputShape struct { - Stream []byte `type:"blob"` - - metadataOutputService10TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService10TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure" payload:"Stream"` -} - -type OutputService11ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService11ProtocolTest client. -func NewOutputService11ProtocolTest(config *aws.Config) *OutputService11ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice11protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &OutputService11ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService11ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService11ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService11TestCaseOperation1 = "OperationName" - -// OutputService11TestCaseOperation1Request generates a request for the OutputService11TestCaseOperation1 operation. 
-func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1Request(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (req *aws.Request, output *OutputService11TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService11TestCaseOperation1, - } - - if input == nil { - input = &OutputService11TestShapeOutputService11TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService11TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService11ProtocolTest) OutputService11TestCaseOperation1(input *OutputService11TestShapeOutputService11TestCaseOperation1Input) (*OutputService11TestShapeOutputShape, error) { - req, out := c.OutputService11TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService11TestShapeOutputService11TestCaseOperation1Input struct { - metadataOutputService11TestShapeOutputService11TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService11TestShapeOutputService11TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService11TestShapeOutputShape struct { - Char *string `location:"header" locationName:"x-char" type:"character"` - - Double *float64 `location:"header" locationName:"x-double" type:"double"` - - FalseBool *bool `location:"header" locationName:"x-false-bool" type:"boolean"` - - Float *float64 `location:"header" locationName:"x-float" type:"float"` - - Integer *int64 `location:"header" locationName:"x-int" type:"integer"` - - Long *int64 `location:"header" locationName:"x-long" type:"long"` - - Str *string `location:"header" locationName:"x-str" type:"string"` - - Timestamp *time.Time `location:"header" locationName:"x-timestamp" type:"timestamp" timestampFormat:"iso8601"` - - TrueBool *bool `location:"header" locationName:"x-true-bool" type:"boolean"` - - metadataOutputService11TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService11TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService12ProtocolTest struct { - *aws.Service -} - -// New returns a new OutputService12ProtocolTest client. -func NewOutputService12ProtocolTest(config *aws.Config) *OutputService12ProtocolTest { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "outputservice12protocoltest", - APIVersion: "", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - return &OutputService12ProtocolTest{service} -} - -// newRequest creates a new request for a OutputService12ProtocolTest operation and runs any -// custom request initialization. -func (c *OutputService12ProtocolTest) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - return req -} - -const opOutputService12TestCaseOperation1 = "OperationName" - -// OutputService12TestCaseOperation1Request generates a request for the OutputService12TestCaseOperation1 operation. 
-func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1Request(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (req *aws.Request, output *OutputService12TestShapeOutputShape) { - op := &aws.Operation{ - Name: opOutputService12TestCaseOperation1, - } - - if input == nil { - input = &OutputService12TestShapeOutputService12TestCaseOperation1Input{} - } - - req = c.newRequest(op, input, output) - output = &OutputService12TestShapeOutputShape{} - req.Data = output - return -} - -func (c *OutputService12ProtocolTest) OutputService12TestCaseOperation1(input *OutputService12TestShapeOutputService12TestCaseOperation1Input) (*OutputService12TestShapeOutputShape, error) { - req, out := c.OutputService12TestCaseOperation1Request(input) - err := req.Send() - return out, err -} - -type OutputService12TestShapeOutputService12TestCaseOperation1Input struct { - metadataOutputService12TestShapeOutputService12TestCaseOperation1Input `json:"-" xml:"-"` -} - -type metadataOutputService12TestShapeOutputService12TestCaseOperation1Input struct { - SDKShapeTraits bool `type:"structure"` -} - -type OutputService12TestShapeOutputShape struct { - String *string `type:"string"` - - metadataOutputService12TestShapeOutputShape `json:"-" xml:"-"` -} - -type metadataOutputService12TestShapeOutputShape struct { - SDKShapeTraits bool `type:"structure" payload:"String"` -} - -// -// Tests begin here -// - -func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) { - svc := NewOutputService1ProtocolTest(nil) - - buf := bytes.NewReader([]byte("myname123falsetrue1.21.3200a2015-01-25T08:00:00Z")) - req, out := svc.OutputService1TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - req.HTTPResponse.Header.Set("ImaHeader", "test") - req.HTTPResponse.Header.Set("X-Foo", "abc") - - // unmarshal response - restxml.UnmarshalMeta(req) - restxml.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "a", *out.Char) - assert.Equal(t, 1.3, *out.Double) - assert.Equal(t, false, *out.FalseBool) - assert.Equal(t, 1.2, *out.Float) - assert.Equal(t, "test", *out.ImaHeader) - assert.Equal(t, "abc", *out.ImaHeaderLocation) - assert.Equal(t, int64(200), *out.Long) - assert.Equal(t, int64(123), *out.Num) - assert.Equal(t, "myname", *out.Str) - assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) - assert.Equal(t, true, *out.TrueBool) - -} - -func TestOutputService1ProtocolTestScalarMembersCase2(t *testing.T) { - svc := NewOutputService1ProtocolTest(nil) - - buf := bytes.NewReader([]byte("123falsetrue1.21.3200a2015-01-25T08:00:00Z")) - req, out := svc.OutputService1TestCaseOperation2Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - req.HTTPResponse.Header.Set("ImaHeader", "test") - req.HTTPResponse.Header.Set("X-Foo", "abc") - - // unmarshal response - restxml.UnmarshalMeta(req) - restxml.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "a", *out.Char) - assert.Equal(t, 1.3, *out.Double) - assert.Equal(t, false, *out.FalseBool) - assert.Equal(t, 1.2, *out.Float) - assert.Equal(t, "test", *out.ImaHeader) - assert.Equal(t, "abc", *out.ImaHeaderLocation) - assert.Equal(t, int64(200), *out.Long) - 
assert.Equal(t, int64(123), *out.Num) - assert.Equal(t, "", *out.Str) - assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) - assert.Equal(t, true, *out.TrueBool) - -} - -func TestOutputService2ProtocolTestBlobCase1(t *testing.T) { - svc := NewOutputService2ProtocolTest(nil) - - buf := bytes.NewReader([]byte("dmFsdWU=")) - req, out := svc.OutputService2TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - restxml.UnmarshalMeta(req) - restxml.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "value", string(out.Blob)) - -} - -func TestOutputService3ProtocolTestListsCase1(t *testing.T) { - svc := NewOutputService3ProtocolTest(nil) - - buf := bytes.NewReader([]byte("abc123")) - req, out := svc.OutputService3TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - restxml.UnmarshalMeta(req) - restxml.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) - assert.Equal(t, "123", *out.ListMember[1]) - -} - -func TestOutputService4ProtocolTestListWithCustomMemberNameCase1(t *testing.T) { - svc := NewOutputService4ProtocolTest(nil) - - buf := bytes.NewReader([]byte("abc123")) - req, out := svc.OutputService4TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - restxml.UnmarshalMeta(req) - restxml.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) - assert.Equal(t, "123", *out.ListMember[1]) - -} - -func TestOutputService5ProtocolTestFlattenedListCase1(t *testing.T) { - svc := NewOutputService5ProtocolTest(nil) - - buf := bytes.NewReader([]byte("abc123")) - req, out := svc.OutputService5TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - restxml.UnmarshalMeta(req) - restxml.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.ListMember[0]) - assert.Equal(t, "123", *out.ListMember[1]) - -} - -func TestOutputService6ProtocolTestNormalMapCase1(t *testing.T) { - svc := NewOutputService6ProtocolTest(nil) - - buf := bytes.NewReader([]byte("quxbarbazbam")) - req, out := svc.OutputService6TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - restxml.UnmarshalMeta(req) - restxml.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "bam", *out.Map["baz"].Foo) - assert.Equal(t, "bar", *out.Map["qux"].Foo) - -} - -func TestOutputService7ProtocolTestFlattenedMapCase1(t *testing.T) { - svc := NewOutputService7ProtocolTest(nil) - - buf := bytes.NewReader([]byte("quxbarbazbam")) - req, out := svc.OutputService7TestCaseOperation1Request(nil) - 
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - restxml.UnmarshalMeta(req) - restxml.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "bam", *out.Map["baz"]) - assert.Equal(t, "bar", *out.Map["qux"]) - -} - -func TestOutputService8ProtocolTestNamedMapCase1(t *testing.T) { - svc := NewOutputService8ProtocolTest(nil) - - buf := bytes.NewReader([]byte("quxbarbazbam")) - req, out := svc.OutputService8TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - restxml.UnmarshalMeta(req) - restxml.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "bam", *out.Map["baz"]) - assert.Equal(t, "bar", *out.Map["qux"]) - -} - -func TestOutputService9ProtocolTestXMLPayloadCase1(t *testing.T) { - svc := NewOutputService9ProtocolTest(nil) - - buf := bytes.NewReader([]byte("abc")) - req, out := svc.OutputService9TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - req.HTTPResponse.Header.Set("X-Foo", "baz") - - // unmarshal response - restxml.UnmarshalMeta(req) - restxml.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", *out.Data.Foo) - assert.Equal(t, "baz", *out.Header) - -} - -func TestOutputService10ProtocolTestStreamingPayloadCase1(t *testing.T) { - svc := NewOutputService10ProtocolTest(nil) - - buf := bytes.NewReader([]byte("abc")) - req, out := svc.OutputService10TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - restxml.UnmarshalMeta(req) - restxml.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "abc", string(out.Stream)) - -} - -func TestOutputService11ProtocolTestScalarMembersInHeadersCase1(t *testing.T) { - svc := NewOutputService11ProtocolTest(nil) - - buf := bytes.NewReader([]byte("")) - req, out := svc.OutputService11TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - req.HTTPResponse.Header.Set("x-char", "a") - req.HTTPResponse.Header.Set("x-double", "1.5") - req.HTTPResponse.Header.Set("x-false-bool", "false") - req.HTTPResponse.Header.Set("x-float", "1.5") - req.HTTPResponse.Header.Set("x-int", "1") - req.HTTPResponse.Header.Set("x-long", "100") - req.HTTPResponse.Header.Set("x-str", "string") - req.HTTPResponse.Header.Set("x-timestamp", "Sun, 25 Jan 2015 08:00:00 GMT") - req.HTTPResponse.Header.Set("x-true-bool", "true") - - // unmarshal response - restxml.UnmarshalMeta(req) - restxml.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "a", *out.Char) - assert.Equal(t, 1.5, *out.Double) - assert.Equal(t, false, *out.FalseBool) - assert.Equal(t, 1.5, *out.Float) - assert.Equal(t, int64(1), *out.Integer) - assert.Equal(t, int64(100), *out.Long) - assert.Equal(t, "string", 
*out.Str) - assert.Equal(t, time.Unix(1.4221728e+09, 0).UTC().String(), out.Timestamp.String()) - assert.Equal(t, true, *out.TrueBool) - -} - -func TestOutputService12ProtocolTestStringCase1(t *testing.T) { - svc := NewOutputService12ProtocolTest(nil) - - buf := bytes.NewReader([]byte("operation result string")) - req, out := svc.OutputService12TestCaseOperation1Request(nil) - req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}} - - // set headers - - // unmarshal response - restxml.UnmarshalMeta(req) - restxml.Unmarshal(req) - assert.NoError(t, req.Error) - - // assert response - assert.NotNil(t, out) // ensure out variable is used - assert.Equal(t, "operation result string", *out.String) - -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/functional_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/functional_test.go deleted file mode 100644 index fbb0e41..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/functional_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package v4_test - -import ( - "net/url" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/internal/test/unit" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/stretchr/testify/assert" -) - -var _ = unit.Imported - -func TestPresignHandler(t *testing.T) { - svc := s3.New(nil) - req, _ := svc.PutObjectRequest(&s3.PutObjectInput{ - Bucket: aws.String("bucket"), - Key: aws.String("key"), - ContentDisposition: aws.String("a+b c$d"), - ACL: aws.String("public-read"), - }) - req.Time = time.Unix(0, 0) - urlstr, err := req.Presign(5 * time.Minute) - - assert.NoError(t, err) - - expectedDate := "19700101T000000Z" - expectedHeaders := "host;x-amz-acl" - expectedSig := "7edcb4e3a1bf12f4989018d75acbe3a7f03df24bd6f3112602d59fc551f0e4e2" - expectedCred := "AKID/19700101/mock-region/s3/aws4_request" - - u, _ := url.Parse(urlstr) - urlQ := u.Query() - assert.Equal(t, expectedSig, urlQ.Get("X-Amz-Signature")) - assert.Equal(t, expectedCred, urlQ.Get("X-Amz-Credential")) - assert.Equal(t, expectedHeaders, urlQ.Get("X-Amz-SignedHeaders")) - assert.Equal(t, expectedDate, urlQ.Get("X-Amz-Date")) - assert.Equal(t, "300", urlQ.Get("X-Amz-Expires")) - - assert.NotContains(t, urlstr, "+") // + encoded as %20 -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go deleted file mode 100644 index 748c37f..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4.go +++ /dev/null @@ -1,364 +0,0 @@ -// Package v4 implements signing for AWS V4 signer -package v4 - -import ( - "crypto/hmac" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/internal/protocol/rest" -) - -const ( - authHeaderPrefix = "AWS4-HMAC-SHA256" - timeFormat = "20060102T150405Z" - shortTimeFormat = "20060102" -) - -var ignoredHeaders = map[string]bool{ - "Authorization": true, - "Content-Type": true, - "Content-Length": true, - "User-Agent": true, -} - -type signer struct { - Request *http.Request - Time time.Time - ExpireTime time.Duration - ServiceName string - Region string - CredValues credentials.Value - Credentials *credentials.Credentials - Query url.Values - Body io.ReadSeeker - Debug aws.LogLevelType - 
Logger aws.Logger - - isPresign bool - formattedTime string - formattedShortTime string - - signedHeaders string - canonicalHeaders string - canonicalString string - credentialString string - stringToSign string - signature string - authorization string -} - -// Sign requests with signature version 4. -// -// Will sign the requests with the service config's Credentials object -// Signing is skipped if the credentials is the credentials.AnonymousCredentials -// object. -func Sign(req *aws.Request) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. - if req.Service.Config.Credentials == credentials.AnonymousCredentials { - return - } - - region := req.Service.SigningRegion - if region == "" { - region = aws.StringValue(req.Service.Config.Region) - } - - name := req.Service.SigningName - if name == "" { - name = req.Service.ServiceName - } - - s := signer{ - Request: req.HTTPRequest, - Time: req.Time, - ExpireTime: req.ExpireTime, - Query: req.HTTPRequest.URL.Query(), - Body: req.Body, - ServiceName: name, - Region: region, - Credentials: req.Service.Config.Credentials, - Debug: req.Service.Config.LogLevel.Value(), - Logger: req.Service.Config.Logger, - } - - req.Error = s.sign() -} - -func (v4 *signer) sign() error { - if v4.ExpireTime != 0 { - v4.isPresign = true - } - - if v4.isRequestSigned() { - if !v4.Credentials.IsExpired() { - // If the request is already signed, and the credentials have not - // expired yet ignore the signing request. - return nil - } - - // The credentials have expired for this request. The current signing - // is invalid, and needs to be request because the request will fail. - if v4.isPresign { - v4.removePresign() - // Update the request's query string to ensure the values stays in - // sync in the case retrieving the new credentials fails. 
- v4.Request.URL.RawQuery = v4.Query.Encode() - } - } - - var err error - v4.CredValues, err = v4.Credentials.Get() - if err != nil { - return err - } - - if v4.isPresign { - v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) - if v4.CredValues.SessionToken != "" { - v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) - } else { - v4.Query.Del("X-Amz-Security-Token") - } - } else if v4.CredValues.SessionToken != "" { - v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) - } - - v4.build() - - if v4.Debug.Matches(aws.LogDebugWithSigning) { - v4.logSigningInfo() - } - - return nil -} - -const logSignInfoMsg = `DEBUG: Request Signiture: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func (v4 *signer) logSigningInfo() { - signedURLMsg := "" - if v4.isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) - } - msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) - v4.Logger.Log(msg) -} - -func (v4 *signer) build() { - v4.buildTime() // no depends - v4.buildCredentialString() // no depends - if v4.isPresign { - v4.buildQuery() // no depends - } - v4.buildCanonicalHeaders() // depends on cred string - v4.buildCanonicalString() // depends on canon headers / signed headers - v4.buildStringToSign() // depends on canon string - v4.buildSignature() // depends on string to sign - - if v4.isPresign { - v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature - } else { - parts := []string{ - authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString, - "SignedHeaders=" + v4.signedHeaders, - "Signature=" + v4.signature, - } - v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) - } -} - -func (v4 *signer) buildTime() { - v4.formattedTime = v4.Time.UTC().Format(timeFormat) - v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) - - if v4.isPresign { - duration := int64(v4.ExpireTime / time.Second) - v4.Query.Set("X-Amz-Date", v4.formattedTime) - v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) - } -} - -func (v4 *signer) buildCredentialString() { - v4.credentialString = strings.Join([]string{ - v4.formattedShortTime, - v4.Region, - v4.ServiceName, - "aws4_request", - }, "/") - - if v4.isPresign { - v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) - } -} - -func (v4 *signer) buildQuery() { - for k, h := range v4.Request.Header { - if strings.HasPrefix(http.CanonicalHeaderKey(k), "X-Amz-") { - continue // never hoist x-amz-* headers, they must be signed - } - if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { - continue // never hoist ignored headers - } - - v4.Request.Header.Del(k) - v4.Query.Del(k) - for _, v := range h { - v4.Query.Add(k, v) - } - } -} - -func (v4 *signer) buildCanonicalHeaders() { - var headers []string - headers = append(headers, "host") - for k := range v4.Request.Header { - if _, ok := ignoredHeaders[http.CanonicalHeaderKey(k)]; ok { - continue // ignored header - } - headers = append(headers, strings.ToLower(k)) - } - sort.Strings(headers) - - v4.signedHeaders = strings.Join(headers, ";") - - if v4.isPresign { - v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) - } - - headerValues := 
make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - headerValues[i] = "host:" + v4.Request.URL.Host - } else { - headerValues[i] = k + ":" + - strings.Join(v4.Request.Header[http.CanonicalHeaderKey(k)], ",") - } - } - - v4.canonicalHeaders = strings.Join(headerValues, "\n") -} - -func (v4 *signer) buildCanonicalString() { - v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) - uri := v4.Request.URL.Opaque - if uri != "" { - uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") - } else { - uri = v4.Request.URL.Path - } - if uri == "" { - uri = "/" - } - - if v4.ServiceName != "s3" { - uri = rest.EscapePath(uri, false) - } - - v4.canonicalString = strings.Join([]string{ - v4.Request.Method, - uri, - v4.Request.URL.RawQuery, - v4.canonicalHeaders + "\n", - v4.signedHeaders, - v4.bodyDigest(), - }, "\n") -} - -func (v4 *signer) buildStringToSign() { - v4.stringToSign = strings.Join([]string{ - authHeaderPrefix, - v4.formattedTime, - v4.credentialString, - hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), - }, "\n") -} - -func (v4 *signer) buildSignature() { - secret := v4.CredValues.SecretAccessKey - date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) - region := makeHmac(date, []byte(v4.Region)) - service := makeHmac(region, []byte(v4.ServiceName)) - credentials := makeHmac(service, []byte("aws4_request")) - signature := makeHmac(credentials, []byte(v4.stringToSign)) - v4.signature = hex.EncodeToString(signature) -} - -func (v4 *signer) bodyDigest() string { - hash := v4.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - if v4.isPresign && v4.ServiceName == "s3" { - hash = "UNSIGNED-PAYLOAD" - } else if v4.Body == nil { - hash = hex.EncodeToString(makeSha256([]byte{})) - } else { - hash = hex.EncodeToString(makeSha256Reader(v4.Body)) - } - v4.Request.Header.Add("X-Amz-Content-Sha256", hash) - } - return hash -} - -// isRequestSigned returns if the request is currently signed or presigned -func (v4 *signer) isRequestSigned() bool { - if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { - return true - } - if v4.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. 
-func (v4 *signer) removePresign() { - v4.Query.Del("X-Amz-Algorithm") - v4.Query.Del("X-Amz-Signature") - v4.Query.Del("X-Amz-Security-Token") - v4.Query.Del("X-Amz-Date") - v4.Query.Del("X-Amz-Expires") - v4.Query.Del("X-Amz-Credential") - v4.Query.Del("X-Amz-SignedHeaders") -} - -func makeHmac(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) []byte { - hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) - - io.Copy(hash, reader) - return hash.Sum(nil) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4_test.go deleted file mode 100644 index adf8e7b..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/signer/v4/v4_test.go +++ /dev/null @@ -1,245 +0,0 @@ -package v4 - -import ( - "net/http" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/stretchr/testify/assert" -) - -func buildSigner(serviceName string, region string, signTime time.Time, expireTime time.Duration, body string) signer { - endpoint := "https://" + serviceName + "." + region + ".amazonaws.com" - reader := strings.NewReader(body) - req, _ := http.NewRequest("POST", endpoint, reader) - req.URL.Opaque = "//example.org/bucket/key-._~,!@#$%^&*()" - req.Header.Add("X-Amz-Target", "prefix.Operation") - req.Header.Add("Content-Type", "application/x-amz-json-1.0") - req.Header.Add("Content-Length", string(len(body))) - req.Header.Add("X-Amz-Meta-Other-Header", "some-value=!@#$%^&* (+)") - - return signer{ - Request: req, - Time: signTime, - ExpireTime: expireTime, - Query: req.URL.Query(), - Body: reader, - ServiceName: serviceName, - Region: region, - Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), - } -} - -func removeWS(text string) string { - text = strings.Replace(text, " ", "", -1) - text = strings.Replace(text, "\n", "", -1) - text = strings.Replace(text, "\t", "", -1) - return text -} - -func assertEqual(t *testing.T, expected, given string) { - if removeWS(expected) != removeWS(given) { - t.Errorf("\nExpected: %s\nGiven: %s", expected, given) - } -} - -func TestPresignRequest(t *testing.T) { - signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 300*time.Second, "{}") - signer.sign() - - expectedDate := "19700101T000000Z" - expectedHeaders := "host;x-amz-meta-other-header;x-amz-target" - expectedSig := "5eeedebf6f995145ce56daa02902d10485246d3defb34f97b973c1f40ab82d36" - expectedCred := "AKID/19700101/us-east-1/dynamodb/aws4_request" - - q := signer.Request.URL.Query() - assert.Equal(t, expectedSig, q.Get("X-Amz-Signature")) - assert.Equal(t, expectedCred, q.Get("X-Amz-Credential")) - assert.Equal(t, expectedHeaders, q.Get("X-Amz-SignedHeaders")) - assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) -} - -func TestSignRequest(t *testing.T) { - signer := buildSigner("dynamodb", "us-east-1", time.Unix(0, 0), 0, "{}") - signer.sign() - - expectedDate := "19700101T000000Z" - expectedSig := "AWS4-HMAC-SHA256 Credential=AKID/19700101/us-east-1/dynamodb/aws4_request, SignedHeaders=host;x-amz-date;x-amz-meta-other-header;x-amz-security-token;x-amz-target, Signature=69ada33fec48180dab153576e4dd80c4e04124f80dda3eccfed8a67c2b91ed5e" - - q := 
signer.Request.Header - assert.Equal(t, expectedSig, q.Get("Authorization")) - assert.Equal(t, expectedDate, q.Get("X-Amz-Date")) -} - -func TestSignEmptyBody(t *testing.T) { - signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "") - signer.Body = nil - signer.sign() - hash := signer.Request.Header.Get("X-Amz-Content-Sha256") - assert.Equal(t, "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", hash) -} - -func TestSignBody(t *testing.T) { - signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello") - signer.sign() - hash := signer.Request.Header.Get("X-Amz-Content-Sha256") - assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash) -} - -func TestSignSeekedBody(t *testing.T) { - signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, " hello") - signer.Body.Read(make([]byte, 3)) // consume first 3 bytes so body is now "hello" - signer.sign() - hash := signer.Request.Header.Get("X-Amz-Content-Sha256") - assert.Equal(t, "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824", hash) - - start, _ := signer.Body.Seek(0, 1) - assert.Equal(t, int64(3), start) -} - -func TestPresignEmptyBodyS3(t *testing.T) { - signer := buildSigner("s3", "us-east-1", time.Now(), 5*time.Minute, "hello") - signer.sign() - hash := signer.Request.Header.Get("X-Amz-Content-Sha256") - assert.Equal(t, "UNSIGNED-PAYLOAD", hash) -} - -func TestSignPrecomputedBodyChecksum(t *testing.T) { - signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "hello") - signer.Request.Header.Set("X-Amz-Content-Sha256", "PRECOMPUTED") - signer.sign() - hash := signer.Request.Header.Get("X-Amz-Content-Sha256") - assert.Equal(t, "PRECOMPUTED", hash) -} - -func TestAnonymousCredentials(t *testing.T) { - r := aws.NewRequest( - aws.NewService(&aws.Config{Credentials: credentials.AnonymousCredentials}), - &aws.Operation{ - Name: "BatchGetItem", - HTTPMethod: "POST", - HTTPPath: "/", - }, - nil, - nil, - ) - Sign(r) - - urlQ := r.HTTPRequest.URL.Query() - assert.Empty(t, urlQ.Get("X-Amz-Signature")) - assert.Empty(t, urlQ.Get("X-Amz-Credential")) - assert.Empty(t, urlQ.Get("X-Amz-SignedHeaders")) - assert.Empty(t, urlQ.Get("X-Amz-Date")) - - hQ := r.HTTPRequest.Header - assert.Empty(t, hQ.Get("Authorization")) - assert.Empty(t, hQ.Get("X-Amz-Date")) -} - -func TestIgnoreResignRequestWithValidCreds(t *testing.T) { - r := aws.NewRequest( - aws.NewService(&aws.Config{ - Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), - Region: aws.String("us-west-2"), - }), - &aws.Operation{ - Name: "BatchGetItem", - HTTPMethod: "POST", - HTTPPath: "/", - }, - nil, - nil, - ) - - Sign(r) - sig := r.HTTPRequest.Header.Get("Authorization") - - Sign(r) - assert.Equal(t, sig, r.HTTPRequest.Header.Get("Authorization")) -} - -func TestIgnorePreResignRequestWithValidCreds(t *testing.T) { - r := aws.NewRequest( - aws.NewService(&aws.Config{ - Credentials: credentials.NewStaticCredentials("AKID", "SECRET", "SESSION"), - Region: aws.String("us-west-2"), - }), - &aws.Operation{ - Name: "BatchGetItem", - HTTPMethod: "POST", - HTTPPath: "/", - }, - nil, - nil, - ) - r.ExpireTime = time.Minute * 10 - - Sign(r) - sig := r.HTTPRequest.Header.Get("X-Amz-Signature") - - Sign(r) - assert.Equal(t, sig, r.HTTPRequest.Header.Get("X-Amz-Signature")) -} - -func TestResignRequestExpiredCreds(t *testing.T) { - creds := credentials.NewStaticCredentials("AKID", "SECRET", "SESSION") - r := aws.NewRequest( - aws.NewService(&aws.Config{Credentials: creds}), - &aws.Operation{ 
- Name: "BatchGetItem", - HTTPMethod: "POST", - HTTPPath: "/", - }, - nil, - nil, - ) - Sign(r) - querySig := r.HTTPRequest.Header.Get("Authorization") - - creds.Expire() - - Sign(r) - assert.NotEqual(t, querySig, r.HTTPRequest.Header.Get("Authorization")) -} - -func TestPreResignRequestExpiredCreds(t *testing.T) { - provider := &credentials.StaticProvider{credentials.Value{"AKID", "SECRET", "SESSION"}} - creds := credentials.NewCredentials(provider) - r := aws.NewRequest( - aws.NewService(&aws.Config{Credentials: creds}), - &aws.Operation{ - Name: "BatchGetItem", - HTTPMethod: "POST", - HTTPPath: "/", - }, - nil, - nil, - ) - r.ExpireTime = time.Minute * 10 - - Sign(r) - querySig := r.HTTPRequest.URL.Query().Get("X-Amz-Signature") - - creds.Expire() - r.Time = time.Now().Add(time.Hour * 48) - - Sign(r) - assert.NotEqual(t, querySig, r.HTTPRequest.URL.Query().Get("X-Amz-Signature")) -} - -func BenchmarkPresignRequest(b *testing.B) { - signer := buildSigner("dynamodb", "us-east-1", time.Now(), 300*time.Second, "{}") - for i := 0; i < b.N; i++ { - signer.sign() - } -} - -func BenchmarkSignRequest(b *testing.B) { - signer := buildSigner("dynamodb", "us-east-1", time.Now(), 0, "{}") - for i := 0; i < b.N; i++ { - signer.sign() - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/api.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/api.go deleted file mode 100644 index e2e9604..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/api.go +++ /dev/null @@ -1,6637 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. - -// Package s3 provides a client for Amazon Simple Storage Service. -package s3 - -import ( - "io" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -const opAbortMultipartUpload = "AbortMultipartUpload" - -// AbortMultipartUploadRequest generates a request for the AbortMultipartUpload operation. -func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *aws.Request, output *AbortMultipartUploadOutput) { - op := &aws.Operation{ - Name: opAbortMultipartUpload, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &AbortMultipartUploadInput{} - } - - req = c.newRequest(op, input, output) - output = &AbortMultipartUploadOutput{} - req.Data = output - return -} - -// Aborts a multipart upload. -// -// To verify that all parts have been removed, so you don't get charged for -// the part storage, you should call the List Parts operation and ensure the -// parts list is empty. -func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { - req, out := c.AbortMultipartUploadRequest(input) - err := req.Send() - return out, err -} - -const opCompleteMultipartUpload = "CompleteMultipartUpload" - -// CompleteMultipartUploadRequest generates a request for the CompleteMultipartUpload operation. -func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *aws.Request, output *CompleteMultipartUploadOutput) { - op := &aws.Operation{ - Name: opCompleteMultipartUpload, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &CompleteMultipartUploadInput{} - } - - req = c.newRequest(op, input, output) - output = &CompleteMultipartUploadOutput{} - req.Data = output - return -} - -// Completes a multipart upload by assembling previously uploaded parts. 
-func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { - req, out := c.CompleteMultipartUploadRequest(input) - err := req.Send() - return out, err -} - -const opCopyObject = "CopyObject" - -// CopyObjectRequest generates a request for the CopyObject operation. -func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *aws.Request, output *CopyObjectOutput) { - op := &aws.Operation{ - Name: opCopyObject, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &CopyObjectInput{} - } - - req = c.newRequest(op, input, output) - output = &CopyObjectOutput{} - req.Data = output - return -} - -// Creates a copy of an object that is already stored in Amazon S3. -func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { - req, out := c.CopyObjectRequest(input) - err := req.Send() - return out, err -} - -const opCreateBucket = "CreateBucket" - -// CreateBucketRequest generates a request for the CreateBucket operation. -func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *aws.Request, output *CreateBucketOutput) { - op := &aws.Operation{ - Name: opCreateBucket, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &CreateBucketInput{} - } - - req = c.newRequest(op, input, output) - output = &CreateBucketOutput{} - req.Data = output - return -} - -// Creates a new bucket. -func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { - req, out := c.CreateBucketRequest(input) - err := req.Send() - return out, err -} - -const opCreateMultipartUpload = "CreateMultipartUpload" - -// CreateMultipartUploadRequest generates a request for the CreateMultipartUpload operation. -func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *aws.Request, output *CreateMultipartUploadOutput) { - op := &aws.Operation{ - Name: opCreateMultipartUpload, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?uploads", - } - - if input == nil { - input = &CreateMultipartUploadInput{} - } - - req = c.newRequest(op, input, output) - output = &CreateMultipartUploadOutput{} - req.Data = output - return -} - -// Initiates a multipart upload and returns an upload ID. -// -// Note: After you initiate multipart upload and upload one or more parts, -// you must either complete or abort multipart upload in order to stop getting -// charged for storage of the uploaded parts. Only after you either complete -// or abort multipart upload, Amazon S3 frees up the parts storage and stops -// charging you for the parts storage. -func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { - req, out := c.CreateMultipartUploadRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucket = "DeleteBucket" - -// DeleteBucketRequest generates a request for the DeleteBucket operation. -func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *aws.Request, output *DeleteBucketOutput) { - op := &aws.Operation{ - Name: opDeleteBucket, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &DeleteBucketInput{} - } - - req = c.newRequest(op, input, output) - output = &DeleteBucketOutput{} - req.Data = output - return -} - -// Deletes the bucket. All objects (including all object versions and Delete -// Markers) in the bucket must be deleted before the bucket itself can be deleted. 
-func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { - req, out := c.DeleteBucketRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucketCORS = "DeleteBucketCors" - -// DeleteBucketCORSRequest generates a request for the DeleteBucketCORS operation. -func (c *S3) DeleteBucketCORSRequest(input *DeleteBucketCORSInput) (req *aws.Request, output *DeleteBucketCORSOutput) { - op := &aws.Operation{ - Name: opDeleteBucketCORS, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &DeleteBucketCORSInput{} - } - - req = c.newRequest(op, input, output) - output = &DeleteBucketCORSOutput{} - req.Data = output - return -} - -// Deletes the cors configuration information set for the bucket. -func (c *S3) DeleteBucketCORS(input *DeleteBucketCORSInput) (*DeleteBucketCORSOutput, error) { - req, out := c.DeleteBucketCORSRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucketLifecycle = "DeleteBucketLifecycle" - -// DeleteBucketLifecycleRequest generates a request for the DeleteBucketLifecycle operation. -func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *aws.Request, output *DeleteBucketLifecycleOutput) { - op := &aws.Operation{ - Name: opDeleteBucketLifecycle, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &DeleteBucketLifecycleInput{} - } - - req = c.newRequest(op, input, output) - output = &DeleteBucketLifecycleOutput{} - req.Data = output - return -} - -// Deletes the lifecycle configuration from the bucket. -func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { - req, out := c.DeleteBucketLifecycleRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucketPolicy = "DeleteBucketPolicy" - -// DeleteBucketPolicyRequest generates a request for the DeleteBucketPolicy operation. -func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *aws.Request, output *DeleteBucketPolicyOutput) { - op := &aws.Operation{ - Name: opDeleteBucketPolicy, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &DeleteBucketPolicyInput{} - } - - req = c.newRequest(op, input, output) - output = &DeleteBucketPolicyOutput{} - req.Data = output - return -} - -// Deletes the policy from the bucket. -func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { - req, out := c.DeleteBucketPolicyRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucketReplication = "DeleteBucketReplication" - -// DeleteBucketReplicationRequest generates a request for the DeleteBucketReplication operation. 
-func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *aws.Request, output *DeleteBucketReplicationOutput) { - op := &aws.Operation{ - Name: opDeleteBucketReplication, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?replication", - } - - if input == nil { - input = &DeleteBucketReplicationInput{} - } - - req = c.newRequest(op, input, output) - output = &DeleteBucketReplicationOutput{} - req.Data = output - return -} - -func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { - req, out := c.DeleteBucketReplicationRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucketTagging = "DeleteBucketTagging" - -// DeleteBucketTaggingRequest generates a request for the DeleteBucketTagging operation. -func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *aws.Request, output *DeleteBucketTaggingOutput) { - op := &aws.Operation{ - Name: opDeleteBucketTagging, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &DeleteBucketTaggingInput{} - } - - req = c.newRequest(op, input, output) - output = &DeleteBucketTaggingOutput{} - req.Data = output - return -} - -// Deletes the tags from the bucket. -func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { - req, out := c.DeleteBucketTaggingRequest(input) - err := req.Send() - return out, err -} - -const opDeleteBucketWebsite = "DeleteBucketWebsite" - -// DeleteBucketWebsiteRequest generates a request for the DeleteBucketWebsite operation. -func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *aws.Request, output *DeleteBucketWebsiteOutput) { - op := &aws.Operation{ - Name: opDeleteBucketWebsite, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &DeleteBucketWebsiteInput{} - } - - req = c.newRequest(op, input, output) - output = &DeleteBucketWebsiteOutput{} - req.Data = output - return -} - -// This operation removes the website configuration from the bucket. -func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { - req, out := c.DeleteBucketWebsiteRequest(input) - err := req.Send() - return out, err -} - -const opDeleteObject = "DeleteObject" - -// DeleteObjectRequest generates a request for the DeleteObject operation. -func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *aws.Request, output *DeleteObjectOutput) { - op := &aws.Operation{ - Name: opDeleteObject, - HTTPMethod: "DELETE", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &DeleteObjectInput{} - } - - req = c.newRequest(op, input, output) - output = &DeleteObjectOutput{} - req.Data = output - return -} - -// Removes the null version (if there is one) of an object and inserts a delete -// marker, which becomes the latest version of the object. If there isn't a -// null version, Amazon S3 does not remove any objects. -func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { - req, out := c.DeleteObjectRequest(input) - err := req.Send() - return out, err -} - -const opDeleteObjects = "DeleteObjects" - -// DeleteObjectsRequest generates a request for the DeleteObjects operation. 
-func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *aws.Request, output *DeleteObjectsOutput) { - op := &aws.Operation{ - Name: opDeleteObjects, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}?delete", - } - - if input == nil { - input = &DeleteObjectsInput{} - } - - req = c.newRequest(op, input, output) - output = &DeleteObjectsOutput{} - req.Data = output - return -} - -// This operation enables you to delete multiple objects from a bucket using -// a single HTTP request. You may specify up to 1000 keys. -func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { - req, out := c.DeleteObjectsRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketACL = "GetBucketAcl" - -// GetBucketACLRequest generates a request for the GetBucketACL operation. -func (c *S3) GetBucketACLRequest(input *GetBucketACLInput) (req *aws.Request, output *GetBucketACLOutput) { - op := &aws.Operation{ - Name: opGetBucketACL, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?acl", - } - - if input == nil { - input = &GetBucketACLInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketACLOutput{} - req.Data = output - return -} - -// Gets the access control policy for the bucket. -func (c *S3) GetBucketACL(input *GetBucketACLInput) (*GetBucketACLOutput, error) { - req, out := c.GetBucketACLRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketCORS = "GetBucketCors" - -// GetBucketCORSRequest generates a request for the GetBucketCORS operation. -func (c *S3) GetBucketCORSRequest(input *GetBucketCORSInput) (req *aws.Request, output *GetBucketCORSOutput) { - op := &aws.Operation{ - Name: opGetBucketCORS, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &GetBucketCORSInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketCORSOutput{} - req.Data = output - return -} - -// Returns the cors configuration for the bucket. -func (c *S3) GetBucketCORS(input *GetBucketCORSInput) (*GetBucketCORSOutput, error) { - req, out := c.GetBucketCORSRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketLifecycle = "GetBucketLifecycle" - -// GetBucketLifecycleRequest generates a request for the GetBucketLifecycle operation. -func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *aws.Request, output *GetBucketLifecycleOutput) { - op := &aws.Operation{ - Name: opGetBucketLifecycle, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &GetBucketLifecycleInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketLifecycleOutput{} - req.Data = output - return -} - -// Returns the lifecycle configuration information set on the bucket. -func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { - req, out := c.GetBucketLifecycleRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketLocation = "GetBucketLocation" - -// GetBucketLocationRequest generates a request for the GetBucketLocation operation. 
-func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *aws.Request, output *GetBucketLocationOutput) { - op := &aws.Operation{ - Name: opGetBucketLocation, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?location", - } - - if input == nil { - input = &GetBucketLocationInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketLocationOutput{} - req.Data = output - return -} - -// Returns the region the bucket resides in. -func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { - req, out := c.GetBucketLocationRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketLogging = "GetBucketLogging" - -// GetBucketLoggingRequest generates a request for the GetBucketLogging operation. -func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *aws.Request, output *GetBucketLoggingOutput) { - op := &aws.Operation{ - Name: opGetBucketLogging, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?logging", - } - - if input == nil { - input = &GetBucketLoggingInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketLoggingOutput{} - req.Data = output - return -} - -// Returns the logging status of a bucket and the permissions users have to -// view and modify that status. To use GET, you must be the bucket owner. -func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { - req, out := c.GetBucketLoggingRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketNotification = "GetBucketNotification" - -// GetBucketNotificationRequest generates a request for the GetBucketNotification operation. -func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *aws.Request, output *NotificationConfigurationDeprecated) { - op := &aws.Operation{ - Name: opGetBucketNotification, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &GetBucketNotificationConfigurationRequest{} - } - - req = c.newRequest(op, input, output) - output = &NotificationConfigurationDeprecated{} - req.Data = output - return -} - -// Deprecated, see the GetBucketNotificationConfiguration operation. -func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { - req, out := c.GetBucketNotificationRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" - -// GetBucketNotificationConfigurationRequest generates a request for the GetBucketNotificationConfiguration operation. -func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *aws.Request, output *NotificationConfiguration) { - op := &aws.Operation{ - Name: opGetBucketNotificationConfiguration, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &GetBucketNotificationConfigurationRequest{} - } - - req = c.newRequest(op, input, output) - output = &NotificationConfiguration{} - req.Data = output - return -} - -// Returns the notification configuration of a bucket. 
-func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { - req, out := c.GetBucketNotificationConfigurationRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketPolicy = "GetBucketPolicy" - -// GetBucketPolicyRequest generates a request for the GetBucketPolicy operation. -func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *aws.Request, output *GetBucketPolicyOutput) { - op := &aws.Operation{ - Name: opGetBucketPolicy, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &GetBucketPolicyInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketPolicyOutput{} - req.Data = output - return -} - -// Returns the policy of a specified bucket. -func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { - req, out := c.GetBucketPolicyRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketReplication = "GetBucketReplication" - -// GetBucketReplicationRequest generates a request for the GetBucketReplication operation. -func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *aws.Request, output *GetBucketReplicationOutput) { - op := &aws.Operation{ - Name: opGetBucketReplication, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?replication", - } - - if input == nil { - input = &GetBucketReplicationInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketReplicationOutput{} - req.Data = output - return -} - -func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { - req, out := c.GetBucketReplicationRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketRequestPayment = "GetBucketRequestPayment" - -// GetBucketRequestPaymentRequest generates a request for the GetBucketRequestPayment operation. -func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *aws.Request, output *GetBucketRequestPaymentOutput) { - op := &aws.Operation{ - Name: opGetBucketRequestPayment, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?requestPayment", - } - - if input == nil { - input = &GetBucketRequestPaymentInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketRequestPaymentOutput{} - req.Data = output - return -} - -// Returns the request payment configuration of a bucket. -func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { - req, out := c.GetBucketRequestPaymentRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketTagging = "GetBucketTagging" - -// GetBucketTaggingRequest generates a request for the GetBucketTagging operation. -func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *aws.Request, output *GetBucketTaggingOutput) { - op := &aws.Operation{ - Name: opGetBucketTagging, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &GetBucketTaggingInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketTaggingOutput{} - req.Data = output - return -} - -// Returns the tag set associated with the bucket. 
-func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { - req, out := c.GetBucketTaggingRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketVersioning = "GetBucketVersioning" - -// GetBucketVersioningRequest generates a request for the GetBucketVersioning operation. -func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *aws.Request, output *GetBucketVersioningOutput) { - op := &aws.Operation{ - Name: opGetBucketVersioning, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?versioning", - } - - if input == nil { - input = &GetBucketVersioningInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketVersioningOutput{} - req.Data = output - return -} - -// Returns the versioning state of a bucket. -func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { - req, out := c.GetBucketVersioningRequest(input) - err := req.Send() - return out, err -} - -const opGetBucketWebsite = "GetBucketWebsite" - -// GetBucketWebsiteRequest generates a request for the GetBucketWebsite operation. -func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *aws.Request, output *GetBucketWebsiteOutput) { - op := &aws.Operation{ - Name: opGetBucketWebsite, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &GetBucketWebsiteInput{} - } - - req = c.newRequest(op, input, output) - output = &GetBucketWebsiteOutput{} - req.Data = output - return -} - -// Returns the website configuration for a bucket. -func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { - req, out := c.GetBucketWebsiteRequest(input) - err := req.Send() - return out, err -} - -const opGetObject = "GetObject" - -// GetObjectRequest generates a request for the GetObject operation. -func (c *S3) GetObjectRequest(input *GetObjectInput) (req *aws.Request, output *GetObjectOutput) { - op := &aws.Operation{ - Name: opGetObject, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &GetObjectInput{} - } - - req = c.newRequest(op, input, output) - output = &GetObjectOutput{} - req.Data = output - return -} - -// Retrieves objects from Amazon S3. -func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { - req, out := c.GetObjectRequest(input) - err := req.Send() - return out, err -} - -const opGetObjectACL = "GetObjectAcl" - -// GetObjectACLRequest generates a request for the GetObjectACL operation. -func (c *S3) GetObjectACLRequest(input *GetObjectACLInput) (req *aws.Request, output *GetObjectACLOutput) { - op := &aws.Operation{ - Name: opGetObjectACL, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?acl", - } - - if input == nil { - input = &GetObjectACLInput{} - } - - req = c.newRequest(op, input, output) - output = &GetObjectACLOutput{} - req.Data = output - return -} - -// Returns the access control list (ACL) of an object. -func (c *S3) GetObjectACL(input *GetObjectACLInput) (*GetObjectACLOutput, error) { - req, out := c.GetObjectACLRequest(input) - err := req.Send() - return out, err -} - -const opGetObjectTorrent = "GetObjectTorrent" - -// GetObjectTorrentRequest generates a request for the GetObjectTorrent operation. 
-func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *aws.Request, output *GetObjectTorrentOutput) { - op := &aws.Operation{ - Name: opGetObjectTorrent, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}?torrent", - } - - if input == nil { - input = &GetObjectTorrentInput{} - } - - req = c.newRequest(op, input, output) - output = &GetObjectTorrentOutput{} - req.Data = output - return -} - -// Return torrent files from a bucket. -func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { - req, out := c.GetObjectTorrentRequest(input) - err := req.Send() - return out, err -} - -const opHeadBucket = "HeadBucket" - -// HeadBucketRequest generates a request for the HeadBucket operation. -func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *aws.Request, output *HeadBucketOutput) { - op := &aws.Operation{ - Name: opHeadBucket, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}", - } - - if input == nil { - input = &HeadBucketInput{} - } - - req = c.newRequest(op, input, output) - output = &HeadBucketOutput{} - req.Data = output - return -} - -// This operation is useful to determine if a bucket exists and you have permission -// to access it. -func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { - req, out := c.HeadBucketRequest(input) - err := req.Send() - return out, err -} - -const opHeadObject = "HeadObject" - -// HeadObjectRequest generates a request for the HeadObject operation. -func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *aws.Request, output *HeadObjectOutput) { - op := &aws.Operation{ - Name: opHeadObject, - HTTPMethod: "HEAD", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &HeadObjectInput{} - } - - req = c.newRequest(op, input, output) - output = &HeadObjectOutput{} - req.Data = output - return -} - -// The HEAD operation retrieves metadata from an object without returning the -// object itself. This operation is useful if you're only interested in an object's -// metadata. To use HEAD, you must have READ access to the object. -func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { - req, out := c.HeadObjectRequest(input) - err := req.Send() - return out, err -} - -const opListBuckets = "ListBuckets" - -// ListBucketsRequest generates a request for the ListBuckets operation. -func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *aws.Request, output *ListBucketsOutput) { - op := &aws.Operation{ - Name: opListBuckets, - HTTPMethod: "GET", - HTTPPath: "/", - } - - if input == nil { - input = &ListBucketsInput{} - } - - req = c.newRequest(op, input, output) - output = &ListBucketsOutput{} - req.Data = output - return -} - -// Returns a list of all buckets owned by the authenticated sender of the request. -func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { - req, out := c.ListBucketsRequest(input) - err := req.Send() - return out, err -} - -const opListMultipartUploads = "ListMultipartUploads" - -// ListMultipartUploadsRequest generates a request for the ListMultipartUploads operation. 
-func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *aws.Request, output *ListMultipartUploadsOutput) { - op := &aws.Operation{ - Name: opListMultipartUploads, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?uploads", - Paginator: &aws.Paginator{ - InputTokens: []string{"KeyMarker", "UploadIdMarker"}, - OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, - LimitToken: "MaxUploads", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListMultipartUploadsInput{} - } - - req = c.newRequest(op, input, output) - output = &ListMultipartUploadsOutput{} - req.Data = output - return -} - -// This operation lists in-progress multipart uploads. -func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { - req, out := c.ListMultipartUploadsRequest(input) - err := req.Send() - return out, err -} - -func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(p *ListMultipartUploadsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListMultipartUploadsRequest(input) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListMultipartUploadsOutput), lastPage) - }) -} - -const opListObjectVersions = "ListObjectVersions" - -// ListObjectVersionsRequest generates a request for the ListObjectVersions operation. -func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *aws.Request, output *ListObjectVersionsOutput) { - op := &aws.Operation{ - Name: opListObjectVersions, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}?versions", - Paginator: &aws.Paginator{ - InputTokens: []string{"KeyMarker", "VersionIdMarker"}, - OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, - LimitToken: "MaxKeys", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListObjectVersionsInput{} - } - - req = c.newRequest(op, input, output) - output = &ListObjectVersionsOutput{} - req.Data = output - return -} - -// Returns metadata about all of the versions of objects in a bucket. -func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { - req, out := c.ListObjectVersionsRequest(input) - err := req.Send() - return out, err -} - -func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(p *ListObjectVersionsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListObjectVersionsRequest(input) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListObjectVersionsOutput), lastPage) - }) -} - -const opListObjects = "ListObjects" - -// ListObjectsRequest generates a request for the ListObjects operation. -func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *aws.Request, output *ListObjectsOutput) { - op := &aws.Operation{ - Name: opListObjects, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}", - Paginator: &aws.Paginator{ - InputTokens: []string{"Marker"}, - OutputTokens: []string{"NextMarker || Contents[-1].Key"}, - LimitToken: "MaxKeys", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListObjectsInput{} - } - - req = c.newRequest(op, input, output) - output = &ListObjectsOutput{} - req.Data = output - return -} - -// Returns some or all (up to 1000) of the objects in a bucket. You can use -// the request parameters as selection criteria to return a subset of the objects -// in a bucket. 
-func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { - req, out := c.ListObjectsRequest(input) - err := req.Send() - return out, err -} - -func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(p *ListObjectsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListObjectsRequest(input) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListObjectsOutput), lastPage) - }) -} - -const opListParts = "ListParts" - -// ListPartsRequest generates a request for the ListParts operation. -func (c *S3) ListPartsRequest(input *ListPartsInput) (req *aws.Request, output *ListPartsOutput) { - op := &aws.Operation{ - Name: opListParts, - HTTPMethod: "GET", - HTTPPath: "/{Bucket}/{Key+}", - Paginator: &aws.Paginator{ - InputTokens: []string{"PartNumberMarker"}, - OutputTokens: []string{"NextPartNumberMarker"}, - LimitToken: "MaxParts", - TruncationToken: "IsTruncated", - }, - } - - if input == nil { - input = &ListPartsInput{} - } - - req = c.newRequest(op, input, output) - output = &ListPartsOutput{} - req.Data = output - return -} - -// Lists the parts that have been uploaded for a specific multipart upload. -func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { - req, out := c.ListPartsRequest(input) - err := req.Send() - return out, err -} - -func (c *S3) ListPartsPages(input *ListPartsInput, fn func(p *ListPartsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.ListPartsRequest(input) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*ListPartsOutput), lastPage) - }) -} - -const opPutBucketACL = "PutBucketAcl" - -// PutBucketACLRequest generates a request for the PutBucketACL operation. -func (c *S3) PutBucketACLRequest(input *PutBucketACLInput) (req *aws.Request, output *PutBucketACLOutput) { - op := &aws.Operation{ - Name: opPutBucketACL, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?acl", - } - - if input == nil { - input = &PutBucketACLInput{} - } - - req = c.newRequest(op, input, output) - output = &PutBucketACLOutput{} - req.Data = output - return -} - -// Sets the permissions on a bucket using access control lists (ACL). -func (c *S3) PutBucketACL(input *PutBucketACLInput) (*PutBucketACLOutput, error) { - req, out := c.PutBucketACLRequest(input) - err := req.Send() - return out, err -} - -const opPutBucketCORS = "PutBucketCors" - -// PutBucketCORSRequest generates a request for the PutBucketCORS operation. -func (c *S3) PutBucketCORSRequest(input *PutBucketCORSInput) (req *aws.Request, output *PutBucketCORSOutput) { - op := &aws.Operation{ - Name: opPutBucketCORS, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?cors", - } - - if input == nil { - input = &PutBucketCORSInput{} - } - - req = c.newRequest(op, input, output) - output = &PutBucketCORSOutput{} - req.Data = output - return -} - -// Sets the cors configuration for a bucket. -func (c *S3) PutBucketCORS(input *PutBucketCORSInput) (*PutBucketCORSOutput, error) { - req, out := c.PutBucketCORSRequest(input) - err := req.Send() - return out, err -} - -const opPutBucketLifecycle = "PutBucketLifecycle" - -// PutBucketLifecycleRequest generates a request for the PutBucketLifecycle operation. 
-func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *aws.Request, output *PutBucketLifecycleOutput) { - op := &aws.Operation{ - Name: opPutBucketLifecycle, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?lifecycle", - } - - if input == nil { - input = &PutBucketLifecycleInput{} - } - - req = c.newRequest(op, input, output) - output = &PutBucketLifecycleOutput{} - req.Data = output - return -} - -// Sets lifecycle configuration for your bucket. If a lifecycle configuration -// exists, it replaces it. -func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { - req, out := c.PutBucketLifecycleRequest(input) - err := req.Send() - return out, err -} - -const opPutBucketLogging = "PutBucketLogging" - -// PutBucketLoggingRequest generates a request for the PutBucketLogging operation. -func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *aws.Request, output *PutBucketLoggingOutput) { - op := &aws.Operation{ - Name: opPutBucketLogging, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?logging", - } - - if input == nil { - input = &PutBucketLoggingInput{} - } - - req = c.newRequest(op, input, output) - output = &PutBucketLoggingOutput{} - req.Data = output - return -} - -// Set the logging parameters for a bucket and to specify permissions for who -// can view and modify the logging parameters. To set the logging status of -// a bucket, you must be the bucket owner. -func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { - req, out := c.PutBucketLoggingRequest(input) - err := req.Send() - return out, err -} - -const opPutBucketNotification = "PutBucketNotification" - -// PutBucketNotificationRequest generates a request for the PutBucketNotification operation. -func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *aws.Request, output *PutBucketNotificationOutput) { - op := &aws.Operation{ - Name: opPutBucketNotification, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &PutBucketNotificationInput{} - } - - req = c.newRequest(op, input, output) - output = &PutBucketNotificationOutput{} - req.Data = output - return -} - -// Deprecated, see the PutBucketNotificationConfiguraiton operation. -func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { - req, out := c.PutBucketNotificationRequest(input) - err := req.Send() - return out, err -} - -const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" - -// PutBucketNotificationConfigurationRequest generates a request for the PutBucketNotificationConfiguration operation. -func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *aws.Request, output *PutBucketNotificationConfigurationOutput) { - op := &aws.Operation{ - Name: opPutBucketNotificationConfiguration, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?notification", - } - - if input == nil { - input = &PutBucketNotificationConfigurationInput{} - } - - req = c.newRequest(op, input, output) - output = &PutBucketNotificationConfigurationOutput{} - req.Data = output - return -} - -// Enables notifications of specified events for a bucket. 
-func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { - req, out := c.PutBucketNotificationConfigurationRequest(input) - err := req.Send() - return out, err -} - -const opPutBucketPolicy = "PutBucketPolicy" - -// PutBucketPolicyRequest generates a request for the PutBucketPolicy operation. -func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *aws.Request, output *PutBucketPolicyOutput) { - op := &aws.Operation{ - Name: opPutBucketPolicy, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?policy", - } - - if input == nil { - input = &PutBucketPolicyInput{} - } - - req = c.newRequest(op, input, output) - output = &PutBucketPolicyOutput{} - req.Data = output - return -} - -// Replaces a policy on a bucket. If the bucket already has a policy, the one -// in this request completely replaces it. -func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { - req, out := c.PutBucketPolicyRequest(input) - err := req.Send() - return out, err -} - -const opPutBucketReplication = "PutBucketReplication" - -// PutBucketReplicationRequest generates a request for the PutBucketReplication operation. -func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *aws.Request, output *PutBucketReplicationOutput) { - op := &aws.Operation{ - Name: opPutBucketReplication, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?replication", - } - - if input == nil { - input = &PutBucketReplicationInput{} - } - - req = c.newRequest(op, input, output) - output = &PutBucketReplicationOutput{} - req.Data = output - return -} - -// Creates a new replication configuration (or replaces an existing one, if -// present). -func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { - req, out := c.PutBucketReplicationRequest(input) - err := req.Send() - return out, err -} - -const opPutBucketRequestPayment = "PutBucketRequestPayment" - -// PutBucketRequestPaymentRequest generates a request for the PutBucketRequestPayment operation. -func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *aws.Request, output *PutBucketRequestPaymentOutput) { - op := &aws.Operation{ - Name: opPutBucketRequestPayment, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?requestPayment", - } - - if input == nil { - input = &PutBucketRequestPaymentInput{} - } - - req = c.newRequest(op, input, output) - output = &PutBucketRequestPaymentOutput{} - req.Data = output - return -} - -// Sets the request payment configuration for a bucket. By default, the bucket -// owner pays for downloads from the bucket. This configuration parameter enables -// the bucket owner (only) to specify that the person requesting the download -// will be charged for the download. Documentation on requester pays buckets -// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html -func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { - req, out := c.PutBucketRequestPaymentRequest(input) - err := req.Send() - return out, err -} - -const opPutBucketTagging = "PutBucketTagging" - -// PutBucketTaggingRequest generates a request for the PutBucketTagging operation. 
-func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *aws.Request, output *PutBucketTaggingOutput) { - op := &aws.Operation{ - Name: opPutBucketTagging, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?tagging", - } - - if input == nil { - input = &PutBucketTaggingInput{} - } - - req = c.newRequest(op, input, output) - output = &PutBucketTaggingOutput{} - req.Data = output - return -} - -// Sets the tags for a bucket. -func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { - req, out := c.PutBucketTaggingRequest(input) - err := req.Send() - return out, err -} - -const opPutBucketVersioning = "PutBucketVersioning" - -// PutBucketVersioningRequest generates a request for the PutBucketVersioning operation. -func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *aws.Request, output *PutBucketVersioningOutput) { - op := &aws.Operation{ - Name: opPutBucketVersioning, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?versioning", - } - - if input == nil { - input = &PutBucketVersioningInput{} - } - - req = c.newRequest(op, input, output) - output = &PutBucketVersioningOutput{} - req.Data = output - return -} - -// Sets the versioning state of an existing bucket. To set the versioning state, -// you must be the bucket owner. -func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { - req, out := c.PutBucketVersioningRequest(input) - err := req.Send() - return out, err -} - -const opPutBucketWebsite = "PutBucketWebsite" - -// PutBucketWebsiteRequest generates a request for the PutBucketWebsite operation. -func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *aws.Request, output *PutBucketWebsiteOutput) { - op := &aws.Operation{ - Name: opPutBucketWebsite, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}?website", - } - - if input == nil { - input = &PutBucketWebsiteInput{} - } - - req = c.newRequest(op, input, output) - output = &PutBucketWebsiteOutput{} - req.Data = output - return -} - -// Set the website configuration for a bucket. -func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { - req, out := c.PutBucketWebsiteRequest(input) - err := req.Send() - return out, err -} - -const opPutObject = "PutObject" - -// PutObjectRequest generates a request for the PutObject operation. -func (c *S3) PutObjectRequest(input *PutObjectInput) (req *aws.Request, output *PutObjectOutput) { - op := &aws.Operation{ - Name: opPutObject, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &PutObjectInput{} - } - - req = c.newRequest(op, input, output) - output = &PutObjectOutput{} - req.Data = output - return -} - -// Adds an object to a bucket. -func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { - req, out := c.PutObjectRequest(input) - err := req.Send() - return out, err -} - -const opPutObjectACL = "PutObjectAcl" - -// PutObjectACLRequest generates a request for the PutObjectACL operation. 
-func (c *S3) PutObjectACLRequest(input *PutObjectACLInput) (req *aws.Request, output *PutObjectACLOutput) { - op := &aws.Operation{ - Name: opPutObjectACL, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}?acl", - } - - if input == nil { - input = &PutObjectACLInput{} - } - - req = c.newRequest(op, input, output) - output = &PutObjectACLOutput{} - req.Data = output - return -} - -// uses the acl subresource to set the access control list (ACL) permissions -// for an object that already exists in a bucket -func (c *S3) PutObjectACL(input *PutObjectACLInput) (*PutObjectACLOutput, error) { - req, out := c.PutObjectACLRequest(input) - err := req.Send() - return out, err -} - -const opRestoreObject = "RestoreObject" - -// RestoreObjectRequest generates a request for the RestoreObject operation. -func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *aws.Request, output *RestoreObjectOutput) { - op := &aws.Operation{ - Name: opRestoreObject, - HTTPMethod: "POST", - HTTPPath: "/{Bucket}/{Key+}?restore", - } - - if input == nil { - input = &RestoreObjectInput{} - } - - req = c.newRequest(op, input, output) - output = &RestoreObjectOutput{} - req.Data = output - return -} - -// Restores an archived copy of an object back into Amazon S3 -func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { - req, out := c.RestoreObjectRequest(input) - err := req.Send() - return out, err -} - -const opUploadPart = "UploadPart" - -// UploadPartRequest generates a request for the UploadPart operation. -func (c *S3) UploadPartRequest(input *UploadPartInput) (req *aws.Request, output *UploadPartOutput) { - op := &aws.Operation{ - Name: opUploadPart, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &UploadPartInput{} - } - - req = c.newRequest(op, input, output) - output = &UploadPartOutput{} - req.Data = output - return -} - -// Uploads a part in a multipart upload. -// -// Note: After you initiate multipart upload and upload one or more parts, -// you must either complete or abort multipart upload in order to stop getting -// charged for storage of the uploaded parts. Only after you either complete -// or abort multipart upload, Amazon S3 frees up the parts storage and stops -// charging you for the parts storage. -func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { - req, out := c.UploadPartRequest(input) - err := req.Send() - return out, err -} - -const opUploadPartCopy = "UploadPartCopy" - -// UploadPartCopyRequest generates a request for the UploadPartCopy operation. -func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *aws.Request, output *UploadPartCopyOutput) { - op := &aws.Operation{ - Name: opUploadPartCopy, - HTTPMethod: "PUT", - HTTPPath: "/{Bucket}/{Key+}", - } - - if input == nil { - input = &UploadPartCopyInput{} - } - - req = c.newRequest(op, input, output) - output = &UploadPartCopyOutput{} - req.Data = output - return -} - -// Uploads a part by copying data from an existing object as data source. -func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { - req, out := c.UploadPartCopyRequest(input) - err := req.Send() - return out, err -} - -type AbortMultipartUploadInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. 
Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` - - metadataAbortMultipartUploadInput `json:"-" xml:"-"` -} - -type metadataAbortMultipartUploadInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s AbortMultipartUploadInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AbortMultipartUploadInput) GoString() string { - return s.String() -} - -type AbortMultipartUploadOutput struct { - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - metadataAbortMultipartUploadOutput `json:"-" xml:"-"` -} - -type metadataAbortMultipartUploadOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s AbortMultipartUploadOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AbortMultipartUploadOutput) GoString() string { - return s.String() -} - -type AccessControlPolicy struct { - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - - Owner *Owner `type:"structure"` - - metadataAccessControlPolicy `json:"-" xml:"-"` -} - -type metadataAccessControlPolicy struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s AccessControlPolicy) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s AccessControlPolicy) GoString() string { - return s.String() -} - -type Bucket struct { - // Date the bucket was created. - CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // The name of the bucket. 
- Name *string `type:"string"` - - metadataBucket `json:"-" xml:"-"` -} - -type metadataBucket struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Bucket) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Bucket) GoString() string { - return s.String() -} - -type BucketLoggingStatus struct { - LoggingEnabled *LoggingEnabled `type:"structure"` - - metadataBucketLoggingStatus `json:"-" xml:"-"` -} - -type metadataBucketLoggingStatus struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s BucketLoggingStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s BucketLoggingStatus) GoString() string { - return s.String() -} - -type CORSConfiguration struct { - CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` - - metadataCORSConfiguration `json:"-" xml:"-"` -} - -type metadataCORSConfiguration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CORSConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CORSConfiguration) GoString() string { - return s.String() -} - -type CORSRule struct { - // Specifies which headers are allowed in a pre-flight OPTIONS request. - AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` - - // Identifies HTTP methods that the domain/origin specified in the rule is allowed - // to execute. - AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true"` - - // One or more origins you want customers to be able to access the bucket from. - AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true"` - - // One or more headers in the response that you want customers to be able to - // access from their applications (for example, from a JavaScript XMLHttpRequest - // object). - ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` - - // The time in seconds that your browser is to cache the preflight response - // for the specified resource. - MaxAgeSeconds *int64 `type:"integer"` - - metadataCORSRule `json:"-" xml:"-"` -} - -type metadataCORSRule struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CORSRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CORSRule) GoString() string { - return s.String() -} - -type CloudFunctionConfiguration struct { - CloudFunction *string `type:"string"` - - // Bucket event for which to send notifications. - Event *string `type:"string" enum:"Event"` - - Events []*string `locationName:"Event" type:"list" flattened:"true"` - - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. 
- ID *string `locationName:"Id" type:"string"` - - InvocationRole *string `type:"string"` - - metadataCloudFunctionConfiguration `json:"-" xml:"-"` -} - -type metadataCloudFunctionConfiguration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CloudFunctionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CloudFunctionConfiguration) GoString() string { - return s.String() -} - -type CommonPrefix struct { - Prefix *string `type:"string"` - - metadataCommonPrefix `json:"-" xml:"-"` -} - -type metadataCommonPrefix struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CommonPrefix) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CommonPrefix) GoString() string { - return s.String() -} - -type CompleteMultipartUploadInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` - - metadataCompleteMultipartUploadInput `json:"-" xml:"-"` -} - -type metadataCompleteMultipartUploadInput struct { - SDKShapeTraits bool `type:"structure" payload:"MultipartUpload"` -} - -// String returns the string representation -func (s CompleteMultipartUploadInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CompleteMultipartUploadInput) GoString() string { - return s.String() -} - -type CompleteMultipartUploadOutput struct { - Bucket *string `type:"string"` - - // Entity tag of the object. - ETag *string `type:"string"` - - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - Key *string `type:"string"` - - Location *string `type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Version of the object. 
- VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"` - - metadataCompleteMultipartUploadOutput `json:"-" xml:"-"` -} - -type metadataCompleteMultipartUploadOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CompleteMultipartUploadOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CompleteMultipartUploadOutput) GoString() string { - return s.String() -} - -type CompletedMultipartUpload struct { - Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` - - metadataCompletedMultipartUpload `json:"-" xml:"-"` -} - -type metadataCompletedMultipartUpload struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CompletedMultipartUpload) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CompletedMultipartUpload) GoString() string { - return s.String() -} - -type CompletedPart struct { - // Entity tag returned when the part was uploaded. - ETag *string `type:"string"` - - // Part number that identifies the part. This is a positive integer between - // 1 and 10,000. - PartNumber *int64 `type:"integer"` - - metadataCompletedPart `json:"-" xml:"-"` -} - -type metadataCompletedPart struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CompletedPart) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CompletedPart) GoString() string { - return s.String() -} - -type Condition struct { - // The HTTP error code when the redirect is applied. In the event of an error, - // if the error code equals this value, then the specified redirect is applied. - // Required when parent element Condition is specified and sibling KeyPrefixEquals - // is not specified. If both are specified, then both must be true for the redirect - // to be applied. - HTTPErrorCodeReturnedEquals *string `locationName:"HttpErrorCodeReturnedEquals" type:"string"` - - // The object key name prefix when the redirect is applied. For example, to - // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. - // To redirect request for all pages with the prefix docs/, the key prefix will - // be /docs, which identifies all objects in the docs/ folder. Required when - // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals - // is not specified. If both conditions are specified, both must be true for - // the redirect to be applied. - KeyPrefixEquals *string `type:"string"` - - metadataCondition `json:"-" xml:"-"` -} - -type metadataCondition struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Condition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Condition) GoString() string { - return s.String() -} - -type CopyObjectInput struct { - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. 
- ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The name of the source bucket and key name of the source object, separated - // by a slash (/). Must be URL-encoded. - CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` - - // Copies the object if its entity tag (ETag) matches the specified tag. - CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` - - // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` - - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. - CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` - - // Copies the object if it hasn't been modified since the specified time. - CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` - - // Specifies the algorithm to use when decrypting the source object (e.g., AES256). - CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one - // that was used when the source object was created. - CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. 
- GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // Specifies whether the metadata is copied from the source object or replaced - // with metadata provided in the request. - MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // The type of storage to use for the object. Defaults to 'STANDARD'. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. 
- WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` - - metadataCopyObjectInput `json:"-" xml:"-"` -} - -type metadataCopyObjectInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CopyObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CopyObjectInput) GoString() string { - return s.String() -} - -type CopyObjectOutput struct { - CopyObjectResult *CopyObjectResult `type:"structure"` - - CopySourceVersionID *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` - - // If the object expiration is configured, the response includes this header. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - metadataCopyObjectOutput `json:"-" xml:"-"` -} - -type metadataCopyObjectOutput struct { - SDKShapeTraits bool `type:"structure" payload:"CopyObjectResult"` -} - -// String returns the string representation -func (s CopyObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CopyObjectOutput) GoString() string { - return s.String() -} - -type CopyObjectResult struct { - ETag *string `type:"string"` - - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - metadataCopyObjectResult `json:"-" xml:"-"` -} - -type metadataCopyObjectResult struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CopyObjectResult) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CopyObjectResult) GoString() string { - return s.String() -} - -type CopyPartResult struct { - // Entity tag of the object. - ETag *string `type:"string"` - - // Date and time at which the object was uploaded. 
- LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - metadataCopyPartResult `json:"-" xml:"-"` -} - -type metadataCopyPartResult struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CopyPartResult) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CopyPartResult) GoString() string { - return s.String() -} - -type CreateBucketConfiguration struct { - // Specifies the region where the bucket will be created. If you don't specify - // a region, the bucket will be created in US Standard. - LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` - - metadataCreateBucketConfiguration `json:"-" xml:"-"` -} - -type metadataCreateBucketConfiguration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CreateBucketConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBucketConfiguration) GoString() string { - return s.String() -} - -type CreateBucketInput struct { - // The canned ACL to apply to the bucket. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - metadataCreateBucketInput `json:"-" xml:"-"` -} - -type metadataCreateBucketInput struct { - SDKShapeTraits bool `type:"structure" payload:"CreateBucketConfiguration"` -} - -// String returns the string representation -func (s CreateBucketInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBucketInput) GoString() string { - return s.String() -} - -type CreateBucketOutput struct { - Location *string `location:"header" locationName:"Location" type:"string"` - - metadataCreateBucketOutput `json:"-" xml:"-"` -} - -type metadataCreateBucketOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CreateBucketOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateBucketOutput) GoString() string { - return s.String() -} - -type CreateMultipartUploadInput struct { - // The canned ACL to apply to the object. 
- ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. 
- SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // The type of storage to use for the object. Defaults to 'STANDARD'. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` - - metadataCreateMultipartUploadInput `json:"-" xml:"-"` -} - -type metadataCreateMultipartUploadInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CreateMultipartUploadInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateMultipartUploadInput) GoString() string { - return s.String() -} - -type CreateMultipartUploadOutput struct { - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `locationName:"Bucket" type:"string"` - - // Object key for which the multipart upload was initiated. - Key *string `type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // ID for the initiated multipart upload. 
- UploadID *string `locationName:"UploadId" type:"string"` - - metadataCreateMultipartUploadOutput `json:"-" xml:"-"` -} - -type metadataCreateMultipartUploadOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s CreateMultipartUploadOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateMultipartUploadOutput) GoString() string { - return s.String() -} - -type Delete struct { - Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` - - // Element to enable quiet mode for the request. When you add this element, - // you must set its value to true. - Quiet *bool `type:"boolean"` - - metadataDelete `json:"-" xml:"-"` -} - -type metadataDelete struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Delete) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Delete) GoString() string { - return s.String() -} - -type DeleteBucketCORSInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataDeleteBucketCORSInput `json:"-" xml:"-"` -} - -type metadataDeleteBucketCORSInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketCORSInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketCORSInput) GoString() string { - return s.String() -} - -type DeleteBucketCORSOutput struct { - metadataDeleteBucketCORSOutput `json:"-" xml:"-"` -} - -type metadataDeleteBucketCORSOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketCORSOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketCORSOutput) GoString() string { - return s.String() -} - -type DeleteBucketInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataDeleteBucketInput `json:"-" xml:"-"` -} - -type metadataDeleteBucketInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketInput) GoString() string { - return s.String() -} - -type DeleteBucketLifecycleInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataDeleteBucketLifecycleInput `json:"-" xml:"-"` -} - -type metadataDeleteBucketLifecycleInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketLifecycleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketLifecycleInput) GoString() string { - return s.String() -} - -type DeleteBucketLifecycleOutput struct { - metadataDeleteBucketLifecycleOutput `json:"-" xml:"-"` -} - -type metadataDeleteBucketLifecycleOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketLifecycleOutput) GoString() 
string { - return s.String() -} - -type DeleteBucketOutput struct { - metadataDeleteBucketOutput `json:"-" xml:"-"` -} - -type metadataDeleteBucketOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketOutput) GoString() string { - return s.String() -} - -type DeleteBucketPolicyInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataDeleteBucketPolicyInput `json:"-" xml:"-"` -} - -type metadataDeleteBucketPolicyInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketPolicyInput) GoString() string { - return s.String() -} - -type DeleteBucketPolicyOutput struct { - metadataDeleteBucketPolicyOutput `json:"-" xml:"-"` -} - -type metadataDeleteBucketPolicyOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketPolicyOutput) GoString() string { - return s.String() -} - -type DeleteBucketReplicationInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataDeleteBucketReplicationInput `json:"-" xml:"-"` -} - -type metadataDeleteBucketReplicationInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketReplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketReplicationInput) GoString() string { - return s.String() -} - -type DeleteBucketReplicationOutput struct { - metadataDeleteBucketReplicationOutput `json:"-" xml:"-"` -} - -type metadataDeleteBucketReplicationOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketReplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketReplicationOutput) GoString() string { - return s.String() -} - -type DeleteBucketTaggingInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataDeleteBucketTaggingInput `json:"-" xml:"-"` -} - -type metadataDeleteBucketTaggingInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketTaggingInput) GoString() string { - return s.String() -} - -type DeleteBucketTaggingOutput struct { - metadataDeleteBucketTaggingOutput `json:"-" xml:"-"` -} - -type metadataDeleteBucketTaggingOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketTaggingOutput) GoString() string { - return s.String() -} - -type DeleteBucketWebsiteInput struct { - Bucket *string `location:"uri" 
locationName:"Bucket" type:"string" required:"true"` - - metadataDeleteBucketWebsiteInput `json:"-" xml:"-"` -} - -type metadataDeleteBucketWebsiteInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketWebsiteInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketWebsiteInput) GoString() string { - return s.String() -} - -type DeleteBucketWebsiteOutput struct { - metadataDeleteBucketWebsiteOutput `json:"-" xml:"-"` -} - -type metadataDeleteBucketWebsiteOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteBucketWebsiteOutput) GoString() string { - return s.String() -} - -type DeleteMarkerEntry struct { - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` - - // The object key. - Key *string `type:"string"` - - // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Owner *Owner `type:"structure"` - - // Version ID of an object. - VersionID *string `locationName:"VersionId" type:"string"` - - metadataDeleteMarkerEntry `json:"-" xml:"-"` -} - -type metadataDeleteMarkerEntry struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteMarkerEntry) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMarkerEntry) GoString() string { - return s.String() -} - -type DeleteObjectInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // VersionId used to reference a specific version of the object. - VersionID *string `location:"querystring" locationName:"versionId" type:"string"` - - metadataDeleteObjectInput `json:"-" xml:"-"` -} - -type metadataDeleteObjectInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectInput) GoString() string { - return s.String() -} - -type DeleteObjectOutput struct { - // Specifies whether the versioned object that was permanently deleted was (true) - // or was not (false) a delete marker. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // If present, indicates that the requester was successfully charged for the - // request. 
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Returns the version ID of the delete marker created as a result of the DELETE - // operation. - VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"` - - metadataDeleteObjectOutput `json:"-" xml:"-"` -} - -type metadataDeleteObjectOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectOutput) GoString() string { - return s.String() -} - -type DeleteObjectsInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - Delete *Delete `locationName:"Delete" type:"structure" required:"true"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - metadataDeleteObjectsInput `json:"-" xml:"-"` -} - -type metadataDeleteObjectsInput struct { - SDKShapeTraits bool `type:"structure" payload:"Delete"` -} - -// String returns the string representation -func (s DeleteObjectsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectsInput) GoString() string { - return s.String() -} - -type DeleteObjectsOutput struct { - Deleted []*DeletedObject `type:"list" flattened:"true"` - - Errors []*Error `locationName:"Error" type:"list" flattened:"true"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - metadataDeleteObjectsOutput `json:"-" xml:"-"` -} - -type metadataDeleteObjectsOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeleteObjectsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteObjectsOutput) GoString() string { - return s.String() -} - -type DeletedObject struct { - DeleteMarker *bool `type:"boolean"` - - DeleteMarkerVersionID *string `locationName:"DeleteMarkerVersionId" type:"string"` - - Key *string `type:"string"` - - VersionID *string `locationName:"VersionId" type:"string"` - - metadataDeletedObject `json:"-" xml:"-"` -} - -type metadataDeletedObject struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s DeletedObject) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeletedObject) GoString() string { - return s.String() -} - -type Destination struct { - // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store - // replicas of the object identified by the rule. 
- Bucket *string `type:"string" required:"true"` - - metadataDestination `json:"-" xml:"-"` -} - -type metadataDestination struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Destination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Destination) GoString() string { - return s.String() -} - -type Error struct { - Code *string `type:"string"` - - Key *string `type:"string"` - - Message *string `type:"string"` - - VersionID *string `locationName:"VersionId" type:"string"` - - metadataError `json:"-" xml:"-"` -} - -type metadataError struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Error) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Error) GoString() string { - return s.String() -} - -type ErrorDocument struct { - // The object key name to use when a 4XX class error occurs. - Key *string `type:"string" required:"true"` - - metadataErrorDocument `json:"-" xml:"-"` -} - -type metadataErrorDocument struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ErrorDocument) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ErrorDocument) GoString() string { - return s.String() -} - -type GetBucketACLInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataGetBucketACLInput `json:"-" xml:"-"` -} - -type metadataGetBucketACLInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketACLInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketACLInput) GoString() string { - return s.String() -} - -type GetBucketACLOutput struct { - // A list of grants. 
- Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - - Owner *Owner `type:"structure"` - - metadataGetBucketACLOutput `json:"-" xml:"-"` -} - -type metadataGetBucketACLOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketACLOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketACLOutput) GoString() string { - return s.String() -} - -type GetBucketCORSInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataGetBucketCORSInput `json:"-" xml:"-"` -} - -type metadataGetBucketCORSInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketCORSInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketCORSInput) GoString() string { - return s.String() -} - -type GetBucketCORSOutput struct { - CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` - - metadataGetBucketCORSOutput `json:"-" xml:"-"` -} - -type metadataGetBucketCORSOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketCORSOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketCORSOutput) GoString() string { - return s.String() -} - -type GetBucketLifecycleInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataGetBucketLifecycleInput `json:"-" xml:"-"` -} - -type metadataGetBucketLifecycleInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketLifecycleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLifecycleInput) GoString() string { - return s.String() -} - -type GetBucketLifecycleOutput struct { - Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` - - metadataGetBucketLifecycleOutput `json:"-" xml:"-"` -} - -type metadataGetBucketLifecycleOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLifecycleOutput) GoString() string { - return s.String() -} - -type GetBucketLocationInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataGetBucketLocationInput `json:"-" xml:"-"` -} - -type metadataGetBucketLocationInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketLocationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLocationInput) GoString() string { - return s.String() -} - -type GetBucketLocationOutput struct { - LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` - - metadataGetBucketLocationOutput `json:"-" xml:"-"` -} - -type metadataGetBucketLocationOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketLocationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the 
string representation -func (s GetBucketLocationOutput) GoString() string { - return s.String() -} - -type GetBucketLoggingInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataGetBucketLoggingInput `json:"-" xml:"-"` -} - -type metadataGetBucketLoggingInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketLoggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLoggingInput) GoString() string { - return s.String() -} - -type GetBucketLoggingOutput struct { - LoggingEnabled *LoggingEnabled `type:"structure"` - - metadataGetBucketLoggingOutput `json:"-" xml:"-"` -} - -type metadataGetBucketLoggingOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketLoggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketLoggingOutput) GoString() string { - return s.String() -} - -type GetBucketNotificationConfigurationRequest struct { - // Name of the bucket to get the notification configuration for. - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataGetBucketNotificationConfigurationRequest `json:"-" xml:"-"` -} - -type metadataGetBucketNotificationConfigurationRequest struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketNotificationConfigurationRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketNotificationConfigurationRequest) GoString() string { - return s.String() -} - -type GetBucketPolicyInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataGetBucketPolicyInput `json:"-" xml:"-"` -} - -type metadataGetBucketPolicyInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketPolicyInput) GoString() string { - return s.String() -} - -type GetBucketPolicyOutput struct { - // The bucket policy as a JSON document. - Policy *string `type:"string"` - - metadataGetBucketPolicyOutput `json:"-" xml:"-"` -} - -type metadataGetBucketPolicyOutput struct { - SDKShapeTraits bool `type:"structure" payload:"Policy"` -} - -// String returns the string representation -func (s GetBucketPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketPolicyOutput) GoString() string { - return s.String() -} - -type GetBucketReplicationInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataGetBucketReplicationInput `json:"-" xml:"-"` -} - -type metadataGetBucketReplicationInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketReplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketReplicationInput) GoString() string { - return s.String() -} - -type GetBucketReplicationOutput struct { - // Container for replication rules. You can add as many as 1,000 rules. 
Total - // replication configuration size can be up to 2 MB. - ReplicationConfiguration *ReplicationConfiguration `type:"structure"` - - metadataGetBucketReplicationOutput `json:"-" xml:"-"` -} - -type metadataGetBucketReplicationOutput struct { - SDKShapeTraits bool `type:"structure" payload:"ReplicationConfiguration"` -} - -// String returns the string representation -func (s GetBucketReplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketReplicationOutput) GoString() string { - return s.String() -} - -type GetBucketRequestPaymentInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataGetBucketRequestPaymentInput `json:"-" xml:"-"` -} - -type metadataGetBucketRequestPaymentInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketRequestPaymentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketRequestPaymentInput) GoString() string { - return s.String() -} - -type GetBucketRequestPaymentOutput struct { - // Specifies who pays for the download and request fees. - Payer *string `type:"string" enum:"Payer"` - - metadataGetBucketRequestPaymentOutput `json:"-" xml:"-"` -} - -type metadataGetBucketRequestPaymentOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketRequestPaymentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketRequestPaymentOutput) GoString() string { - return s.String() -} - -type GetBucketTaggingInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataGetBucketTaggingInput `json:"-" xml:"-"` -} - -type metadataGetBucketTaggingInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketTaggingInput) GoString() string { - return s.String() -} - -type GetBucketTaggingOutput struct { - TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` - - metadataGetBucketTaggingOutput `json:"-" xml:"-"` -} - -type metadataGetBucketTaggingOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketTaggingOutput) GoString() string { - return s.String() -} - -type GetBucketVersioningInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataGetBucketVersioningInput `json:"-" xml:"-"` -} - -type metadataGetBucketVersioningInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketVersioningInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketVersioningInput) GoString() string { - return s.String() -} - -type GetBucketVersioningOutput struct { - // Specifies whether MFA delete is enabled in the bucket versioning configuration. - // This element is only returned if the bucket has been configured with MFA - // delete. 
If the bucket has never been so configured, this element is not returned. - MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` - - // The versioning state of the bucket. - Status *string `type:"string" enum:"BucketVersioningStatus"` - - metadataGetBucketVersioningOutput `json:"-" xml:"-"` -} - -type metadataGetBucketVersioningOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketVersioningOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketVersioningOutput) GoString() string { - return s.String() -} - -type GetBucketWebsiteInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataGetBucketWebsiteInput `json:"-" xml:"-"` -} - -type metadataGetBucketWebsiteInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketWebsiteInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketWebsiteInput) GoString() string { - return s.String() -} - -type GetBucketWebsiteOutput struct { - ErrorDocument *ErrorDocument `type:"structure"` - - IndexDocument *IndexDocument `type:"structure"` - - RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` - - RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` - - metadataGetBucketWebsiteOutput `json:"-" xml:"-"` -} - -type metadataGetBucketWebsiteOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetBucketWebsiteOutput) GoString() string { - return s.String() -} - -type GetObjectACLInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // VersionId used to reference a specific version of the object. - VersionID *string `location:"querystring" locationName:"versionId" type:"string"` - - metadataGetObjectACLInput `json:"-" xml:"-"` -} - -type metadataGetObjectACLInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetObjectACLInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectACLInput) GoString() string { - return s.String() -} - -type GetObjectACLOutput struct { - // A list of grants. - Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` - - Owner *Owner `type:"structure"` - - // If present, indicates that the requester was successfully charged for the - // request. 
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - metadataGetObjectACLOutput `json:"-" xml:"-"` -} - -type metadataGetObjectACLOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetObjectACLOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectACLOutput) GoString() string { - return s.String() -} - -type GetObjectInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Return the object only if its entity tag (ETag) is the same as the one specified, - // otherwise return a 412 (precondition failed). - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` - - // Return the object only if it has been modified since the specified time, - // otherwise return a 304 (not modified). - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` - - // Return the object only if its entity tag (ETag) is different from the one - // specified, otherwise return a 304 (not modified). - IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` - - // Return the object only if it has not been modified since the specified time, - // otherwise return a 412 (precondition failed). - IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. - Range *string `location:"header" locationName:"Range" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Sets the Cache-Control header of the response. - ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` - - // Sets the Content-Disposition header of the response. - ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` - - // Sets the Content-Encoding header of the response. - ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` - - // Sets the Content-Language header of the response. - ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` - - // Sets the Content-Type header of the response. - ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` - - // Sets the Expires header of the response. - ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"` - - // Specifies the algorithm to use when encrypting the object (e.g., AES256). 
- SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // VersionId used to reference a specific version of the object. - VersionID *string `location:"querystring" locationName:"versionId" type:"string"` - - metadataGetObjectInput `json:"-" xml:"-"` -} - -type metadataGetObjectInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectInput) GoString() string { - return s.String() -} - -type GetObjectOutput struct { - AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` - - // Object data. - Body io.ReadCloser `type:"blob"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` - - // The portion of the object returned in the response. - ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs - // providing object expiration information. The value of the rule-id is URL - // encoded. 
- Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *string `location:"header" locationName:"Expires" type:"string"` - - // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, - // you can create metadata whose values are not legal HTTP headers. - MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - - ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Provides information about object restoration operation and expiration time - // of the restored object copy. - Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // Version of the object. - VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. 
- WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` - - metadataGetObjectOutput `json:"-" xml:"-"` -} - -type metadataGetObjectOutput struct { - SDKShapeTraits bool `type:"structure" payload:"Body"` -} - -// String returns the string representation -func (s GetObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectOutput) GoString() string { - return s.String() -} - -type GetObjectTorrentInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - metadataGetObjectTorrentInput `json:"-" xml:"-"` -} - -type metadataGetObjectTorrentInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s GetObjectTorrentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectTorrentInput) GoString() string { - return s.String() -} - -type GetObjectTorrentOutput struct { - Body io.ReadCloser `type:"blob"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - metadataGetObjectTorrentOutput `json:"-" xml:"-"` -} - -type metadataGetObjectTorrentOutput struct { - SDKShapeTraits bool `type:"structure" payload:"Body"` -} - -// String returns the string representation -func (s GetObjectTorrentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetObjectTorrentOutput) GoString() string { - return s.String() -} - -type Grant struct { - Grantee *Grantee `type:"structure"` - - // Specifies the permission given to the grantee. - Permission *string `type:"string" enum:"Permission"` - - metadataGrant `json:"-" xml:"-"` -} - -type metadataGrant struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Grant) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Grant) GoString() string { - return s.String() -} - -type Grantee struct { - // Screen name of the grantee. - DisplayName *string `type:"string"` - - // Email address of the grantee. - EmailAddress *string `type:"string"` - - // The canonical user ID of the grantee. - ID *string `type:"string"` - - // Type of grantee - Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` - - // URI of the grantee group. 
- URI *string `type:"string"` - - metadataGrantee `json:"-" xml:"-"` -} - -type metadataGrantee struct { - SDKShapeTraits bool `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` -} - -// String returns the string representation -func (s Grantee) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Grantee) GoString() string { - return s.String() -} - -type HeadBucketInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - metadataHeadBucketInput `json:"-" xml:"-"` -} - -type metadataHeadBucketInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s HeadBucketInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HeadBucketInput) GoString() string { - return s.String() -} - -type HeadBucketOutput struct { - metadataHeadBucketOutput `json:"-" xml:"-"` -} - -type metadataHeadBucketOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s HeadBucketOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HeadBucketOutput) GoString() string { - return s.String() -} - -type HeadObjectInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Return the object only if its entity tag (ETag) is the same as the one specified, - // otherwise return a 412 (precondition failed). - IfMatch *string `location:"header" locationName:"If-Match" type:"string"` - - // Return the object only if it has been modified since the specified time, - // otherwise return a 304 (not modified). - IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` - - // Return the object only if its entity tag (ETag) is different from the one - // specified, otherwise return a 304 (not modified). - IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` - - // Return the object only if it has not been modified since the specified time, - // otherwise return a 412 (precondition failed). - IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // Downloads the specified range bytes of an object. For more information about - // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. - Range *string `location:"header" locationName:"Range" type:"string"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. 
This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // VersionId used to reference a specific version of the object. - VersionID *string `location:"querystring" locationName:"versionId" type:"string"` - - metadataHeadObjectInput `json:"-" xml:"-"` -} - -type metadataHeadObjectInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s HeadObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HeadObjectInput) GoString() string { - return s.String() -} - -type HeadObjectOutput struct { - AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // Specifies whether the object retrieved was (true) or was not (false) a Delete - // Marker. If false, this response header does not appear in the response. - DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` - - // An ETag is an opaque identifier assigned by a web server to a specific version - // of a resource found at a URL - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the object expiration is configured (see PUT Bucket lifecycle), the response - // includes this header. It includes the expiry-date and rule-id key value pairs - // providing object expiration information. The value of the rule-id is URL - // encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *string `location:"header" locationName:"Expires" type:"string"` - - // Last modified date of the object - LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` - - // A map of metadata to store with the object in S3. 
- Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // This is set to the number of metadata entries not returned in x-amz-meta - // headers. This can happen if you create metadata using an API like SOAP that - // supports more flexible metadata than the REST API. For example, using SOAP, - // you can create metadata whose values are not legal HTTP headers. - MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` - - ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // Provides information about object restoration operation and expiration time - // of the restored object copy. - Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // Version of the object. - VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` - - metadataHeadObjectOutput `json:"-" xml:"-"` -} - -type metadataHeadObjectOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s HeadObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s HeadObjectOutput) GoString() string { - return s.String() -} - -type IndexDocument struct { - // A suffix that is appended to a request that is for a directory on the website - // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ - // the data that is returned will be for the object with the key name images/index.html) - // The suffix must not be empty and must not include a slash character. 
- Suffix *string `type:"string" required:"true"` - - metadataIndexDocument `json:"-" xml:"-"` -} - -type metadataIndexDocument struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s IndexDocument) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s IndexDocument) GoString() string { - return s.String() -} - -type Initiator struct { - // Name of the Principal. - DisplayName *string `type:"string"` - - // If the principal is an AWS account, it provides the Canonical User ID. If - // the principal is an IAM User, it provides a user ARN value. - ID *string `type:"string"` - - metadataInitiator `json:"-" xml:"-"` -} - -type metadataInitiator struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Initiator) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Initiator) GoString() string { - return s.String() -} - -// Container for specifying the AWS Lambda notification configuration. -type LambdaFunctionConfiguration struct { - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - ID *string `locationName:"Id" type:"string"` - - // Lambda cloud function ARN that Amazon S3 can invoke when it detects events - // of the specified type. - LambdaFunctionARN *string `locationName:"CloudFunction" type:"string" required:"true"` - - metadataLambdaFunctionConfiguration `json:"-" xml:"-"` -} - -type metadataLambdaFunctionConfiguration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s LambdaFunctionConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LambdaFunctionConfiguration) GoString() string { - return s.String() -} - -type LifecycleConfiguration struct { - Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` - - metadataLifecycleConfiguration `json:"-" xml:"-"` -} - -type metadataLifecycleConfiguration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s LifecycleConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleConfiguration) GoString() string { - return s.String() -} - -type LifecycleExpiration struct { - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. - Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. - Days *int64 `type:"integer"` - - metadataLifecycleExpiration `json:"-" xml:"-"` -} - -type metadataLifecycleExpiration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s LifecycleExpiration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleExpiration) GoString() string { - return s.String() -} - -type LifecycleRule struct { - Expiration *LifecycleExpiration `type:"structure"` - - // Unique identifier for the rule. The value cannot be longer than 255 characters. 
- ID *string `type:"string"` - - // Specifies when noncurrent object versions expire. Upon expiration, Amazon - // S3 permanently deletes the noncurrent object versions. You set this lifecycle - // configuration action on a bucket that has versioning enabled (or suspended) - // to request that Amazon S3 delete noncurrent object versions at a specific - // period in the object's lifetime. - NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` - - // Container for the transition rule that describes when noncurrent objects - // transition to the GLACIER storage class. If your bucket is versioning-enabled - // (or versioning is suspended), you can set this action to request that Amazon - // S3 transition noncurrent object versions to the GLACIER storage class at - // a specific period in the object's lifetime. - NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` - - // Prefix identifying one or more objects to which the rule applies. - Prefix *string `type:"string" required:"true"` - - // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule - // is not currently being applied. - Status *string `type:"string" required:"true" enum:"ExpirationStatus"` - - Transition *Transition `type:"structure"` - - metadataLifecycleRule `json:"-" xml:"-"` -} - -type metadataLifecycleRule struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s LifecycleRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LifecycleRule) GoString() string { - return s.String() -} - -type ListBucketsInput struct { - metadataListBucketsInput `json:"-" xml:"-"` -} - -type metadataListBucketsInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ListBucketsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketsInput) GoString() string { - return s.String() -} - -type ListBucketsOutput struct { - Buckets []*Bucket `locationNameList:"Bucket" type:"list"` - - Owner *Owner `type:"structure"` - - metadataListBucketsOutput `json:"-" xml:"-"` -} - -type metadataListBucketsOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ListBucketsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListBucketsOutput) GoString() string { - return s.String() -} - -type ListMultipartUploadsInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Together with upload-id-marker, this parameter specifies the multipart upload - // after which listing should begin. 
- KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - - // Sets the maximum number of multipart uploads, from 1 to 1,000, to return - // in the response body. 1,000 is the maximum number of uploads that can be - // returned in a response. - MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` - - // Lists in-progress uploads only for those keys that begin with the specified - // prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Together with key-marker, specifies the multipart upload after which listing - // should begin. If key-marker is not specified, the upload-id-marker parameter - // is ignored. - UploadIDMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` - - metadataListMultipartUploadsInput `json:"-" xml:"-"` -} - -type metadataListMultipartUploadsInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ListMultipartUploadsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListMultipartUploadsInput) GoString() string { - return s.String() -} - -type ListMultipartUploadsOutput struct { - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `type:"string"` - - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // Indicates whether the returned list of multipart uploads is truncated. A - // value of true indicates that the list was truncated. The list can be truncated - // if the number of multipart uploads exceeds the limit allowed or specified - // by max uploads. - IsTruncated *bool `type:"boolean"` - - // The key at or after which the listing began. - KeyMarker *string `type:"string"` - - // Maximum number of multipart uploads that could have been included in the - // response. - MaxUploads *int64 `type:"integer"` - - // When a list is truncated, this element specifies the value that should be - // used for the key-marker request parameter in a subsequent request. - NextKeyMarker *string `type:"string"` - - // When a list is truncated, this element specifies the value that should be - // used for the upload-id-marker request parameter in a subsequent request. - NextUploadIDMarker *string `locationName:"NextUploadIdMarker" type:"string"` - - // When a prefix is provided in the request, this field contains the specified - // prefix. The result contains only keys starting with the specified prefix. - Prefix *string `type:"string"` - - // Upload ID after which listing began. 
- UploadIDMarker *string `locationName:"UploadIdMarker" type:"string"` - - Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` - - metadataListMultipartUploadsOutput `json:"-" xml:"-"` -} - -type metadataListMultipartUploadsOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ListMultipartUploadsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListMultipartUploadsOutput) GoString() string { - return s.String() -} - -type ListObjectVersionsInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Specifies the key to start with when listing objects in a bucket. - KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` - - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - - // Limits the response to keys that begin with the specified prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - // Specifies the object version you want to start listing from. - VersionIDMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` - - metadataListObjectVersionsInput `json:"-" xml:"-"` -} - -type metadataListObjectVersionsInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ListObjectVersionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectVersionsInput) GoString() string { - return s.String() -} - -type ListObjectVersionsOutput struct { - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` - - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. If your results were truncated, you can - // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker - // response parameters as a starting place in another request to return the - // rest of the results. - IsTruncated *bool `type:"boolean"` - - // Marks the last Key returned in a truncated response. - KeyMarker *string `type:"string"` - - MaxKeys *int64 `type:"integer"` - - Name *string `type:"string"` - - // Use this value for the key marker request parameter in a subsequent request. 
- NextKeyMarker *string `type:"string"` - - // Use this value for the next version id marker parameter in a subsequent request. - NextVersionIDMarker *string `locationName:"NextVersionIdMarker" type:"string"` - - Prefix *string `type:"string"` - - VersionIDMarker *string `locationName:"VersionIdMarker" type:"string"` - - Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` - - metadataListObjectVersionsOutput `json:"-" xml:"-"` -} - -type metadataListObjectVersionsOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ListObjectVersionsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectVersionsOutput) GoString() string { - return s.String() -} - -type ListObjectsInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // A delimiter is a character you use to group keys. - Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` - - // Requests Amazon S3 to encode the object keys in the response and specifies - // the encoding method to use. An object key may contain any Unicode character; - // however, XML 1.0 parser cannot parse some characters, such as characters - // with an ASCII value from 0 to 10. For characters that are not supported in - // XML 1.0, you can add this parameter to request that Amazon S3 encode the - // keys in the response. - EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` - - // Specifies the key to start with when listing objects in a bucket. - Marker *string `location:"querystring" locationName:"marker" type:"string"` - - // Sets the maximum number of keys returned in the response. The response might - // contain fewer keys but will never contain more. - MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` - - // Limits the response to keys that begin with the specified prefix. - Prefix *string `location:"querystring" locationName:"prefix" type:"string"` - - metadataListObjectsInput `json:"-" xml:"-"` -} - -type metadataListObjectsInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ListObjectsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectsInput) GoString() string { - return s.String() -} - -type ListObjectsOutput struct { - CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` - - Contents []*Object `type:"list" flattened:"true"` - - Delimiter *string `type:"string"` - - // Encoding type used by Amazon S3 to encode object keys in the response. - EncodingType *string `type:"string" enum:"EncodingType"` - - // A flag that indicates whether or not Amazon S3 returned all of the results - // that satisfied the search criteria. - IsTruncated *bool `type:"boolean"` - - Marker *string `type:"string"` - - MaxKeys *int64 `type:"integer"` - - Name *string `type:"string"` - - // When response is truncated (the IsTruncated element value in the response - // is true), you can use the key name in this field as marker in the subsequent - // request to get next set of objects. Amazon S3 lists objects in alphabetical - // order Note: This element is returned only if you have delimiter request parameter - // specified. 
If response does not include the NextMaker and it is truncated, - // you can use the value of the last Key in the response as the marker in the - // subsequent request to get the next set of object keys. - NextMarker *string `type:"string"` - - Prefix *string `type:"string"` - - metadataListObjectsOutput `json:"-" xml:"-"` -} - -type metadataListObjectsOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ListObjectsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListObjectsOutput) GoString() string { - return s.String() -} - -type ListPartsInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // Sets the maximum number of parts to return. - MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` - - // Specifies the part after which listing should begin. Only parts with higher - // part numbers will be listed. - PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Upload ID identifying the multipart upload whose parts are being listed. - UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` - - metadataListPartsInput `json:"-" xml:"-"` -} - -type metadataListPartsInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ListPartsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListPartsInput) GoString() string { - return s.String() -} - -type ListPartsOutput struct { - // Name of the bucket to which the multipart upload was initiated. - Bucket *string `type:"string"` - - // Identifies who initiated the multipart upload. - Initiator *Initiator `type:"structure"` - - // Indicates whether the returned list of parts is truncated. - IsTruncated *bool `type:"boolean"` - - // Object key for which the multipart upload was initiated. - Key *string `type:"string"` - - // Maximum number of parts that were allowed in the response. - MaxParts *int64 `type:"integer"` - - // When a list is truncated, this element specifies the last part in the list, - // as well as the value to use for the part-number-marker request parameter - // in a subsequent request. - NextPartNumberMarker *int64 `type:"integer"` - - Owner *Owner `type:"structure"` - - // Part number after which listing begins. - PartNumberMarker *int64 `type:"integer"` - - Parts []*Part `locationName:"Part" type:"list" flattened:"true"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // The class of storage used to store the object. 
- StorageClass *string `type:"string" enum:"StorageClass"` - - // Upload ID identifying the multipart upload whose parts are being listed. - UploadID *string `locationName:"UploadId" type:"string"` - - metadataListPartsOutput `json:"-" xml:"-"` -} - -type metadataListPartsOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ListPartsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ListPartsOutput) GoString() string { - return s.String() -} - -type LoggingEnabled struct { - // Specifies the bucket where you want Amazon S3 to store server access logs. - // You can have your logs delivered to any bucket that you own, including the - // same bucket that is being logged. You can also configure multiple buckets - // to deliver their logs to the same target bucket. In this case you should - // choose a different TargetPrefix for each source bucket so that the delivered - // log files can be distinguished by key. - TargetBucket *string `type:"string"` - - TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` - - // This element lets you specify a prefix for the keys that the log files will - // be stored under. - TargetPrefix *string `type:"string"` - - metadataLoggingEnabled `json:"-" xml:"-"` -} - -type metadataLoggingEnabled struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s LoggingEnabled) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LoggingEnabled) GoString() string { - return s.String() -} - -type MultipartUpload struct { - // Date and time at which the multipart upload was initiated. - Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Identifies who initiated the multipart upload. - Initiator *Initiator `type:"structure"` - - // Key of the object for which the multipart upload was initiated. - Key *string `type:"string"` - - Owner *Owner `type:"structure"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"StorageClass"` - - // Upload ID that identifies the multipart upload. - UploadID *string `locationName:"UploadId" type:"string"` - - metadataMultipartUpload `json:"-" xml:"-"` -} - -type metadataMultipartUpload struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s MultipartUpload) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MultipartUpload) GoString() string { - return s.String() -} - -// Specifies when noncurrent object versions expire. Upon expiration, Amazon -// S3 permanently deletes the noncurrent object versions. You set this lifecycle -// configuration action on a bucket that has versioning enabled (or suspended) -// to request that Amazon S3 delete noncurrent object versions at a specific -// period in the object's lifetime. -type NoncurrentVersionExpiration struct { - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage - // Service Developer Guide. 
- NoncurrentDays *int64 `type:"integer"` - - metadataNoncurrentVersionExpiration `json:"-" xml:"-"` -} - -type metadataNoncurrentVersionExpiration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s NoncurrentVersionExpiration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NoncurrentVersionExpiration) GoString() string { - return s.String() -} - -// Container for the transition rule that describes when noncurrent objects -// transition to the GLACIER storage class. If your bucket is versioning-enabled -// (or versioning is suspended), you can set this action to request that Amazon -// S3 transition noncurrent object versions to the GLACIER storage class at -// a specific period in the object's lifetime. -type NoncurrentVersionTransition struct { - // Specifies the number of days an object is noncurrent before Amazon S3 can - // perform the associated action. For information about the noncurrent days - // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent - // (/AmazonS3/latest/dev/s3-access-control.html) in the Amazon Simple Storage - // Service Developer Guide. - NoncurrentDays *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"TransitionStorageClass"` - - metadataNoncurrentVersionTransition `json:"-" xml:"-"` -} - -type metadataNoncurrentVersionTransition struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s NoncurrentVersionTransition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NoncurrentVersionTransition) GoString() string { - return s.String() -} - -// Container for specifying the notification configuration of the bucket. If -// this element is empty, notifications are turned off on the bucket. 
-type NotificationConfiguration struct { - LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` - - QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` - - TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` - - metadataNotificationConfiguration `json:"-" xml:"-"` -} - -type metadataNotificationConfiguration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s NotificationConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotificationConfiguration) GoString() string { - return s.String() -} - -type NotificationConfigurationDeprecated struct { - CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` - - QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` - - TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` - - metadataNotificationConfigurationDeprecated `json:"-" xml:"-"` -} - -type metadataNotificationConfigurationDeprecated struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s NotificationConfigurationDeprecated) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s NotificationConfigurationDeprecated) GoString() string { - return s.String() -} - -type Object struct { - ETag *string `type:"string"` - - Key *string `type:"string"` - - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Owner *Owner `type:"structure"` - - Size *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectStorageClass"` - - metadataObject `json:"-" xml:"-"` -} - -type metadataObject struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Object) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Object) GoString() string { - return s.String() -} - -type ObjectIdentifier struct { - // Key name of the object to delete. - Key *string `type:"string" required:"true"` - - // VersionId for the specific version of the object to delete. - VersionID *string `locationName:"VersionId" type:"string"` - - metadataObjectIdentifier `json:"-" xml:"-"` -} - -type metadataObjectIdentifier struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ObjectIdentifier) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectIdentifier) GoString() string { - return s.String() -} - -type ObjectVersion struct { - ETag *string `type:"string"` - - // Specifies whether the object is (true) or is not (false) the latest version - // of an object. - IsLatest *bool `type:"boolean"` - - // The object key. - Key *string `type:"string"` - - // Date and time the object was last modified. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - Owner *Owner `type:"structure"` - - // Size in bytes of the object. - Size *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` - - // Version ID of an object. 
- VersionID *string `locationName:"VersionId" type:"string"` - - metadataObjectVersion `json:"-" xml:"-"` -} - -type metadataObjectVersion struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ObjectVersion) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ObjectVersion) GoString() string { - return s.String() -} - -type Owner struct { - DisplayName *string `type:"string"` - - ID *string `type:"string"` - - metadataOwner `json:"-" xml:"-"` -} - -type metadataOwner struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Owner) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Owner) GoString() string { - return s.String() -} - -type Part struct { - // Entity tag returned when the part was uploaded. - ETag *string `type:"string"` - - // Date and time at which the part was uploaded. - LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Part number identifying the part. This is a positive integer between 1 and - // 10,000. - PartNumber *int64 `type:"integer"` - - // Size of the uploaded part data. - Size *int64 `type:"integer"` - - metadataPart `json:"-" xml:"-"` -} - -type metadataPart struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Part) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Part) GoString() string { - return s.String() -} - -type PutBucketACLInput struct { - // The canned ACL to apply to the bucket. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` - - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. 
- GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - metadataPutBucketACLInput `json:"-" xml:"-"` -} - -type metadataPutBucketACLInput struct { - SDKShapeTraits bool `type:"structure" payload:"AccessControlPolicy"` -} - -// String returns the string representation -func (s PutBucketACLInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketACLInput) GoString() string { - return s.String() -} - -type PutBucketACLOutput struct { - metadataPutBucketACLOutput `json:"-" xml:"-"` -} - -type metadataPutBucketACLOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutBucketACLOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketACLOutput) GoString() string { - return s.String() -} - -type PutBucketCORSInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure"` - - metadataPutBucketCORSInput `json:"-" xml:"-"` -} - -type metadataPutBucketCORSInput struct { - SDKShapeTraits bool `type:"structure" payload:"CORSConfiguration"` -} - -// String returns the string representation -func (s PutBucketCORSInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketCORSInput) GoString() string { - return s.String() -} - -type PutBucketCORSOutput struct { - metadataPutBucketCORSOutput `json:"-" xml:"-"` -} - -type metadataPutBucketCORSOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutBucketCORSOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketCORSOutput) GoString() string { - return s.String() -} - -type PutBucketLifecycleInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` - - metadataPutBucketLifecycleInput `json:"-" xml:"-"` -} - -type metadataPutBucketLifecycleInput struct { - SDKShapeTraits bool `type:"structure" payload:"LifecycleConfiguration"` -} - -// String returns the string representation -func (s PutBucketLifecycleInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleInput) GoString() string { - return s.String() -} - -type PutBucketLifecycleOutput struct { - metadataPutBucketLifecycleOutput `json:"-" xml:"-"` -} - -type metadataPutBucketLifecycleOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutBucketLifecycleOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLifecycleOutput) GoString() string { - return s.String() -} - -type PutBucketLoggingInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` - - metadataPutBucketLoggingInput `json:"-" xml:"-"` -} - -type metadataPutBucketLoggingInput struct { - SDKShapeTraits bool `type:"structure" payload:"BucketLoggingStatus"` -} - -// String returns 
the string representation -func (s PutBucketLoggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLoggingInput) GoString() string { - return s.String() -} - -type PutBucketLoggingOutput struct { - metadataPutBucketLoggingOutput `json:"-" xml:"-"` -} - -type metadataPutBucketLoggingOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutBucketLoggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketLoggingOutput) GoString() string { - return s.String() -} - -type PutBucketNotificationConfigurationInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Container for specifying the notification configuration of the bucket. If - // this element is empty, notifications are turned off on the bucket. - NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` - - metadataPutBucketNotificationConfigurationInput `json:"-" xml:"-"` -} - -type metadataPutBucketNotificationConfigurationInput struct { - SDKShapeTraits bool `type:"structure" payload:"NotificationConfiguration"` -} - -// String returns the string representation -func (s PutBucketNotificationConfigurationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationConfigurationInput) GoString() string { - return s.String() -} - -type PutBucketNotificationConfigurationOutput struct { - metadataPutBucketNotificationConfigurationOutput `json:"-" xml:"-"` -} - -type metadataPutBucketNotificationConfigurationOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutBucketNotificationConfigurationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationConfigurationOutput) GoString() string { - return s.String() -} - -type PutBucketNotificationInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` - - metadataPutBucketNotificationInput `json:"-" xml:"-"` -} - -type metadataPutBucketNotificationInput struct { - SDKShapeTraits bool `type:"structure" payload:"NotificationConfiguration"` -} - -// String returns the string representation -func (s PutBucketNotificationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationInput) GoString() string { - return s.String() -} - -type PutBucketNotificationOutput struct { - metadataPutBucketNotificationOutput `json:"-" xml:"-"` -} - -type metadataPutBucketNotificationOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutBucketNotificationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketNotificationOutput) GoString() string { - return s.String() -} - -type PutBucketPolicyInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The bucket policy as a JSON document. 
- Policy *string `type:"string" required:"true"` - - metadataPutBucketPolicyInput `json:"-" xml:"-"` -} - -type metadataPutBucketPolicyInput struct { - SDKShapeTraits bool `type:"structure" payload:"Policy"` -} - -// String returns the string representation -func (s PutBucketPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketPolicyInput) GoString() string { - return s.String() -} - -type PutBucketPolicyOutput struct { - metadataPutBucketPolicyOutput `json:"-" xml:"-"` -} - -type metadataPutBucketPolicyOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutBucketPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketPolicyOutput) GoString() string { - return s.String() -} - -type PutBucketReplicationInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Container for replication rules. You can add as many as 1,000 rules. Total - // replication configuration size can be up to 2 MB. - ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` - - metadataPutBucketReplicationInput `json:"-" xml:"-"` -} - -type metadataPutBucketReplicationInput struct { - SDKShapeTraits bool `type:"structure" payload:"ReplicationConfiguration"` -} - -// String returns the string representation -func (s PutBucketReplicationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketReplicationInput) GoString() string { - return s.String() -} - -type PutBucketReplicationOutput struct { - metadataPutBucketReplicationOutput `json:"-" xml:"-"` -} - -type metadataPutBucketReplicationOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutBucketReplicationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketReplicationOutput) GoString() string { - return s.String() -} - -type PutBucketRequestPaymentInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` - - metadataPutBucketRequestPaymentInput `json:"-" xml:"-"` -} - -type metadataPutBucketRequestPaymentInput struct { - SDKShapeTraits bool `type:"structure" payload:"RequestPaymentConfiguration"` -} - -// String returns the string representation -func (s PutBucketRequestPaymentInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketRequestPaymentInput) GoString() string { - return s.String() -} - -type PutBucketRequestPaymentOutput struct { - metadataPutBucketRequestPaymentOutput `json:"-" xml:"-"` -} - -type metadataPutBucketRequestPaymentOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutBucketRequestPaymentOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketRequestPaymentOutput) GoString() string { - return s.String() -} - -type PutBucketTaggingInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - 
Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` - - metadataPutBucketTaggingInput `json:"-" xml:"-"` -} - -type metadataPutBucketTaggingInput struct { - SDKShapeTraits bool `type:"structure" payload:"Tagging"` -} - -// String returns the string representation -func (s PutBucketTaggingInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketTaggingInput) GoString() string { - return s.String() -} - -type PutBucketTaggingOutput struct { - metadataPutBucketTaggingOutput `json:"-" xml:"-"` -} - -type metadataPutBucketTaggingOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutBucketTaggingOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketTaggingOutput) GoString() string { - return s.String() -} - -type PutBucketVersioningInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The concatenation of the authentication device's serial number, a space, - // and the value that is displayed on your authentication device. - MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` - - VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` - - metadataPutBucketVersioningInput `json:"-" xml:"-"` -} - -type metadataPutBucketVersioningInput struct { - SDKShapeTraits bool `type:"structure" payload:"VersioningConfiguration"` -} - -// String returns the string representation -func (s PutBucketVersioningInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketVersioningInput) GoString() string { - return s.String() -} - -type PutBucketVersioningOutput struct { - metadataPutBucketVersioningOutput `json:"-" xml:"-"` -} - -type metadataPutBucketVersioningOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutBucketVersioningOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketVersioningOutput) GoString() string { - return s.String() -} - -type PutBucketWebsiteInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` - - metadataPutBucketWebsiteInput `json:"-" xml:"-"` -} - -type metadataPutBucketWebsiteInput struct { - SDKShapeTraits bool `type:"structure" payload:"WebsiteConfiguration"` -} - -// String returns the string representation -func (s PutBucketWebsiteInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketWebsiteInput) GoString() string { - return s.String() -} - -type PutBucketWebsiteOutput struct { - metadataPutBucketWebsiteOutput `json:"-" xml:"-"` -} - -type metadataPutBucketWebsiteOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutBucketWebsiteOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutBucketWebsiteOutput) GoString() string { - return s.String() -} - -type PutObjectACLInput struct { - // The canned ACL to apply to the object. 
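The MFA field on PutBucketVersioningInput above packs the device serial and the currently displayed code into one header value. A minimal sketch of filling in that shape, assuming the service exposes a PutBucketVersioning method matching the Input/Output pair (only the Request-style constructors appear in the tests later in this patch); the serial/token values are placeholders and the imports mirror the generated examples further down:

svc := s3.New(nil)
_, err := svc.PutBucketVersioning(&s3.PutBucketVersioningInput{
	Bucket: aws.String("BucketName"), // Required
	// Device serial number, a space, then the code currently shown on the device.
	MFA: aws.String("SerialNumber 123456"),
	VersioningConfiguration: &s3.VersioningConfiguration{ // Required
		MFADelete: aws.String(s3.MFADeleteEnabled),              // "Enabled"
		Status:    aws.String(s3.BucketVersioningStatusEnabled), // "Enabled"
	},
})
if err != nil {
	fmt.Println(err.Error())
}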
- ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Allows grantee the read, write, read ACP, and write ACP permissions on the - // bucket. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to list the objects in the bucket. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the bucket ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to create, overwrite, and delete any object in the bucket. - GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` - - // Allows grantee to write the ACL for the applicable bucket. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - metadataPutObjectACLInput `json:"-" xml:"-"` -} - -type metadataPutObjectACLInput struct { - SDKShapeTraits bool `type:"structure" payload:"AccessControlPolicy"` -} - -// String returns the string representation -func (s PutObjectACLInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectACLInput) GoString() string { - return s.String() -} - -type PutObjectACLOutput struct { - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - metadataPutObjectACLOutput `json:"-" xml:"-"` -} - -type metadataPutObjectACLOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutObjectACLOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectACLOutput) GoString() string { - return s.String() -} - -type PutObjectInput struct { - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` - - // Object data. - Body io.ReadSeeker `type:"blob"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. 
- ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. 
Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // The type of storage to use for the object. Defaults to 'STANDARD'. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` - - metadataPutObjectInput `json:"-" xml:"-"` -} - -type metadataPutObjectInput struct { - SDKShapeTraits bool `type:"structure" payload:"Body"` -} - -// String returns the string representation -func (s PutObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectInput) GoString() string { - return s.String() -} - -type PutObjectOutput struct { - // Entity tag for the uploaded object. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If the object expiration is configured, this will contain the expiration - // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. - Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - // Version of the object. 
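Body on PutObjectInput above is typed as an io.ReadSeeker blob, so any seekable reader such as a bytes.Reader satisfies it. A short sketch of an upload that requests AES256 server-side encryption, assuming a PutObject method matching the Input/Output pair here; bucket, key, and content are placeholders and the imports mirror the generated examples further down:

svc := s3.New(nil)
resp, err := svc.PutObject(&s3.PutObjectInput{
	Bucket:               aws.String("BucketName"),               // Required
	Key:                  aws.String("ObjectKey"),                // Required
	Body:                 bytes.NewReader([]byte("object data")), // any io.ReadSeeker
	ContentType:          aws.String("text/plain"),
	ServerSideEncryption: aws.String(s3.ServerSideEncryptionAes256),
	StorageClass:         aws.String(s3.StorageClassStandard),
})
if err != nil {
	fmt.Println(err.Error())
} else {
	// ETag comes back as a header; VersionID is also set on versioned buckets.
	fmt.Println(aws.StringValue(resp.ETag))
}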
- VersionID *string `location:"header" locationName:"x-amz-version-id" type:"string"` - - metadataPutObjectOutput `json:"-" xml:"-"` -} - -type metadataPutObjectOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s PutObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutObjectOutput) GoString() string { - return s.String() -} - -// Container for specifying an configuration when you want Amazon S3 to publish -// events to an Amazon Simple Queue Service (Amazon SQS) queue. -type QueueConfiguration struct { - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - ID *string `locationName:"Id" type:"string"` - - // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects - // events of specified type. - QueueARN *string `locationName:"Queue" type:"string" required:"true"` - - metadataQueueConfiguration `json:"-" xml:"-"` -} - -type metadataQueueConfiguration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s QueueConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s QueueConfiguration) GoString() string { - return s.String() -} - -type QueueConfigurationDeprecated struct { - // Bucket event for which to send notifications. - Event *string `type:"string" enum:"Event"` - - Events []*string `locationName:"Event" type:"list" flattened:"true"` - - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - ID *string `locationName:"Id" type:"string"` - - Queue *string `type:"string"` - - metadataQueueConfigurationDeprecated `json:"-" xml:"-"` -} - -type metadataQueueConfigurationDeprecated struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s QueueConfigurationDeprecated) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s QueueConfigurationDeprecated) GoString() string { - return s.String() -} - -type Redirect struct { - // The HTTP redirect code to use on the response. Not required if one of the - // siblings is present. - HTTPRedirectCode *string `locationName:"HttpRedirectCode" type:"string"` - - // The host name to use in the redirect request. - HostName *string `type:"string"` - - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. - Protocol *string `type:"string" enum:"Protocol"` - - // The object key prefix to use in the redirect request. For example, to redirect - // requests for all pages with prefix docs/ (objects in the docs/ folder) to - // documents/, you can set a condition block with KeyPrefixEquals set to docs/ - // and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required - // if one of the siblings is present. Can be present only if ReplaceKeyWith - // is not provided. - ReplaceKeyPrefixWith *string `type:"string"` - - // The specific object key to use in the redirect request. For example, redirect - // request to error.html. Not required if one of the sibling is present. Can - // be present only if ReplaceKeyPrefixWith is not provided. 
- ReplaceKeyWith *string `type:"string"` - - metadataRedirect `json:"-" xml:"-"` -} - -type metadataRedirect struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Redirect) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Redirect) GoString() string { - return s.String() -} - -type RedirectAllRequestsTo struct { - // Name of the host where requests will be redirected. - HostName *string `type:"string" required:"true"` - - // Protocol to use (http, https) when redirecting requests. The default is the - // protocol that is used in the original request. - Protocol *string `type:"string" enum:"Protocol"` - - metadataRedirectAllRequestsTo `json:"-" xml:"-"` -} - -type metadataRedirectAllRequestsTo struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s RedirectAllRequestsTo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RedirectAllRequestsTo) GoString() string { - return s.String() -} - -// Container for replication rules. You can add as many as 1,000 rules. Total -// replication configuration size can be up to 2 MB. -type ReplicationConfiguration struct { - // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating - // the objects. - Role *string `type:"string" required:"true"` - - // Container for information about a particular replication rule. Replication - // configuration must have at least one rule and can contain up to 1,000 rules. - Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` - - metadataReplicationConfiguration `json:"-" xml:"-"` -} - -type metadataReplicationConfiguration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ReplicationConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ReplicationConfiguration) GoString() string { - return s.String() -} - -type ReplicationRule struct { - Destination *Destination `type:"structure" required:"true"` - - // Unique identifier for the rule. The value cannot be longer than 255 characters. - ID *string `type:"string"` - - // Object keyname prefix identifying one or more objects to which the rule applies. - // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes - // are not supported. - Prefix *string `type:"string" required:"true"` - - // The rule is ignored if status is not Enabled. - Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` - - metadataReplicationRule `json:"-" xml:"-"` -} - -type metadataReplicationRule struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s ReplicationRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ReplicationRule) GoString() string { - return s.String() -} - -type RequestPaymentConfiguration struct { - // Specifies who pays for the download and request fees. 
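RedirectAllRequestsTo above is the whole-bucket variant of the per-rule Redirect: when it is set in a WebsiteConfiguration, requests to the bucket's website endpoint are redirected to the named host instead of being routed by individual rules. An illustrative sketch, assuming a PutBucketWebsite method matching the PutBucketWebsiteInput shape earlier in this file, with placeholder names:

svc := s3.New(nil)
_, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
	Bucket: aws.String("BucketName"), // Required
	WebsiteConfiguration: &s3.WebsiteConfiguration{ // Required
		RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{
			HostName: aws.String("example.com"),    // Required
			Protocol: aws.String(s3.ProtocolHttps), // optional; defaults to the request's protocol
		},
	},
})
if err != nil {
	fmt.Println(err.Error())
}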
- Payer *string `type:"string" required:"true" enum:"Payer"` - - metadataRequestPaymentConfiguration `json:"-" xml:"-"` -} - -type metadataRequestPaymentConfiguration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s RequestPaymentConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RequestPaymentConfiguration) GoString() string { - return s.String() -} - -type RestoreObjectInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"` - - VersionID *string `location:"querystring" locationName:"versionId" type:"string"` - - metadataRestoreObjectInput `json:"-" xml:"-"` -} - -type metadataRestoreObjectInput struct { - SDKShapeTraits bool `type:"structure" payload:"RestoreRequest"` -} - -// String returns the string representation -func (s RestoreObjectInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RestoreObjectInput) GoString() string { - return s.String() -} - -type RestoreObjectOutput struct { - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - metadataRestoreObjectOutput `json:"-" xml:"-"` -} - -type metadataRestoreObjectOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s RestoreObjectOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RestoreObjectOutput) GoString() string { - return s.String() -} - -type RestoreRequest struct { - // Lifetime of the active copy in days - Days *int64 `type:"integer" required:"true"` - - metadataRestoreRequest `json:"-" xml:"-"` -} - -type metadataRestoreRequest struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s RestoreRequest) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RestoreRequest) GoString() string { - return s.String() -} - -type RoutingRule struct { - // A container for describing a condition that must be met for the specified - // redirect to apply. For example, 1. If request is for pages in the /docs folder, - // redirect to the /documents folder. 2. If request results in HTTP error 4xx, - // redirect request to another host where you might process the error. - Condition *Condition `type:"structure"` - - // Container for redirect information. You can redirect requests to another - // host, to another page, or with another protocol. In the event of an error, - // you can can specify a different error code to return. 
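RestoreRequest above carries only the lifetime of the temporary copy, so bringing back an archived (GLACIER-class) object is a single call. A minimal sketch, assuming a RestoreObject method matching the Input/Output pair and placeholder names:

svc := s3.New(nil)
_, err := svc.RestoreObject(&s3.RestoreObjectInput{
	Bucket: aws.String("BucketName"), // Required
	Key:    aws.String("ObjectKey"),  // Required
	RestoreRequest: &s3.RestoreRequest{
		Days: aws.Int64(7), // Required: how long the restored copy stays available
	},
})
if err != nil {
	fmt.Println(err.Error())
}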
- Redirect *Redirect `type:"structure" required:"true"` - - metadataRoutingRule `json:"-" xml:"-"` -} - -type metadataRoutingRule struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s RoutingRule) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RoutingRule) GoString() string { - return s.String() -} - -type Tag struct { - // Name of the tag. - Key *string `type:"string" required:"true"` - - // Value of the tag. - Value *string `type:"string" required:"true"` - - metadataTag `json:"-" xml:"-"` -} - -type metadataTag struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Tag) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Tag) GoString() string { - return s.String() -} - -type Tagging struct { - TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` - - metadataTagging `json:"-" xml:"-"` -} - -type metadataTagging struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Tagging) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Tagging) GoString() string { - return s.String() -} - -type TargetGrant struct { - Grantee *Grantee `type:"structure"` - - // Logging permissions assigned to the Grantee for the bucket. - Permission *string `type:"string" enum:"BucketLogsPermission"` - - metadataTargetGrant `json:"-" xml:"-"` -} - -type metadataTargetGrant struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s TargetGrant) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TargetGrant) GoString() string { - return s.String() -} - -// Container for specifying the configuration when you want Amazon S3 to publish -// events to an Amazon Simple Notification Service (Amazon SNS) topic. -type TopicConfiguration struct { - Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` - - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - ID *string `locationName:"Id" type:"string"` - - // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects - // events of specified type. - TopicARN *string `locationName:"Topic" type:"string" required:"true"` - - metadataTopicConfiguration `json:"-" xml:"-"` -} - -type metadataTopicConfiguration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s TopicConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TopicConfiguration) GoString() string { - return s.String() -} - -type TopicConfigurationDeprecated struct { - // Bucket event for which to send notifications. - Event *string `type:"string" enum:"Event"` - - Events []*string `locationName:"Event" type:"list" flattened:"true"` - - // Optional unique identifier for configurations in a notification configuration. - // If you don't provide one, Amazon S3 will assign an ID. - ID *string `locationName:"Id" type:"string"` - - // Amazon SNS topic to which Amazon S3 will publish a message to report the - // specified events for the bucket. 
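A Tagging value is just a list of required Key/Value pairs. A short sketch of attaching a tag set to a bucket: the field layout matches the PutBucketTagging test later in this patch, the plain PutBucketTagging call (rather than the Request constructor used there) is assumed, and the tag names are placeholders. As customizations.go below notes, a Content-MD5 header is added for this operation automatically:

svc := s3.New(nil)
_, err := svc.PutBucketTagging(&s3.PutBucketTaggingInput{
	Bucket: aws.String("BucketName"), // Required
	Tagging: &s3.Tagging{ // Required
		TagSet: []*s3.Tag{
			{Key: aws.String("environment"), Value: aws.String("staging")},
			{Key: aws.String("team"), Value: aws.String("ops")},
		},
	},
})
if err != nil {
	fmt.Println(err.Error())
}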
- Topic *string `type:"string"` - - metadataTopicConfigurationDeprecated `json:"-" xml:"-"` -} - -type metadataTopicConfigurationDeprecated struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s TopicConfigurationDeprecated) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TopicConfigurationDeprecated) GoString() string { - return s.String() -} - -type Transition struct { - // Indicates at what date the object is to be moved or deleted. Should be in - // GMT ISO 8601 Format. - Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` - - // Indicates the lifetime, in days, of the objects that are subject to the rule. - // The value must be a non-zero positive integer. - Days *int64 `type:"integer"` - - // The class of storage used to store the object. - StorageClass *string `type:"string" enum:"TransitionStorageClass"` - - metadataTransition `json:"-" xml:"-"` -} - -type metadataTransition struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s Transition) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Transition) GoString() string { - return s.String() -} - -type UploadPartCopyInput struct { - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // The name of the source bucket and key name of the source object, separated - // by a slash (/). Must be URL-encoded. - CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` - - // Copies the object if its entity tag (ETag) matches the specified tag. - CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` - - // Copies the object if it has been modified since the specified time. - CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` - - // Copies the object if its entity tag (ETag) is different than the specified - // ETag. - CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` - - // Copies the object if it hasn't been modified since the specified time. - CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` - - // The range of bytes to copy from the source object. The range value must use - // the form bytes=first-last, where the first and last are the zero-based byte - // offsets to copy. For example, bytes=0-9 indicates that you want to copy the - // first ten bytes of the source. You can copy a range only if the source object - // is greater than 5 GB. - CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` - - // Specifies the algorithm to use when decrypting the source object (e.g., AES256). - CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt - // the source object. The encryption key provided in this header must be one - // that was used when the source object was created. 
- CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // Part number of part being copied. This is a positive integer between 1 and - // 10,000. - PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. This must be the same encryption key specified in the initiate multipart - // upload request. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Upload ID identifying the multipart upload whose part is being copied. - UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` - - metadataUploadPartCopyInput `json:"-" xml:"-"` -} - -type metadataUploadPartCopyInput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s UploadPartCopyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartCopyInput) GoString() string { - return s.String() -} - -type UploadPartCopyOutput struct { - CopyPartResult *CopyPartResult `type:"structure"` - - // The version of the source object that was copied, if you have enabled versioning - // on the source bucket. - CopySourceVersionID *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. 
- RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - metadataUploadPartCopyOutput `json:"-" xml:"-"` -} - -type metadataUploadPartCopyOutput struct { - SDKShapeTraits bool `type:"structure" payload:"CopyPartResult"` -} - -// String returns the string representation -func (s UploadPartCopyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartCopyOutput) GoString() string { - return s.String() -} - -type UploadPartInput struct { - Body io.ReadSeeker `type:"blob"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Size of the body in bytes. This parameter is useful when the size of the - // body cannot be determined automatically. - ContentLength *int64 `location:"header" locationName:"Content-Length" type:"integer"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // Part number of part being uploaded. This is a positive integer between 1 - // and 10,000. - PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. - // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` - - // Specifies the algorithm to use to when encrypting the object (e.g., AES256). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. This must be the same encryption key specified in the initiate multipart - // upload request. 
- SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Upload ID identifying the multipart upload whose part is being uploaded. - UploadID *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` - - metadataUploadPartInput `json:"-" xml:"-"` -} - -type metadataUploadPartInput struct { - SDKShapeTraits bool `type:"structure" payload:"Body"` -} - -// String returns the string representation -func (s UploadPartInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartInput) GoString() string { - return s.String() -} - -type UploadPartOutput struct { - // Entity tag for the uploaded object. - ETag *string `location:"header" locationName:"ETag" type:"string"` - - // If present, indicates that the requester was successfully charged for the - // request. - RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header confirming the encryption algorithm - // used. - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // If server-side encryption with a customer-provided encryption key was requested, - // the response will include this header to provide round trip message integrity - // verification of the customer-provided encryption key. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // If present, specifies the ID of the AWS Key Management Service (KMS) master - // encryption key that was used for the object. - SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` - - metadataUploadPartOutput `json:"-" xml:"-"` -} - -type metadataUploadPartOutput struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s UploadPartOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s UploadPartOutput) GoString() string { - return s.String() -} - -type VersioningConfiguration struct { - // Specifies whether MFA delete is enabled in the bucket versioning configuration. - // This element is only returned if the bucket has been configured with MFA - // delete. If the bucket has never been so configured, this element is not returned. - MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` - - // The versioning state of the bucket. 
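Each part of a multipart upload is sent with the upload ID from the initiate call and a part number between 1 and 10,000; the ETag in the response is what CompleteMultipartUpload expects back per part (see the CompletedPart values in the generated example near the end of this file). A sketch under those assumptions, with the UploadPart method name inferred from the Input/Output pair and partData standing in for one chunk of the object:

svc := s3.New(nil)
resp, err := svc.UploadPart(&s3.UploadPartInput{
	Bucket:     aws.String("BucketName"),        // Required
	Key:        aws.String("ObjectKey"),         // Required
	UploadID:   aws.String("MultipartUploadId"), // Required: returned by the initiate call
	PartNumber: aws.Int64(1),                    // Required: 1..10,000
	Body:       bytes.NewReader(partData),       // partData is a placeholder []byte chunk
})
if err != nil {
	fmt.Println(err.Error())
} else {
	// Keep the ETag; CompleteMultipartUpload needs one per uploaded part.
	fmt.Println(aws.StringValue(resp.ETag))
}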
- Status *string `type:"string" enum:"BucketVersioningStatus"` - - metadataVersioningConfiguration `json:"-" xml:"-"` -} - -type metadataVersioningConfiguration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s VersioningConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s VersioningConfiguration) GoString() string { - return s.String() -} - -type WebsiteConfiguration struct { - ErrorDocument *ErrorDocument `type:"structure"` - - IndexDocument *IndexDocument `type:"structure"` - - RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` - - RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` - - metadataWebsiteConfiguration `json:"-" xml:"-"` -} - -type metadataWebsiteConfiguration struct { - SDKShapeTraits bool `type:"structure"` -} - -// String returns the string representation -func (s WebsiteConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s WebsiteConfiguration) GoString() string { - return s.String() -} - -const ( - // @enum BucketCannedACL - BucketCannedACLPrivate = "private" - // @enum BucketCannedACL - BucketCannedACLPublicRead = "public-read" - // @enum BucketCannedACL - BucketCannedACLPublicReadWrite = "public-read-write" - // @enum BucketCannedACL - BucketCannedACLAuthenticatedRead = "authenticated-read" -) - -const ( - // @enum BucketLocationConstraint - BucketLocationConstraintEu = "EU" - // @enum BucketLocationConstraint - BucketLocationConstraintEuWest1 = "eu-west-1" - // @enum BucketLocationConstraint - BucketLocationConstraintUsWest1 = "us-west-1" - // @enum BucketLocationConstraint - BucketLocationConstraintUsWest2 = "us-west-2" - // @enum BucketLocationConstraint - BucketLocationConstraintApSoutheast1 = "ap-southeast-1" - // @enum BucketLocationConstraint - BucketLocationConstraintApSoutheast2 = "ap-southeast-2" - // @enum BucketLocationConstraint - BucketLocationConstraintApNortheast1 = "ap-northeast-1" - // @enum BucketLocationConstraint - BucketLocationConstraintSaEast1 = "sa-east-1" - // @enum BucketLocationConstraint - BucketLocationConstraintCnNorth1 = "cn-north-1" - // @enum BucketLocationConstraint - BucketLocationConstraintEuCentral1 = "eu-central-1" -) - -const ( - // @enum BucketLogsPermission - BucketLogsPermissionFullControl = "FULL_CONTROL" - // @enum BucketLogsPermission - BucketLogsPermissionRead = "READ" - // @enum BucketLogsPermission - BucketLogsPermissionWrite = "WRITE" -) - -const ( - // @enum BucketVersioningStatus - BucketVersioningStatusEnabled = "Enabled" - // @enum BucketVersioningStatus - BucketVersioningStatusSuspended = "Suspended" -) - -// Requests Amazon S3 to encode the object keys in the response and specifies -// the encoding method to use. An object key may contain any Unicode character; -// however, XML 1.0 parser cannot parse some characters, such as characters -// with an ASCII value from 0 to 10. For characters that are not supported in -// XML 1.0, you can add this parameter to request that Amazon S3 encode the -// keys in the response. -const ( - // @enum EncodingType - EncodingTypeUrl = "url" -) - -// Bucket event for which to send notifications. 
-const ( - // @enum Event - EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" - // @enum Event - EventS3ObjectCreated = "s3:ObjectCreated:*" - // @enum Event - EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" - // @enum Event - EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" - // @enum Event - EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" - // @enum Event - EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" - // @enum Event - EventS3ObjectRemoved = "s3:ObjectRemoved:*" - // @enum Event - EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" - // @enum Event - EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" -) - -const ( - // @enum ExpirationStatus - ExpirationStatusEnabled = "Enabled" - // @enum ExpirationStatus - ExpirationStatusDisabled = "Disabled" -) - -const ( - // @enum MFADelete - MFADeleteEnabled = "Enabled" - // @enum MFADelete - MFADeleteDisabled = "Disabled" -) - -const ( - // @enum MFADeleteStatus - MFADeleteStatusEnabled = "Enabled" - // @enum MFADeleteStatus - MFADeleteStatusDisabled = "Disabled" -) - -const ( - // @enum MetadataDirective - MetadataDirectiveCopy = "COPY" - // @enum MetadataDirective - MetadataDirectiveReplace = "REPLACE" -) - -const ( - // @enum ObjectCannedACL - ObjectCannedACLPrivate = "private" - // @enum ObjectCannedACL - ObjectCannedACLPublicRead = "public-read" - // @enum ObjectCannedACL - ObjectCannedACLPublicReadWrite = "public-read-write" - // @enum ObjectCannedACL - ObjectCannedACLAuthenticatedRead = "authenticated-read" - // @enum ObjectCannedACL - ObjectCannedACLBucketOwnerRead = "bucket-owner-read" - // @enum ObjectCannedACL - ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" -) - -const ( - // @enum ObjectStorageClass - ObjectStorageClassStandard = "STANDARD" - // @enum ObjectStorageClass - ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" - // @enum ObjectStorageClass - ObjectStorageClassGlacier = "GLACIER" -) - -const ( - // @enum ObjectVersionStorageClass - ObjectVersionStorageClassStandard = "STANDARD" -) - -const ( - // @enum Payer - PayerRequester = "Requester" - // @enum Payer - PayerBucketOwner = "BucketOwner" -) - -const ( - // @enum Permission - PermissionFullControl = "FULL_CONTROL" - // @enum Permission - PermissionWrite = "WRITE" - // @enum Permission - PermissionWriteAcp = "WRITE_ACP" - // @enum Permission - PermissionRead = "READ" - // @enum Permission - PermissionReadAcp = "READ_ACP" -) - -const ( - // @enum Protocol - ProtocolHttp = "http" - // @enum Protocol - ProtocolHttps = "https" -) - -const ( - // @enum ReplicationRuleStatus - ReplicationRuleStatusEnabled = "Enabled" - // @enum ReplicationRuleStatus - ReplicationRuleStatusDisabled = "Disabled" -) - -const ( - // @enum ReplicationStatus - ReplicationStatusComplete = "COMPLETE" - // @enum ReplicationStatus - ReplicationStatusPending = "PENDING" - // @enum ReplicationStatus - ReplicationStatusFailed = "FAILED" - // @enum ReplicationStatus - ReplicationStatusReplica = "REPLICA" -) - -// If present, indicates that the requester was successfully charged for the -// request. -const ( - // @enum RequestCharged - RequestChargedRequester = "requester" -) - -// Confirms that the requester knows that she or he will be charged for the -// request. Bucket owners need not specify this parameter in their requests. 
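The @enum constants above and below exist so callers can avoid hard-coding the wire strings; the ObjectCannedACL values, for instance, pair with the ACL header on PutObjectACLInput shown earlier. A brief sketch, assuming a PutObjectACL method matching that Input/Output pair and placeholder names:

svc := s3.New(nil)
_, err := svc.PutObjectACL(&s3.PutObjectACLInput{
	Bucket: aws.String("BucketName"), // Required
	Key:    aws.String("ObjectKey"),  // Required
	ACL:    aws.String(s3.ObjectCannedACLPublicRead), // "public-read"
})
if err != nil {
	fmt.Println(err.Error())
}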
-// Documentation on downloading objects from requester pays buckets can be found -// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html -const ( - // @enum RequestPayer - RequestPayerRequester = "requester" -) - -const ( - // @enum ServerSideEncryption - ServerSideEncryptionAes256 = "AES256" - // @enum ServerSideEncryption - ServerSideEncryptionAwsKms = "aws:kms" -) - -const ( - // @enum StorageClass - StorageClassStandard = "STANDARD" - // @enum StorageClass - StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" - // @enum StorageClass - StorageClassLt = "LT" -) - -const ( - // @enum TransitionStorageClass - TransitionStorageClassGlacier = "GLACIER" -) - -const ( - // @enum Type - TypeCanonicalUser = "CanonicalUser" - // @enum Type - TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" - // @enum Type - TypeGroup = "Group" -) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go deleted file mode 100644 index 6194333..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location.go +++ /dev/null @@ -1,42 +0,0 @@ -package s3 - -import ( - "io/ioutil" - "regexp" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) - -func buildGetBucketLocation(r *aws.Request) { - if r.DataFilled() { - out := r.Data.(*GetBucketLocationOutput) - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed reading response body", err) - return - } - - match := reBucketLocation.FindSubmatch(b) - if len(match) > 1 { - loc := string(match[1]) - out.LocationConstraint = &loc - } - } -} - -func populateLocationConstraint(r *aws.Request) { - if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { - in := r.Params.(*CreateBucketInput) - if in.CreateBucketConfiguration == nil { - r.Params = awsutil.CopyOf(r.Params) - in = r.Params.(*CreateBucketInput) - in.CreateBucketConfiguration = &CreateBucketConfiguration{ - LocationConstraint: r.Config.Region, - } - } - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go deleted file mode 100644 index 3123b7f..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/bucket_location_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package s3_test - -import ( - "bytes" - "io/ioutil" - "net/http" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/internal/test/unit" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/stretchr/testify/assert" -) - -var _ = unit.Imported -var s3LocationTests = []struct { - body string - loc string -}{ - {``, ``}, - {`EU`, `EU`}, -} - -func TestGetBucketLocation(t *testing.T) { - for _, test := range s3LocationTests { - s := s3.New(nil) - s.Handlers.Send.Clear() - s.Handlers.Send.PushBack(func(r *aws.Request) { - reader := ioutil.NopCloser(bytes.NewReader([]byte(test.body))) - r.HTTPResponse = &http.Response{StatusCode: 200, Body: reader} - }) - - resp, err := s.GetBucketLocation(&s3.GetBucketLocationInput{Bucket: aws.String("bucket")}) - assert.NoError(t, err) - if test.loc == "" { - assert.Nil(t, resp.LocationConstraint) - } else { - assert.Equal(t, test.loc, 
*resp.LocationConstraint) - } - } -} - -func TestPopulateLocationConstraint(t *testing.T) { - s := s3.New(nil) - in := &s3.CreateBucketInput{ - Bucket: aws.String("bucket"), - } - req, _ := s.CreateBucketRequest(in) - err := req.Build() - assert.NoError(t, err) - assert.Equal(t, "mock-region", awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint")[0]) - assert.Nil(t, in.CreateBucketConfiguration) // don't modify original params -} - -func TestNoPopulateLocationConstraintIfProvided(t *testing.T) { - s := s3.New(nil) - req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{ - Bucket: aws.String("bucket"), - CreateBucketConfiguration: &s3.CreateBucketConfiguration{}, - }) - err := req.Build() - assert.NoError(t, err) - assert.Equal(t, 0, len(awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint"))) -} - -func TestNoPopulateLocationConstraintIfClassic(t *testing.T) { - s := s3.New(&aws.Config{Region: aws.String("us-east-1")}) - req, _ := s.CreateBucketRequest(&s3.CreateBucketInput{ - Bucket: aws.String("bucket"), - }) - err := req.Build() - assert.NoError(t, err) - assert.Equal(t, 0, len(awsutil.ValuesAtPath(req.Params, "CreateBucketConfiguration.LocationConstraint"))) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go deleted file mode 100644 index 490ff2d..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations.go +++ /dev/null @@ -1,32 +0,0 @@ -package s3 - -import "github.com/aws/aws-sdk-go/aws" - -func init() { - initService = func(s *aws.Service) { - // Support building custom host-style bucket endpoints - s.Handlers.Build.PushFront(updateHostWithBucket) - - // Require SSL when using SSE keys - s.Handlers.Validate.PushBack(validateSSERequiresSSL) - s.Handlers.Build.PushBack(computeSSEKeys) - - // S3 uses custom error unmarshaling logic - s.Handlers.UnmarshalError.Clear() - s.Handlers.UnmarshalError.PushBack(unmarshalError) - } - - initRequest = func(r *aws.Request) { - switch r.Operation.Name { - case opPutBucketCORS, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects: - // These S3 operations require Content-MD5 to be set - r.Handlers.Build.PushBack(contentMD5) - case opGetBucketLocation: - // GetBucketLocation has custom parsing logic - r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) - case opCreateBucket: - // Auto-populate LocationConstraint with current region - r.Handlers.Validate.PushFront(populateLocationConstraint) - } - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go deleted file mode 100644 index 523b487..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/customizations_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package s3_test - -import ( - "crypto/md5" - "encoding/base64" - "io/ioutil" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/internal/test/unit" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/stretchr/testify/assert" -) - -var _ = unit.Imported - -func assertMD5(t *testing.T, req *aws.Request) { - err := req.Build() - assert.NoError(t, err) - - b, _ := ioutil.ReadAll(req.HTTPRequest.Body) - out := md5.Sum(b) - assert.NotEmpty(t, b) - assert.Equal(t, base64.StdEncoding.EncodeToString(out[:]), req.HTTPRequest.Header.Get("Content-MD5")) -} - -func 
TestMD5InPutBucketCORS(t *testing.T) { - svc := s3.New(nil) - req, _ := svc.PutBucketCORSRequest(&s3.PutBucketCORSInput{ - Bucket: aws.String("bucketname"), - CORSConfiguration: &s3.CORSConfiguration{ - CORSRules: []*s3.CORSRule{ - {AllowedMethods: []*string{aws.String("GET")}}, - }, - }, - }) - assertMD5(t, req) -} - -func TestMD5InPutBucketLifecycle(t *testing.T) { - svc := s3.New(nil) - req, _ := svc.PutBucketLifecycleRequest(&s3.PutBucketLifecycleInput{ - Bucket: aws.String("bucketname"), - LifecycleConfiguration: &s3.LifecycleConfiguration{ - Rules: []*s3.LifecycleRule{ - { - ID: aws.String("ID"), - Prefix: aws.String("Prefix"), - Status: aws.String("Enabled"), - }, - }, - }, - }) - assertMD5(t, req) -} - -func TestMD5InPutBucketPolicy(t *testing.T) { - svc := s3.New(nil) - req, _ := svc.PutBucketPolicyRequest(&s3.PutBucketPolicyInput{ - Bucket: aws.String("bucketname"), - Policy: aws.String("{}"), - }) - assertMD5(t, req) -} - -func TestMD5InPutBucketTagging(t *testing.T) { - svc := s3.New(nil) - req, _ := svc.PutBucketTaggingRequest(&s3.PutBucketTaggingInput{ - Bucket: aws.String("bucketname"), - Tagging: &s3.Tagging{ - TagSet: []*s3.Tag{ - {Key: aws.String("KEY"), Value: aws.String("VALUE")}, - }, - }, - }) - assertMD5(t, req) -} - -func TestMD5InDeleteObjects(t *testing.T) { - svc := s3.New(nil) - req, _ := svc.DeleteObjectsRequest(&s3.DeleteObjectsInput{ - Bucket: aws.String("bucketname"), - Delete: &s3.Delete{ - Objects: []*s3.ObjectIdentifier{ - {Key: aws.String("key")}, - }, - }, - }) - assertMD5(t, req) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go deleted file mode 100644 index fc08a44..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/examples_test.go +++ /dev/null @@ -1,1928 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. - -package s3_test - -import ( - "bytes" - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/service/s3" -) - -var _ time.Duration -var _ bytes.Buffer - -func ExampleS3_AbortMultipartUpload() { - svc := s3.New(nil) - - params := &s3.AbortMultipartUploadInput{ - Bucket: aws.String("BucketName"), // Required - Key: aws.String("ObjectKey"), // Required - UploadID: aws.String("MultipartUploadId"), // Required - RequestPayer: aws.String("RequestPayer"), - } - resp, err := svc.AbortMultipartUpload(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_CompleteMultipartUpload() { - svc := s3.New(nil) - - params := &s3.CompleteMultipartUploadInput{ - Bucket: aws.String("BucketName"), // Required - Key: aws.String("ObjectKey"), // Required - UploadID: aws.String("MultipartUploadId"), // Required - MultipartUpload: &s3.CompletedMultipartUpload{ - Parts: []*s3.CompletedPart{ - { // Required - ETag: aws.String("ETag"), - PartNumber: aws.Int64(1), - }, - // More values... - }, - }, - RequestPayer: aws.String("RequestPayer"), - } - resp, err := svc.CompleteMultipartUpload(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_CopyObject() { - svc := s3.New(nil) - - params := &s3.CopyObjectInput{ - Bucket: aws.String("BucketName"), // Required - CopySource: aws.String("CopySource"), // Required - Key: aws.String("ObjectKey"), // Required - ACL: aws.String("ObjectCannedACL"), - CacheControl: aws.String("CacheControl"), - ContentDisposition: aws.String("ContentDisposition"), - ContentEncoding: aws.String("ContentEncoding"), - ContentLanguage: aws.String("ContentLanguage"), - ContentType: aws.String("ContentType"), - CopySourceIfMatch: aws.String("CopySourceIfMatch"), - CopySourceIfModifiedSince: aws.Time(time.Now()), - CopySourceIfNoneMatch: aws.String("CopySourceIfNoneMatch"), - CopySourceIfUnmodifiedSince: aws.Time(time.Now()), - CopySourceSSECustomerAlgorithm: aws.String("CopySourceSSECustomerAlgorithm"), - CopySourceSSECustomerKey: aws.String("CopySourceSSECustomerKey"), - CopySourceSSECustomerKeyMD5: aws.String("CopySourceSSECustomerKeyMD5"), - Expires: aws.Time(time.Now()), - GrantFullControl: aws.String("GrantFullControl"), - GrantRead: aws.String("GrantRead"), - GrantReadACP: aws.String("GrantReadACP"), - GrantWriteACP: aws.String("GrantWriteACP"), - Metadata: map[string]*string{ - "Key": aws.String("MetadataValue"), // Required - // More values... - }, - MetadataDirective: aws.String("MetadataDirective"), - RequestPayer: aws.String("RequestPayer"), - SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), - SSECustomerKey: aws.String("SSECustomerKey"), - SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), - SSEKMSKeyID: aws.String("SSEKMSKeyId"), - ServerSideEncryption: aws.String("ServerSideEncryption"), - StorageClass: aws.String("StorageClass"), - WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"), - } - resp, err := svc.CopyObject(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. 
- fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_CreateBucket() { - svc := s3.New(nil) - - params := &s3.CreateBucketInput{ - Bucket: aws.String("BucketName"), // Required - ACL: aws.String("BucketCannedACL"), - CreateBucketConfiguration: &s3.CreateBucketConfiguration{ - LocationConstraint: aws.String("BucketLocationConstraint"), - }, - GrantFullControl: aws.String("GrantFullControl"), - GrantRead: aws.String("GrantRead"), - GrantReadACP: aws.String("GrantReadACP"), - GrantWrite: aws.String("GrantWrite"), - GrantWriteACP: aws.String("GrantWriteACP"), - } - resp, err := svc.CreateBucket(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_CreateMultipartUpload() { - svc := s3.New(nil) - - params := &s3.CreateMultipartUploadInput{ - Bucket: aws.String("BucketName"), // Required - Key: aws.String("ObjectKey"), // Required - ACL: aws.String("ObjectCannedACL"), - CacheControl: aws.String("CacheControl"), - ContentDisposition: aws.String("ContentDisposition"), - ContentEncoding: aws.String("ContentEncoding"), - ContentLanguage: aws.String("ContentLanguage"), - ContentType: aws.String("ContentType"), - Expires: aws.Time(time.Now()), - GrantFullControl: aws.String("GrantFullControl"), - GrantRead: aws.String("GrantRead"), - GrantReadACP: aws.String("GrantReadACP"), - GrantWriteACP: aws.String("GrantWriteACP"), - Metadata: map[string]*string{ - "Key": aws.String("MetadataValue"), // Required - // More values... - }, - RequestPayer: aws.String("RequestPayer"), - SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), - SSECustomerKey: aws.String("SSECustomerKey"), - SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), - SSEKMSKeyID: aws.String("SSEKMSKeyId"), - ServerSideEncryption: aws.String("ServerSideEncryption"), - StorageClass: aws.String("StorageClass"), - WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"), - } - resp, err := svc.CreateMultipartUpload(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_DeleteBucket() { - svc := s3.New(nil) - - params := &s3.DeleteBucketInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.DeleteBucket(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_DeleteBucketCORS() { - svc := s3.New(nil) - - params := &s3.DeleteBucketCORSInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.DeleteBucketCORS(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_DeleteBucketLifecycle() { - svc := s3.New(nil) - - params := &s3.DeleteBucketLifecycleInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.DeleteBucketLifecycle(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_DeleteBucketPolicy() { - svc := s3.New(nil) - - params := &s3.DeleteBucketPolicyInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.DeleteBucketPolicy(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_DeleteBucketReplication() { - svc := s3.New(nil) - - params := &s3.DeleteBucketReplicationInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.DeleteBucketReplication(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_DeleteBucketTagging() { - svc := s3.New(nil) - - params := &s3.DeleteBucketTaggingInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.DeleteBucketTagging(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_DeleteBucketWebsite() { - svc := s3.New(nil) - - params := &s3.DeleteBucketWebsiteInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.DeleteBucketWebsite(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_DeleteObject() { - svc := s3.New(nil) - - params := &s3.DeleteObjectInput{ - Bucket: aws.String("BucketName"), // Required - Key: aws.String("ObjectKey"), // Required - MFA: aws.String("MFA"), - RequestPayer: aws.String("RequestPayer"), - VersionID: aws.String("ObjectVersionId"), - } - resp, err := svc.DeleteObject(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_DeleteObjects() { - svc := s3.New(nil) - - params := &s3.DeleteObjectsInput{ - Bucket: aws.String("BucketName"), // Required - Delete: &s3.Delete{ // Required - Objects: []*s3.ObjectIdentifier{ // Required - { // Required - Key: aws.String("ObjectKey"), // Required - VersionID: aws.String("ObjectVersionId"), - }, - // More values... - }, - Quiet: aws.Bool(true), - }, - MFA: aws.String("MFA"), - RequestPayer: aws.String("RequestPayer"), - } - resp, err := svc.DeleteObjects(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetBucketACL() { - svc := s3.New(nil) - - params := &s3.GetBucketACLInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.GetBucketACL(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetBucketCORS() { - svc := s3.New(nil) - - params := &s3.GetBucketCORSInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.GetBucketCORS(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetBucketLifecycle() { - svc := s3.New(nil) - - params := &s3.GetBucketLifecycleInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.GetBucketLifecycle(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. 
- fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetBucketLocation() { - svc := s3.New(nil) - - params := &s3.GetBucketLocationInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.GetBucketLocation(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetBucketLogging() { - svc := s3.New(nil) - - params := &s3.GetBucketLoggingInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.GetBucketLogging(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetBucketNotification() { - svc := s3.New(nil) - - params := &s3.GetBucketNotificationConfigurationRequest{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.GetBucketNotification(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetBucketNotificationConfiguration() { - svc := s3.New(nil) - - params := &s3.GetBucketNotificationConfigurationRequest{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.GetBucketNotificationConfiguration(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetBucketPolicy() { - svc := s3.New(nil) - - params := &s3.GetBucketPolicyInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.GetBucketPolicy(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetBucketReplication() { - svc := s3.New(nil) - - params := &s3.GetBucketReplicationInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.GetBucketReplication(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetBucketRequestPayment() { - svc := s3.New(nil) - - params := &s3.GetBucketRequestPaymentInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.GetBucketRequestPayment(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetBucketTagging() { - svc := s3.New(nil) - - params := &s3.GetBucketTaggingInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.GetBucketTagging(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetBucketVersioning() { - svc := s3.New(nil) - - params := &s3.GetBucketVersioningInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.GetBucketVersioning(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetBucketWebsite() { - svc := s3.New(nil) - - params := &s3.GetBucketWebsiteInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.GetBucketWebsite(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetObject() { - svc := s3.New(nil) - - params := &s3.GetObjectInput{ - Bucket: aws.String("BucketName"), // Required - Key: aws.String("ObjectKey"), // Required - IfMatch: aws.String("IfMatch"), - IfModifiedSince: aws.Time(time.Now()), - IfNoneMatch: aws.String("IfNoneMatch"), - IfUnmodifiedSince: aws.Time(time.Now()), - Range: aws.String("Range"), - RequestPayer: aws.String("RequestPayer"), - ResponseCacheControl: aws.String("ResponseCacheControl"), - ResponseContentDisposition: aws.String("ResponseContentDisposition"), - ResponseContentEncoding: aws.String("ResponseContentEncoding"), - ResponseContentLanguage: aws.String("ResponseContentLanguage"), - ResponseContentType: aws.String("ResponseContentType"), - ResponseExpires: aws.Time(time.Now()), - SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), - SSECustomerKey: aws.String("SSECustomerKey"), - SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), - VersionID: aws.String("ObjectVersionId"), - } - resp, err := svc.GetObject(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetObjectACL() { - svc := s3.New(nil) - - params := &s3.GetObjectACLInput{ - Bucket: aws.String("BucketName"), // Required - Key: aws.String("ObjectKey"), // Required - RequestPayer: aws.String("RequestPayer"), - VersionID: aws.String("ObjectVersionId"), - } - resp, err := svc.GetObjectACL(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_GetObjectTorrent() { - svc := s3.New(nil) - - params := &s3.GetObjectTorrentInput{ - Bucket: aws.String("BucketName"), // Required - Key: aws.String("ObjectKey"), // Required - RequestPayer: aws.String("RequestPayer"), - } - resp, err := svc.GetObjectTorrent(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_HeadBucket() { - svc := s3.New(nil) - - params := &s3.HeadBucketInput{ - Bucket: aws.String("BucketName"), // Required - } - resp, err := svc.HeadBucket(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_HeadObject() { - svc := s3.New(nil) - - params := &s3.HeadObjectInput{ - Bucket: aws.String("BucketName"), // Required - Key: aws.String("ObjectKey"), // Required - IfMatch: aws.String("IfMatch"), - IfModifiedSince: aws.Time(time.Now()), - IfNoneMatch: aws.String("IfNoneMatch"), - IfUnmodifiedSince: aws.Time(time.Now()), - Range: aws.String("Range"), - RequestPayer: aws.String("RequestPayer"), - SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), - SSECustomerKey: aws.String("SSECustomerKey"), - SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), - VersionID: aws.String("ObjectVersionId"), - } - resp, err := svc.HeadObject(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_ListBuckets() { - svc := s3.New(nil) - - var params *s3.ListBucketsInput - resp, err := svc.ListBuckets(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_ListMultipartUploads() { - svc := s3.New(nil) - - params := &s3.ListMultipartUploadsInput{ - Bucket: aws.String("BucketName"), // Required - Delimiter: aws.String("Delimiter"), - EncodingType: aws.String("EncodingType"), - KeyMarker: aws.String("KeyMarker"), - MaxUploads: aws.Int64(1), - Prefix: aws.String("Prefix"), - UploadIDMarker: aws.String("UploadIdMarker"), - } - resp, err := svc.ListMultipartUploads(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_ListObjectVersions() { - svc := s3.New(nil) - - params := &s3.ListObjectVersionsInput{ - Bucket: aws.String("BucketName"), // Required - Delimiter: aws.String("Delimiter"), - EncodingType: aws.String("EncodingType"), - KeyMarker: aws.String("KeyMarker"), - MaxKeys: aws.Int64(1), - Prefix: aws.String("Prefix"), - VersionIDMarker: aws.String("VersionIdMarker"), - } - resp, err := svc.ListObjectVersions(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_ListObjects() { - svc := s3.New(nil) - - params := &s3.ListObjectsInput{ - Bucket: aws.String("BucketName"), // Required - Delimiter: aws.String("Delimiter"), - EncodingType: aws.String("EncodingType"), - Marker: aws.String("Marker"), - MaxKeys: aws.Int64(1), - Prefix: aws.String("Prefix"), - } - resp, err := svc.ListObjects(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_ListParts() { - svc := s3.New(nil) - - params := &s3.ListPartsInput{ - Bucket: aws.String("BucketName"), // Required - Key: aws.String("ObjectKey"), // Required - UploadID: aws.String("MultipartUploadId"), // Required - MaxParts: aws.Int64(1), - PartNumberMarker: aws.Int64(1), - RequestPayer: aws.String("RequestPayer"), - } - resp, err := svc.ListParts(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutBucketACL() { - svc := s3.New(nil) - - params := &s3.PutBucketACLInput{ - Bucket: aws.String("BucketName"), // Required - ACL: aws.String("BucketCannedACL"), - AccessControlPolicy: &s3.AccessControlPolicy{ - Grants: []*s3.Grant{ - { // Required - Grantee: &s3.Grantee{ - Type: aws.String("Type"), // Required - DisplayName: aws.String("DisplayName"), - EmailAddress: aws.String("EmailAddress"), - ID: aws.String("ID"), - URI: aws.String("URI"), - }, - Permission: aws.String("Permission"), - }, - // More values... - }, - Owner: &s3.Owner{ - DisplayName: aws.String("DisplayName"), - ID: aws.String("ID"), - }, - }, - GrantFullControl: aws.String("GrantFullControl"), - GrantRead: aws.String("GrantRead"), - GrantReadACP: aws.String("GrantReadACP"), - GrantWrite: aws.String("GrantWrite"), - GrantWriteACP: aws.String("GrantWriteACP"), - } - resp, err := svc.PutBucketACL(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutBucketCORS() { - svc := s3.New(nil) - - params := &s3.PutBucketCORSInput{ - Bucket: aws.String("BucketName"), // Required - CORSConfiguration: &s3.CORSConfiguration{ - CORSRules: []*s3.CORSRule{ - { // Required - AllowedHeaders: []*string{ - aws.String("AllowedHeader"), // Required - // More values... - }, - AllowedMethods: []*string{ - aws.String("AllowedMethod"), // Required - // More values... - }, - AllowedOrigins: []*string{ - aws.String("AllowedOrigin"), // Required - // More values... - }, - ExposeHeaders: []*string{ - aws.String("ExposeHeader"), // Required - // More values... - }, - MaxAgeSeconds: aws.Int64(1), - }, - // More values... - }, - }, - } - resp, err := svc.PutBucketCORS(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutBucketLifecycle() { - svc := s3.New(nil) - - params := &s3.PutBucketLifecycleInput{ - Bucket: aws.String("BucketName"), // Required - LifecycleConfiguration: &s3.LifecycleConfiguration{ - Rules: []*s3.LifecycleRule{ // Required - { // Required - Prefix: aws.String("Prefix"), // Required - Status: aws.String("ExpirationStatus"), // Required - Expiration: &s3.LifecycleExpiration{ - Date: aws.Time(time.Now()), - Days: aws.Int64(1), - }, - ID: aws.String("ID"), - NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{ - NoncurrentDays: aws.Int64(1), - }, - NoncurrentVersionTransition: &s3.NoncurrentVersionTransition{ - NoncurrentDays: aws.Int64(1), - StorageClass: aws.String("TransitionStorageClass"), - }, - Transition: &s3.Transition{ - Date: aws.Time(time.Now()), - Days: aws.Int64(1), - StorageClass: aws.String("TransitionStorageClass"), - }, - }, - // More values... - }, - }, - } - resp, err := svc.PutBucketLifecycle(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutBucketLogging() { - svc := s3.New(nil) - - params := &s3.PutBucketLoggingInput{ - Bucket: aws.String("BucketName"), // Required - BucketLoggingStatus: &s3.BucketLoggingStatus{ // Required - LoggingEnabled: &s3.LoggingEnabled{ - TargetBucket: aws.String("TargetBucket"), - TargetGrants: []*s3.TargetGrant{ - { // Required - Grantee: &s3.Grantee{ - Type: aws.String("Type"), // Required - DisplayName: aws.String("DisplayName"), - EmailAddress: aws.String("EmailAddress"), - ID: aws.String("ID"), - URI: aws.String("URI"), - }, - Permission: aws.String("BucketLogsPermission"), - }, - // More values... - }, - TargetPrefix: aws.String("TargetPrefix"), - }, - }, - } - resp, err := svc.PutBucketLogging(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutBucketNotification() { - svc := s3.New(nil) - - params := &s3.PutBucketNotificationInput{ - Bucket: aws.String("BucketName"), // Required - NotificationConfiguration: &s3.NotificationConfigurationDeprecated{ // Required - CloudFunctionConfiguration: &s3.CloudFunctionConfiguration{ - CloudFunction: aws.String("CloudFunction"), - Event: aws.String("Event"), - Events: []*string{ - aws.String("Event"), // Required - // More values... 
- }, - ID: aws.String("NotificationId"), - InvocationRole: aws.String("CloudFunctionInvocationRole"), - }, - QueueConfiguration: &s3.QueueConfigurationDeprecated{ - Event: aws.String("Event"), - Events: []*string{ - aws.String("Event"), // Required - // More values... - }, - ID: aws.String("NotificationId"), - Queue: aws.String("QueueArn"), - }, - TopicConfiguration: &s3.TopicConfigurationDeprecated{ - Event: aws.String("Event"), - Events: []*string{ - aws.String("Event"), // Required - // More values... - }, - ID: aws.String("NotificationId"), - Topic: aws.String("TopicArn"), - }, - }, - } - resp, err := svc.PutBucketNotification(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutBucketNotificationConfiguration() { - svc := s3.New(nil) - - params := &s3.PutBucketNotificationConfigurationInput{ - Bucket: aws.String("BucketName"), // Required - NotificationConfiguration: &s3.NotificationConfiguration{ // Required - LambdaFunctionConfigurations: []*s3.LambdaFunctionConfiguration{ - { // Required - Events: []*string{ // Required - aws.String("Event"), // Required - // More values... - }, - LambdaFunctionARN: aws.String("LambdaFunctionArn"), // Required - ID: aws.String("NotificationId"), - }, - // More values... - }, - QueueConfigurations: []*s3.QueueConfiguration{ - { // Required - Events: []*string{ // Required - aws.String("Event"), // Required - // More values... - }, - QueueARN: aws.String("QueueArn"), // Required - ID: aws.String("NotificationId"), - }, - // More values... - }, - TopicConfigurations: []*s3.TopicConfiguration{ - { // Required - Events: []*string{ // Required - aws.String("Event"), // Required - // More values... - }, - TopicARN: aws.String("TopicArn"), // Required - ID: aws.String("NotificationId"), - }, - // More values... - }, - }, - } - resp, err := svc.PutBucketNotificationConfiguration(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutBucketPolicy() { - svc := s3.New(nil) - - params := &s3.PutBucketPolicyInput{ - Bucket: aws.String("BucketName"), // Required - Policy: aws.String("Policy"), // Required - } - resp, err := svc.PutBucketPolicy(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutBucketReplication() { - svc := s3.New(nil) - - params := &s3.PutBucketReplicationInput{ - Bucket: aws.String("BucketName"), // Required - ReplicationConfiguration: &s3.ReplicationConfiguration{ // Required - Role: aws.String("Role"), // Required - Rules: []*s3.ReplicationRule{ // Required - { // Required - Destination: &s3.Destination{ // Required - Bucket: aws.String("BucketName"), // Required - }, - Prefix: aws.String("Prefix"), // Required - Status: aws.String("ReplicationRuleStatus"), // Required - ID: aws.String("ID"), - }, - // More values... - }, - }, - } - resp, err := svc.PutBucketReplication(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutBucketRequestPayment() { - svc := s3.New(nil) - - params := &s3.PutBucketRequestPaymentInput{ - Bucket: aws.String("BucketName"), // Required - RequestPaymentConfiguration: &s3.RequestPaymentConfiguration{ // Required - Payer: aws.String("Payer"), // Required - }, - } - resp, err := svc.PutBucketRequestPayment(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutBucketTagging() { - svc := s3.New(nil) - - params := &s3.PutBucketTaggingInput{ - Bucket: aws.String("BucketName"), // Required - Tagging: &s3.Tagging{ // Required - TagSet: []*s3.Tag{ // Required - { // Required - Key: aws.String("ObjectKey"), // Required - Value: aws.String("Value"), // Required - }, - // More values... 
- }, - }, - } - resp, err := svc.PutBucketTagging(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutBucketVersioning() { - svc := s3.New(nil) - - params := &s3.PutBucketVersioningInput{ - Bucket: aws.String("BucketName"), // Required - VersioningConfiguration: &s3.VersioningConfiguration{ // Required - MFADelete: aws.String("MFADelete"), - Status: aws.String("BucketVersioningStatus"), - }, - MFA: aws.String("MFA"), - } - resp, err := svc.PutBucketVersioning(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutBucketWebsite() { - svc := s3.New(nil) - - params := &s3.PutBucketWebsiteInput{ - Bucket: aws.String("BucketName"), // Required - WebsiteConfiguration: &s3.WebsiteConfiguration{ // Required - ErrorDocument: &s3.ErrorDocument{ - Key: aws.String("ObjectKey"), // Required - }, - IndexDocument: &s3.IndexDocument{ - Suffix: aws.String("Suffix"), // Required - }, - RedirectAllRequestsTo: &s3.RedirectAllRequestsTo{ - HostName: aws.String("HostName"), // Required - Protocol: aws.String("Protocol"), - }, - RoutingRules: []*s3.RoutingRule{ - { // Required - Redirect: &s3.Redirect{ // Required - HTTPRedirectCode: aws.String("HttpRedirectCode"), - HostName: aws.String("HostName"), - Protocol: aws.String("Protocol"), - ReplaceKeyPrefixWith: aws.String("ReplaceKeyPrefixWith"), - ReplaceKeyWith: aws.String("ReplaceKeyWith"), - }, - Condition: &s3.Condition{ - HTTPErrorCodeReturnedEquals: aws.String("HttpErrorCodeReturnedEquals"), - KeyPrefixEquals: aws.String("KeyPrefixEquals"), - }, - }, - // More values... - }, - }, - } - resp, err := svc.PutBucketWebsite(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutObject() { - svc := s3.New(nil) - - params := &s3.PutObjectInput{ - Bucket: aws.String("BucketName"), // Required - Key: aws.String("ObjectKey"), // Required - ACL: aws.String("ObjectCannedACL"), - Body: bytes.NewReader([]byte("PAYLOAD")), - CacheControl: aws.String("CacheControl"), - ContentDisposition: aws.String("ContentDisposition"), - ContentEncoding: aws.String("ContentEncoding"), - ContentLanguage: aws.String("ContentLanguage"), - ContentLength: aws.Int64(1), - ContentType: aws.String("ContentType"), - Expires: aws.Time(time.Now()), - GrantFullControl: aws.String("GrantFullControl"), - GrantRead: aws.String("GrantRead"), - GrantReadACP: aws.String("GrantReadACP"), - GrantWriteACP: aws.String("GrantWriteACP"), - Metadata: map[string]*string{ - "Key": aws.String("MetadataValue"), // Required - // More values... - }, - RequestPayer: aws.String("RequestPayer"), - SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), - SSECustomerKey: aws.String("SSECustomerKey"), - SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), - SSEKMSKeyID: aws.String("SSEKMSKeyId"), - ServerSideEncryption: aws.String("ServerSideEncryption"), - StorageClass: aws.String("StorageClass"), - WebsiteRedirectLocation: aws.String("WebsiteRedirectLocation"), - } - resp, err := svc.PutObject(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_PutObjectACL() { - svc := s3.New(nil) - - params := &s3.PutObjectACLInput{ - Bucket: aws.String("BucketName"), // Required - Key: aws.String("ObjectKey"), // Required - ACL: aws.String("ObjectCannedACL"), - AccessControlPolicy: &s3.AccessControlPolicy{ - Grants: []*s3.Grant{ - { // Required - Grantee: &s3.Grantee{ - Type: aws.String("Type"), // Required - DisplayName: aws.String("DisplayName"), - EmailAddress: aws.String("EmailAddress"), - ID: aws.String("ID"), - URI: aws.String("URI"), - }, - Permission: aws.String("Permission"), - }, - // More values... - }, - Owner: &s3.Owner{ - DisplayName: aws.String("DisplayName"), - ID: aws.String("ID"), - }, - }, - GrantFullControl: aws.String("GrantFullControl"), - GrantRead: aws.String("GrantRead"), - GrantReadACP: aws.String("GrantReadACP"), - GrantWrite: aws.String("GrantWrite"), - GrantWriteACP: aws.String("GrantWriteACP"), - RequestPayer: aws.String("RequestPayer"), - } - resp, err := svc.PutObjectACL(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_RestoreObject() { - svc := s3.New(nil) - - params := &s3.RestoreObjectInput{ - Bucket: aws.String("BucketName"), // Required - Key: aws.String("ObjectKey"), // Required - RequestPayer: aws.String("RequestPayer"), - RestoreRequest: &s3.RestoreRequest{ - Days: aws.Int64(1), // Required - }, - VersionID: aws.String("ObjectVersionId"), - } - resp, err := svc.RestoreObject(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_UploadPart() { - svc := s3.New(nil) - - params := &s3.UploadPartInput{ - Bucket: aws.String("BucketName"), // Required - Key: aws.String("ObjectKey"), // Required - PartNumber: aws.Int64(1), // Required - UploadID: aws.String("MultipartUploadId"), // Required - Body: bytes.NewReader([]byte("PAYLOAD")), - ContentLength: aws.Int64(1), - RequestPayer: aws.String("RequestPayer"), - SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), - SSECustomerKey: aws.String("SSECustomerKey"), - SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), - } - resp, err := svc.UploadPart(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. 
- fmt.Println(awsutil.Prettify(resp)) -} - -func ExampleS3_UploadPartCopy() { - svc := s3.New(nil) - - params := &s3.UploadPartCopyInput{ - Bucket: aws.String("BucketName"), // Required - CopySource: aws.String("CopySource"), // Required - Key: aws.String("ObjectKey"), // Required - PartNumber: aws.Int64(1), // Required - UploadID: aws.String("MultipartUploadId"), // Required - CopySourceIfMatch: aws.String("CopySourceIfMatch"), - CopySourceIfModifiedSince: aws.Time(time.Now()), - CopySourceIfNoneMatch: aws.String("CopySourceIfNoneMatch"), - CopySourceIfUnmodifiedSince: aws.Time(time.Now()), - CopySourceRange: aws.String("CopySourceRange"), - CopySourceSSECustomerAlgorithm: aws.String("CopySourceSSECustomerAlgorithm"), - CopySourceSSECustomerKey: aws.String("CopySourceSSECustomerKey"), - CopySourceSSECustomerKeyMD5: aws.String("CopySourceSSECustomerKeyMD5"), - RequestPayer: aws.String("RequestPayer"), - SSECustomerAlgorithm: aws.String("SSECustomerAlgorithm"), - SSECustomerKey: aws.String("SSECustomerKey"), - SSECustomerKeyMD5: aws.String("SSECustomerKeyMD5"), - } - resp, err := svc.UploadPartCopy(params) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - // Generic AWS error with Code, Message, and original error (if any) - fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) - if reqErr, ok := err.(awserr.RequestFailure); ok { - // A service error occurred - fmt.Println(reqErr.Code(), reqErr.Message(), reqErr.StatusCode(), reqErr.RequestID()) - } - } else { - // This case should never be hit, the SDK should always return an - // error which satisfies the awserr.Error interface. - fmt.Println(err.Error()) - } - } - - // Pretty-print the response data. - fmt.Println(awsutil.Prettify(resp)) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go deleted file mode 100644 index ef4c8ea..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go +++ /dev/null @@ -1,53 +0,0 @@ -package s3 - -import ( - "regexp" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) -var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) - -// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. -// Buckets created outside of the classic region MUST be DNS compatible. -func dnsCompatibleBucketName(bucket string) bool { - return reDomain.MatchString(bucket) && - !reIPAddress.MatchString(bucket) && - !strings.Contains(bucket, "..") -} - -// hostStyleBucketName returns true if the request should put the bucket in -// the host. This is false if S3ForcePathStyle is explicitly set or if the -// bucket is not DNS compatible. -func hostStyleBucketName(r *aws.Request, bucket string) bool { - if aws.BoolValue(r.Config.S3ForcePathStyle) { - return false - } - - // Bucket might be DNS compatible but dots in the hostname will fail - // certificate validation, so do not use host-style. 
- if r.HTTPRequest.URL.Scheme == "https" && strings.Contains(bucket, ".") { - return false - } - - // Use host-style if the bucket is DNS compatible - return dnsCompatibleBucketName(bucket) -} - -func updateHostWithBucket(r *aws.Request) { - b := awsutil.ValuesAtPath(r.Params, "Bucket") - if len(b) == 0 { - return - } - - if bucket := b[0].(string); bucket != "" && hostStyleBucketName(r, bucket) { - r.HTTPRequest.URL.Host = bucket + "." + r.HTTPRequest.URL.Host - r.HTTPRequest.URL.Path = strings.Replace(r.HTTPRequest.URL.Path, "/{Bucket}", "", -1) - if r.HTTPRequest.URL.Path == "" { - r.HTTPRequest.URL.Path = "/" - } - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go deleted file mode 100644 index c4347d4..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/host_style_bucket_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package s3_test - -import ( - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/internal/test/unit" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/stretchr/testify/assert" -) - -type s3BucketTest struct { - bucket string - url string -} - -var ( - _ = unit.Imported - - sslTests = []s3BucketTest{ - {"abc", "https://abc.s3.mock-region.amazonaws.com/"}, - {"a$b$c", "https://s3.mock-region.amazonaws.com/a%24b%24c"}, - {"a.b.c", "https://s3.mock-region.amazonaws.com/a.b.c"}, - {"a..bc", "https://s3.mock-region.amazonaws.com/a..bc"}, - } - - nosslTests = []s3BucketTest{ - {"a.b.c", "http://a.b.c.s3.mock-region.amazonaws.com/"}, - {"a..bc", "http://s3.mock-region.amazonaws.com/a..bc"}, - } - - forcepathTests = []s3BucketTest{ - {"abc", "https://s3.mock-region.amazonaws.com/abc"}, - {"a$b$c", "https://s3.mock-region.amazonaws.com/a%24b%24c"}, - {"a.b.c", "https://s3.mock-region.amazonaws.com/a.b.c"}, - {"a..bc", "https://s3.mock-region.amazonaws.com/a..bc"}, - } -) - -func runTests(t *testing.T, svc *s3.S3, tests []s3BucketTest) { - for _, test := range tests { - req, _ := svc.ListObjectsRequest(&s3.ListObjectsInput{Bucket: &test.bucket}) - req.Build() - assert.Equal(t, test.url, req.HTTPRequest.URL.String()) - } -} - -func TestHostStyleBucketBuild(t *testing.T) { - s := s3.New(nil) - runTests(t, s, sslTests) -} - -func TestHostStyleBucketBuildNoSSL(t *testing.T) { - s := s3.New(&aws.Config{DisableSSL: aws.Bool(true)}) - runTests(t, s, nosslTests) -} - -func TestPathStyleBucketBuild(t *testing.T) { - s := s3.New(&aws.Config{S3ForcePathStyle: aws.Bool(true)}) - runTests(t, s, forcepathTests) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go deleted file mode 100644 index aa5f05e..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface.go +++ /dev/null @@ -1,236 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. - -// Package s3iface provides an interface for the Amazon Simple Storage Service. -package s3iface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/s3" -) - -// S3API is the interface type for s3.S3. 
-type S3API interface { - AbortMultipartUploadRequest(*s3.AbortMultipartUploadInput) (*aws.Request, *s3.AbortMultipartUploadOutput) - - AbortMultipartUpload(*s3.AbortMultipartUploadInput) (*s3.AbortMultipartUploadOutput, error) - - CompleteMultipartUploadRequest(*s3.CompleteMultipartUploadInput) (*aws.Request, *s3.CompleteMultipartUploadOutput) - - CompleteMultipartUpload(*s3.CompleteMultipartUploadInput) (*s3.CompleteMultipartUploadOutput, error) - - CopyObjectRequest(*s3.CopyObjectInput) (*aws.Request, *s3.CopyObjectOutput) - - CopyObject(*s3.CopyObjectInput) (*s3.CopyObjectOutput, error) - - CreateBucketRequest(*s3.CreateBucketInput) (*aws.Request, *s3.CreateBucketOutput) - - CreateBucket(*s3.CreateBucketInput) (*s3.CreateBucketOutput, error) - - CreateMultipartUploadRequest(*s3.CreateMultipartUploadInput) (*aws.Request, *s3.CreateMultipartUploadOutput) - - CreateMultipartUpload(*s3.CreateMultipartUploadInput) (*s3.CreateMultipartUploadOutput, error) - - DeleteBucketRequest(*s3.DeleteBucketInput) (*aws.Request, *s3.DeleteBucketOutput) - - DeleteBucket(*s3.DeleteBucketInput) (*s3.DeleteBucketOutput, error) - - DeleteBucketCORSRequest(*s3.DeleteBucketCORSInput) (*aws.Request, *s3.DeleteBucketCORSOutput) - - DeleteBucketCORS(*s3.DeleteBucketCORSInput) (*s3.DeleteBucketCORSOutput, error) - - DeleteBucketLifecycleRequest(*s3.DeleteBucketLifecycleInput) (*aws.Request, *s3.DeleteBucketLifecycleOutput) - - DeleteBucketLifecycle(*s3.DeleteBucketLifecycleInput) (*s3.DeleteBucketLifecycleOutput, error) - - DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*aws.Request, *s3.DeleteBucketPolicyOutput) - - DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error) - - DeleteBucketReplicationRequest(*s3.DeleteBucketReplicationInput) (*aws.Request, *s3.DeleteBucketReplicationOutput) - - DeleteBucketReplication(*s3.DeleteBucketReplicationInput) (*s3.DeleteBucketReplicationOutput, error) - - DeleteBucketTaggingRequest(*s3.DeleteBucketTaggingInput) (*aws.Request, *s3.DeleteBucketTaggingOutput) - - DeleteBucketTagging(*s3.DeleteBucketTaggingInput) (*s3.DeleteBucketTaggingOutput, error) - - DeleteBucketWebsiteRequest(*s3.DeleteBucketWebsiteInput) (*aws.Request, *s3.DeleteBucketWebsiteOutput) - - DeleteBucketWebsite(*s3.DeleteBucketWebsiteInput) (*s3.DeleteBucketWebsiteOutput, error) - - DeleteObjectRequest(*s3.DeleteObjectInput) (*aws.Request, *s3.DeleteObjectOutput) - - DeleteObject(*s3.DeleteObjectInput) (*s3.DeleteObjectOutput, error) - - DeleteObjectsRequest(*s3.DeleteObjectsInput) (*aws.Request, *s3.DeleteObjectsOutput) - - DeleteObjects(*s3.DeleteObjectsInput) (*s3.DeleteObjectsOutput, error) - - GetBucketACLRequest(*s3.GetBucketACLInput) (*aws.Request, *s3.GetBucketACLOutput) - - GetBucketACL(*s3.GetBucketACLInput) (*s3.GetBucketACLOutput, error) - - GetBucketCORSRequest(*s3.GetBucketCORSInput) (*aws.Request, *s3.GetBucketCORSOutput) - - GetBucketCORS(*s3.GetBucketCORSInput) (*s3.GetBucketCORSOutput, error) - - GetBucketLifecycleRequest(*s3.GetBucketLifecycleInput) (*aws.Request, *s3.GetBucketLifecycleOutput) - - GetBucketLifecycle(*s3.GetBucketLifecycleInput) (*s3.GetBucketLifecycleOutput, error) - - GetBucketLocationRequest(*s3.GetBucketLocationInput) (*aws.Request, *s3.GetBucketLocationOutput) - - GetBucketLocation(*s3.GetBucketLocationInput) (*s3.GetBucketLocationOutput, error) - - GetBucketLoggingRequest(*s3.GetBucketLoggingInput) (*aws.Request, *s3.GetBucketLoggingOutput) - - GetBucketLogging(*s3.GetBucketLoggingInput) (*s3.GetBucketLoggingOutput, error) - - 
GetBucketNotificationRequest(*s3.GetBucketNotificationConfigurationRequest) (*aws.Request, *s3.NotificationConfigurationDeprecated) - - GetBucketNotification(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfigurationDeprecated, error) - - GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*aws.Request, *s3.NotificationConfiguration) - - GetBucketNotificationConfiguration(*s3.GetBucketNotificationConfigurationRequest) (*s3.NotificationConfiguration, error) - - GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*aws.Request, *s3.GetBucketPolicyOutput) - - GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error) - - GetBucketReplicationRequest(*s3.GetBucketReplicationInput) (*aws.Request, *s3.GetBucketReplicationOutput) - - GetBucketReplication(*s3.GetBucketReplicationInput) (*s3.GetBucketReplicationOutput, error) - - GetBucketRequestPaymentRequest(*s3.GetBucketRequestPaymentInput) (*aws.Request, *s3.GetBucketRequestPaymentOutput) - - GetBucketRequestPayment(*s3.GetBucketRequestPaymentInput) (*s3.GetBucketRequestPaymentOutput, error) - - GetBucketTaggingRequest(*s3.GetBucketTaggingInput) (*aws.Request, *s3.GetBucketTaggingOutput) - - GetBucketTagging(*s3.GetBucketTaggingInput) (*s3.GetBucketTaggingOutput, error) - - GetBucketVersioningRequest(*s3.GetBucketVersioningInput) (*aws.Request, *s3.GetBucketVersioningOutput) - - GetBucketVersioning(*s3.GetBucketVersioningInput) (*s3.GetBucketVersioningOutput, error) - - GetBucketWebsiteRequest(*s3.GetBucketWebsiteInput) (*aws.Request, *s3.GetBucketWebsiteOutput) - - GetBucketWebsite(*s3.GetBucketWebsiteInput) (*s3.GetBucketWebsiteOutput, error) - - GetObjectRequest(*s3.GetObjectInput) (*aws.Request, *s3.GetObjectOutput) - - GetObject(*s3.GetObjectInput) (*s3.GetObjectOutput, error) - - GetObjectACLRequest(*s3.GetObjectACLInput) (*aws.Request, *s3.GetObjectACLOutput) - - GetObjectACL(*s3.GetObjectACLInput) (*s3.GetObjectACLOutput, error) - - GetObjectTorrentRequest(*s3.GetObjectTorrentInput) (*aws.Request, *s3.GetObjectTorrentOutput) - - GetObjectTorrent(*s3.GetObjectTorrentInput) (*s3.GetObjectTorrentOutput, error) - - HeadBucketRequest(*s3.HeadBucketInput) (*aws.Request, *s3.HeadBucketOutput) - - HeadBucket(*s3.HeadBucketInput) (*s3.HeadBucketOutput, error) - - HeadObjectRequest(*s3.HeadObjectInput) (*aws.Request, *s3.HeadObjectOutput) - - HeadObject(*s3.HeadObjectInput) (*s3.HeadObjectOutput, error) - - ListBucketsRequest(*s3.ListBucketsInput) (*aws.Request, *s3.ListBucketsOutput) - - ListBuckets(*s3.ListBucketsInput) (*s3.ListBucketsOutput, error) - - ListMultipartUploadsRequest(*s3.ListMultipartUploadsInput) (*aws.Request, *s3.ListMultipartUploadsOutput) - - ListMultipartUploads(*s3.ListMultipartUploadsInput) (*s3.ListMultipartUploadsOutput, error) - - ListMultipartUploadsPages(*s3.ListMultipartUploadsInput, func(*s3.ListMultipartUploadsOutput, bool) bool) error - - ListObjectVersionsRequest(*s3.ListObjectVersionsInput) (*aws.Request, *s3.ListObjectVersionsOutput) - - ListObjectVersions(*s3.ListObjectVersionsInput) (*s3.ListObjectVersionsOutput, error) - - ListObjectVersionsPages(*s3.ListObjectVersionsInput, func(*s3.ListObjectVersionsOutput, bool) bool) error - - ListObjectsRequest(*s3.ListObjectsInput) (*aws.Request, *s3.ListObjectsOutput) - - ListObjects(*s3.ListObjectsInput) (*s3.ListObjectsOutput, error) - - ListObjectsPages(*s3.ListObjectsInput, func(*s3.ListObjectsOutput, bool) bool) error - - ListPartsRequest(*s3.ListPartsInput) (*aws.Request, 
*s3.ListPartsOutput) - - ListParts(*s3.ListPartsInput) (*s3.ListPartsOutput, error) - - ListPartsPages(*s3.ListPartsInput, func(*s3.ListPartsOutput, bool) bool) error - - PutBucketACLRequest(*s3.PutBucketACLInput) (*aws.Request, *s3.PutBucketACLOutput) - - PutBucketACL(*s3.PutBucketACLInput) (*s3.PutBucketACLOutput, error) - - PutBucketCORSRequest(*s3.PutBucketCORSInput) (*aws.Request, *s3.PutBucketCORSOutput) - - PutBucketCORS(*s3.PutBucketCORSInput) (*s3.PutBucketCORSOutput, error) - - PutBucketLifecycleRequest(*s3.PutBucketLifecycleInput) (*aws.Request, *s3.PutBucketLifecycleOutput) - - PutBucketLifecycle(*s3.PutBucketLifecycleInput) (*s3.PutBucketLifecycleOutput, error) - - PutBucketLoggingRequest(*s3.PutBucketLoggingInput) (*aws.Request, *s3.PutBucketLoggingOutput) - - PutBucketLogging(*s3.PutBucketLoggingInput) (*s3.PutBucketLoggingOutput, error) - - PutBucketNotificationRequest(*s3.PutBucketNotificationInput) (*aws.Request, *s3.PutBucketNotificationOutput) - - PutBucketNotification(*s3.PutBucketNotificationInput) (*s3.PutBucketNotificationOutput, error) - - PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*aws.Request, *s3.PutBucketNotificationConfigurationOutput) - - PutBucketNotificationConfiguration(*s3.PutBucketNotificationConfigurationInput) (*s3.PutBucketNotificationConfigurationOutput, error) - - PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*aws.Request, *s3.PutBucketPolicyOutput) - - PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error) - - PutBucketReplicationRequest(*s3.PutBucketReplicationInput) (*aws.Request, *s3.PutBucketReplicationOutput) - - PutBucketReplication(*s3.PutBucketReplicationInput) (*s3.PutBucketReplicationOutput, error) - - PutBucketRequestPaymentRequest(*s3.PutBucketRequestPaymentInput) (*aws.Request, *s3.PutBucketRequestPaymentOutput) - - PutBucketRequestPayment(*s3.PutBucketRequestPaymentInput) (*s3.PutBucketRequestPaymentOutput, error) - - PutBucketTaggingRequest(*s3.PutBucketTaggingInput) (*aws.Request, *s3.PutBucketTaggingOutput) - - PutBucketTagging(*s3.PutBucketTaggingInput) (*s3.PutBucketTaggingOutput, error) - - PutBucketVersioningRequest(*s3.PutBucketVersioningInput) (*aws.Request, *s3.PutBucketVersioningOutput) - - PutBucketVersioning(*s3.PutBucketVersioningInput) (*s3.PutBucketVersioningOutput, error) - - PutBucketWebsiteRequest(*s3.PutBucketWebsiteInput) (*aws.Request, *s3.PutBucketWebsiteOutput) - - PutBucketWebsite(*s3.PutBucketWebsiteInput) (*s3.PutBucketWebsiteOutput, error) - - PutObjectRequest(*s3.PutObjectInput) (*aws.Request, *s3.PutObjectOutput) - - PutObject(*s3.PutObjectInput) (*s3.PutObjectOutput, error) - - PutObjectACLRequest(*s3.PutObjectACLInput) (*aws.Request, *s3.PutObjectACLOutput) - - PutObjectACL(*s3.PutObjectACLInput) (*s3.PutObjectACLOutput, error) - - RestoreObjectRequest(*s3.RestoreObjectInput) (*aws.Request, *s3.RestoreObjectOutput) - - RestoreObject(*s3.RestoreObjectInput) (*s3.RestoreObjectOutput, error) - - UploadPartRequest(*s3.UploadPartInput) (*aws.Request, *s3.UploadPartOutput) - - UploadPart(*s3.UploadPartInput) (*s3.UploadPartOutput, error) - - UploadPartCopyRequest(*s3.UploadPartCopyInput) (*aws.Request, *s3.UploadPartCopyOutput) - - UploadPartCopy(*s3.UploadPartCopyInput) (*s3.UploadPartCopyOutput, error) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface_test.go deleted file mode 100644 index cd67215..0000000 --- 
a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3iface/interface_test.go +++ /dev/null @@ -1,15 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. - -package s3iface_test - -import ( - "testing" - - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3iface" - "github.com/stretchr/testify/assert" -) - -func TestInterface(t *testing.T) { - assert.Implements(t, (*s3iface.S3API)(nil), s3.New(nil)) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go deleted file mode 100644 index d6ebe02..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download.go +++ /dev/null @@ -1,257 +0,0 @@ -package s3manager - -import ( - "fmt" - "io" - "strconv" - "strings" - "sync" - "time" - - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/service/s3" -) - -// The default range of bytes to get at a time when using Download(). -var DefaultDownloadPartSize int64 = 1024 * 1024 * 5 - -// The default number of goroutines to spin up when using Download(). -var DefaultDownloadConcurrency = 5 - -// The default set of options used when opts is nil in Download(). -var DefaultDownloadOptions = &DownloadOptions{ - PartSize: DefaultDownloadPartSize, - Concurrency: DefaultDownloadConcurrency, -} - -// DownloadOptions keeps tracks of extra options to pass to an Download() call. -type DownloadOptions struct { - // The buffer size (in bytes) to use when buffering data into chunks and - // sending them as parts to S3. The minimum allowed part size is 5MB, and - // if this value is set to zero, the DefaultPartSize value will be used. - PartSize int64 - - // The number of goroutines to spin up in parallel when sending parts. - // If this is set to zero, the DefaultConcurrency value will be used. - Concurrency int - - // An S3 client to use when performing downloads. Leave this as nil to use - // a default client. - S3 *s3.S3 -} - -// NewDownloader creates a new Downloader structure that downloads an object -// from S3 in concurrent chunks. Pass in an optional DownloadOptions struct -// to customize the downloader behavior. -func NewDownloader(opts *DownloadOptions) *Downloader { - if opts == nil { - opts = DefaultDownloadOptions - } - return &Downloader{opts: opts} -} - -// The Downloader structure that calls Download(). It is safe to call Download() -// on this structure for multiple objects and across concurrent goroutines. -type Downloader struct { - opts *DownloadOptions -} - -// Download downloads an object in S3 and writes the payload into w using -// concurrent GET requests. -// -// It is safe to call this method for multiple objects and across concurrent -// goroutines. -func (d *Downloader) Download(w io.WriterAt, input *s3.GetObjectInput) (n int64, err error) { - impl := downloader{w: w, in: input, opts: *d.opts} - return impl.download() -} - -// downloader is the implementation structure used internally by Downloader. -type downloader struct { - opts DownloadOptions - in *s3.GetObjectInput - w io.WriterAt - - wg sync.WaitGroup - m sync.Mutex - - pos int64 - totalBytes int64 - written int64 - err error -} - -// init initializes the downloader with default options. 
-func (d *downloader) init() { - d.totalBytes = -1 - - if d.opts.Concurrency == 0 { - d.opts.Concurrency = DefaultDownloadConcurrency - } - - if d.opts.PartSize == 0 { - d.opts.PartSize = DefaultDownloadPartSize - } - - if d.opts.S3 == nil { - d.opts.S3 = s3.New(nil) - } -} - -// download performs the implementation of the object download across ranged -// GETs. -func (d *downloader) download() (n int64, err error) { - d.init() - - // Spin up workers - ch := make(chan dlchunk, d.opts.Concurrency) - for i := 0; i < d.opts.Concurrency; i++ { - d.wg.Add(1) - go d.downloadPart(ch) - } - - // Assign work - for d.geterr() == nil { - if d.pos != 0 { - // This is not the first chunk, let's wait until we know the total - // size of the payload so we can see if we have read the entire - // object. - total := d.getTotalBytes() - - if total < 0 { - // Total has not yet been set, so sleep and loop around while - // waiting for our first worker to resolve this value. - time.Sleep(10 * time.Millisecond) - continue - } else if d.pos >= total { - break // We're finished queueing chunks - } - } - - // Queue the next range of bytes to read. - ch <- dlchunk{w: d.w, start: d.pos, size: d.opts.PartSize} - d.pos += d.opts.PartSize - } - - // Wait for completion - close(ch) - d.wg.Wait() - - // Return error - return d.written, d.err -} - -// downloadPart is an individual goroutine worker reading from the ch channel -// and performing a GetObject request on the data with a given byte range. -// -// If this is the first worker, this operation also resolves the total number -// of bytes to be read so that the worker manager knows when it is finished. -func (d *downloader) downloadPart(ch chan dlchunk) { - defer d.wg.Done() - - for { - chunk, ok := <-ch - - if !ok { - break - } - - if d.geterr() == nil { - // Get the next byte range of data - in := &s3.GetObjectInput{} - awsutil.Copy(in, d.in) - rng := fmt.Sprintf("bytes=%d-%d", - chunk.start, chunk.start+chunk.size-1) - in.Range = &rng - - resp, err := d.opts.S3.GetObject(in) - if err != nil { - d.seterr(err) - } else { - d.setTotalBytes(resp) // Set total if not yet set. - - n, err := io.Copy(&chunk, resp.Body) - resp.Body.Close() - - if err != nil { - d.seterr(err) - } - d.incrwritten(n) - } - } - } -} - -// getTotalBytes is a thread-safe getter for retrieving the total byte status. -func (d *downloader) getTotalBytes() int64 { - d.m.Lock() - defer d.m.Unlock() - - return d.totalBytes -} - -// getTotalBytes is a thread-safe setter for setting the total byte status. -func (d *downloader) setTotalBytes(resp *s3.GetObjectOutput) { - d.m.Lock() - defer d.m.Unlock() - - if d.totalBytes >= 0 { - return - } - - parts := strings.Split(*resp.ContentRange, "/") - total, err := strconv.ParseInt(parts[len(parts)-1], 10, 64) - if err != nil { - d.err = err - return - } - - d.totalBytes = total -} - -func (d *downloader) incrwritten(n int64) { - d.m.Lock() - defer d.m.Unlock() - - d.written += n -} - -// geterr is a thread-safe getter for the error object -func (d *downloader) geterr() error { - d.m.Lock() - defer d.m.Unlock() - - return d.err -} - -// seterr is a thread-safe setter for the error object -func (d *downloader) seterr(e error) { - d.m.Lock() - defer d.m.Unlock() - - d.err = e -} - -// dlchunk represents a single chunk of data to write by the worker routine. -// This structure also implements an io.SectionReader style interface for -// io.WriterAt, effectively making it an io.SectionWriter (which does not -// exist). 
-type dlchunk struct { - w io.WriterAt - start int64 - size int64 - cur int64 -} - -// Write wraps io.WriterAt for the dlchunk, writing from the dlchunk's start -// position to its end (or EOF). -func (c *dlchunk) Write(p []byte) (n int, err error) { - if c.cur >= c.size { - return 0, io.EOF - } - - n, err = c.w.WriteAt(p, c.start+c.cur) - c.cur += int64(n) - - return -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go deleted file mode 100644 index 2c9c3b4..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/download_test.go +++ /dev/null @@ -1,165 +0,0 @@ -package s3manager_test - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "regexp" - "strconv" - "sync" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/internal/test/unit" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" - "github.com/stretchr/testify/assert" -) - -var _ = unit.Imported - -func dlLoggingSvc(data []byte) (*s3.S3, *[]string, *[]string) { - var m sync.Mutex - names := []string{} - ranges := []string{} - - svc := s3.New(nil) - svc.Handlers.Send.Clear() - svc.Handlers.Send.PushBack(func(r *aws.Request) { - m.Lock() - defer m.Unlock() - - names = append(names, r.Operation.Name) - ranges = append(ranges, *r.Params.(*s3.GetObjectInput).Range) - - rerng := regexp.MustCompile(`bytes=(\d+)-(\d+)`) - rng := rerng.FindStringSubmatch(r.HTTPRequest.Header.Get("Range")) - start, _ := strconv.ParseInt(rng[1], 10, 64) - fin, _ := strconv.ParseInt(rng[2], 10, 64) - fin++ - - if fin > int64(len(data)) { - fin = int64(len(data)) - } - - r.HTTPResponse = &http.Response{ - StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewReader(data[start:fin])), - Header: http.Header{}, - } - r.HTTPResponse.Header.Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", - start, fin, len(data))) - }) - - return svc, &names, &ranges -} - -type dlwriter struct { - buf []byte -} - -func newDLWriter(size int) *dlwriter { - return &dlwriter{buf: make([]byte, size)} -} - -func (d dlwriter) WriteAt(p []byte, pos int64) (n int, err error) { - if pos > int64(len(d.buf)) { - return 0, io.EOF - } - - written := 0 - for i, b := range p { - if i >= len(d.buf) { - break - } - d.buf[pos+int64(i)] = b - written++ - } - return written, nil -} - -func TestDownloadOrder(t *testing.T) { - s, names, ranges := dlLoggingSvc(buf12MB) - - opts := &s3manager.DownloadOptions{S3: s, Concurrency: 1} - d := s3manager.NewDownloader(opts) - w := newDLWriter(len(buf12MB)) - n, err := d.Download(w, &s3.GetObjectInput{ - Bucket: aws.String("bucket"), - Key: aws.String("key"), - }) - - assert.Nil(t, err) - assert.Equal(t, int64(len(buf12MB)), n) - assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names) - assert.Equal(t, []string{"bytes=0-5242879", "bytes=5242880-10485759", "bytes=10485760-15728639"}, *ranges) - - count := 0 - for _, b := range w.buf { - count += int(b) - } - assert.Equal(t, 0, count) -} - -func TestDownloadZero(t *testing.T) { - s, names, ranges := dlLoggingSvc([]byte{}) - - opts := &s3manager.DownloadOptions{S3: s} - d := s3manager.NewDownloader(opts) - w := newDLWriter(0) - n, err := d.Download(w, &s3.GetObjectInput{ - Bucket: aws.String("bucket"), - Key: aws.String("key"), - }) - - assert.Nil(t, err) - assert.Equal(t, int64(0), n) - assert.Equal(t, []string{"GetObject"}, *names) - assert.Equal(t, 
[]string{"bytes=0-5242879"}, *ranges) -} - -func TestDownloadSetPartSize(t *testing.T) { - s, names, ranges := dlLoggingSvc([]byte{1, 2, 3}) - - opts := &s3manager.DownloadOptions{S3: s, PartSize: 1, Concurrency: 1} - d := s3manager.NewDownloader(opts) - w := newDLWriter(3) - n, err := d.Download(w, &s3.GetObjectInput{ - Bucket: aws.String("bucket"), - Key: aws.String("key"), - }) - - assert.Nil(t, err) - assert.Equal(t, int64(3), n) - assert.Equal(t, []string{"GetObject", "GetObject", "GetObject"}, *names) - assert.Equal(t, []string{"bytes=0-0", "bytes=1-1", "bytes=2-2"}, *ranges) - assert.Equal(t, []byte{1, 2, 3}, w.buf) -} - -func TestDownloadError(t *testing.T) { - s, names, _ := dlLoggingSvc([]byte{1, 2, 3}) - opts := &s3manager.DownloadOptions{S3: s, PartSize: 1, Concurrency: 1} - - num := 0 - s.Handlers.Send.PushBack(func(r *aws.Request) { - num++ - if num > 1 { - r.HTTPResponse.StatusCode = 400 - r.HTTPResponse.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) - } - }) - - d := s3manager.NewDownloader(opts) - w := newDLWriter(3) - n, err := d.Download(w, &s3.GetObjectInput{ - Bucket: aws.String("bucket"), - Key: aws.String("key"), - }) - - assert.NotNil(t, err) - assert.Equal(t, int64(1), n) - assert.Equal(t, []string{"GetObject", "GetObject"}, *names) - assert.Equal(t, []byte{1, 0, 0}, w.buf) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go deleted file mode 100644 index db55680..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload.go +++ /dev/null @@ -1,562 +0,0 @@ -package s3manager - -import ( - "bytes" - "fmt" - "io" - "sort" - "sync" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/service/s3" -) - -// The maximum allowed number of parts in a multi-part upload on Amazon S3. -var MaxUploadParts = 10000 - -// The minimum allowed part size when uploading a part to Amazon S3. -var MinUploadPartSize int64 = 1024 * 1024 * 5 - -// The default part size to buffer chunks of a payload into. -var DefaultUploadPartSize = MinUploadPartSize - -// The default number of goroutines to spin up when using Upload(). -var DefaultUploadConcurrency = 5 - -// The default set of options used when opts is nil in Upload(). -var DefaultUploadOptions = &UploadOptions{ - PartSize: DefaultUploadPartSize, - Concurrency: DefaultUploadConcurrency, - LeavePartsOnError: false, - S3: nil, -} - -// A MultiUploadFailure wraps a failed S3 multipart upload. An error returned -// will satisfy this interface when a multi part upload failed to upload all -// chucks to S3. In the case of a failure the UploadID is needed to operate on -// the chunks, if any, which were uploaded. -// -// Example: -// -// u := s3manager.NewUploader(opts) -// output, err := u.upload(input) -// if err != nil { -// if multierr, ok := err.(MultiUploadFailure); ok { -// // Process error and its associated uploadID -// fmt.Println("Error:", multierr.Code(), multierr.Message(), multierr.UploadID()) -// } else { -// // Process error generically -// fmt.Println("Error:", err.Error()) -// } -// } -// -type MultiUploadFailure interface { - awserr.Error - - // Returns the upload id for the S3 multipart upload that failed. - UploadID() string -} - -// So that the Error interface type can be included as an anonymous field -// in the multiUploadError struct and not conflict with the error.Error() method. 
-type awsError awserr.Error - -// A multiUploadError wraps the upload ID of a failed s3 multipart upload. -// Composed of BaseError for code, message, and original error -// -// Should be used for an error that occurred failing a S3 multipart upload, -// and a upload ID is available. If an uploadID is not available a more relevant -type multiUploadError struct { - awsError - - // ID for multipart upload which failed. - uploadID string -} - -// Error returns the string representation of the error. -// -// See apierr.BaseError ErrorWithExtra for output format -// -// Satisfies the error interface. -func (m multiUploadError) Error() string { - extra := fmt.Sprintf("upload id: %s", m.uploadID) - return awserr.SprintError(m.Code(), m.Message(), extra, m.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (m multiUploadError) String() string { - return m.Error() -} - -// UploadID returns the id of the S3 upload which failed. -func (m multiUploadError) UploadID() string { - return m.uploadID -} - -// UploadInput contains all input for upload requests to Amazon S3. -type UploadInput struct { - // The canned ACL to apply to the object. - ACL *string `location:"header" locationName:"x-amz-acl" type:"string"` - - Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` - - // Specifies caching behavior along the request/reply chain. - CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` - - // Specifies presentational information for the object. - ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` - - // Specifies what content encodings have been applied to the object and thus - // what decoding mechanisms must be applied to obtain the media-type referenced - // by the Content-Type header field. - ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` - - // The language the content is in. - ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` - - // A standard MIME type describing the format of the object data. - ContentType *string `location:"header" locationName:"Content-Type" type:"string"` - - // The date and time at which the object is no longer cacheable. - Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` - - // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. - GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` - - // Allows grantee to read the object data and its metadata. - GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` - - // Allows grantee to read the object ACL. - GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` - - // Allows grantee to write the ACL for the applicable object. - GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` - - Key *string `location:"uri" locationName:"Key" type:"string" required:"true"` - - // A map of metadata to store with the object in S3. - Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` - - // Confirms that the requester knows that she or he will be charged for the - // request. Bucket owners need not specify this parameter in their requests. 
- // Documentation on downloading objects from requester pays buckets can be found - // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html - RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string"` - - // Specifies the algorithm to use to when encrypting the object (e.g., AES256, - // aws:kms). - SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` - - // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting - // data. This value is used to store the object and then it is discarded; Amazon - // does not store the encryption key. The key must be appropriate for use with - // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm - // header. - SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` - - // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. - // Amazon S3 uses this header for a message integrity check to ensure the encryption - // key was transmitted without error. - SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` - - // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT - // requests for an object protected by AWS KMS will fail if not made via SSL - // or using SigV4. Documentation on configuring any of the officially supported - // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version - SSEKMSKeyID *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` - - // The Server-side encryption algorithm used when storing this object in S3 - // (e.g., AES256, aws:kms). - ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string"` - - // The type of storage to use for the object. Defaults to 'STANDARD'. - StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string"` - - // If the bucket is configured as a website, redirects requests for this object - // to another object in the same bucket or to an external URL. Amazon S3 stores - // the value of this header in the object metadata. - WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` - - // The readable body payload to send to S3. - Body io.Reader -} - -// UploadOutput represents a response from the Upload() call. -type UploadOutput struct { - // The URL where the object was uploaded to. - Location string - - // The ID for a multipart upload to S3. In the case of an error the error - // can be cast to the MultiUploadFailure interface to extract the upload ID. - UploadID string -} - -// UploadOptions keeps tracks of extra options to pass to an Upload() call. -type UploadOptions struct { - // The buffer size (in bytes) to use when buffering data into chunks and - // sending them as parts to S3. The minimum allowed part size is 5MB, and - // if this value is set to zero, the DefaultPartSize value will be used. - PartSize int64 - - // The number of goroutines to spin up in parallel when sending parts. - // If this is set to zero, the DefaultConcurrency value will be used. 
- Concurrency int - - // Setting this value to true will cause the SDK to avoid calling - // AbortMultipartUpload on a failure, leaving all successfully uploaded - // parts on S3 for manual recovery. - // - // Note that storing parts of an incomplete multipart upload counts towards - // space usage on S3 and will add additional costs if not cleaned up. - LeavePartsOnError bool - - // The client to use when uploading to S3. Leave this as nil to use the - // default S3 client. - S3 *s3.S3 -} - -// NewUploader creates a new Uploader object to upload data to S3. Pass in -// an optional opts structure to customize the uploader behavior. -func NewUploader(opts *UploadOptions) *Uploader { - if opts == nil { - opts = DefaultUploadOptions - } - return &Uploader{opts: opts} -} - -// The Uploader structure that calls Upload(). It is safe to call Upload() -// on this structure for multiple objects and across concurrent goroutines. -type Uploader struct { - opts *UploadOptions -} - -// Upload uploads an object to S3, intelligently buffering large files into -// smaller chunks and sending them in parallel across multiple goroutines. You -// can configure the buffer size and concurrency through the opts parameter. -// -// If opts is set to nil, DefaultUploadOptions will be used. -// -// It is safe to call this method for multiple objects and across concurrent -// goroutines. -func (u *Uploader) Upload(input *UploadInput) (*UploadOutput, error) { - i := uploader{in: input, opts: *u.opts} - return i.upload() -} - -// internal structure to manage an upload to S3. -type uploader struct { - in *UploadInput - opts UploadOptions - - readerPos int64 // current reader position - totalSize int64 // set to -1 if the size is not known -} - -// internal logic for deciding whether to upload a single part or use a -// multipart upload. -func (u *uploader) upload() (*UploadOutput, error) { - u.init() - - if u.opts.PartSize < MinUploadPartSize { - msg := fmt.Sprintf("part size must be at least %d bytes", MinUploadPartSize) - return nil, awserr.New("ConfigError", msg, nil) - } - - // Do one read to determine if we have more than one part - buf, err := u.nextReader() - if err == io.EOF || err == io.ErrUnexpectedEOF { // single part - return u.singlePart(buf) - } else if err != nil { - return nil, awserr.New("ReadRequestBody", "read upload data failed", err) - } - - mu := multiuploader{uploader: u} - return mu.upload(buf) -} - -// init will initialize all default options. -func (u *uploader) init() { - if u.opts.S3 == nil { - u.opts.S3 = s3.New(nil) - } - if u.opts.Concurrency == 0 { - u.opts.Concurrency = DefaultUploadConcurrency - } - if u.opts.PartSize == 0 { - u.opts.PartSize = DefaultUploadPartSize - } - - // Try to get the total size for some optimizations - u.initSize() -} - -// initSize tries to detect the total stream size, setting u.totalSize. If -// the size is not known, totalSize is set to -1. -func (u *uploader) initSize() { - u.totalSize = -1 - - switch r := u.in.Body.(type) { - case io.Seeker: - pos, _ := r.Seek(0, 1) - defer r.Seek(pos, 0) - - n, err := r.Seek(0, 2) - if err != nil { - return - } - u.totalSize = n - - // try to adjust partSize if it is too small - if u.totalSize/u.opts.PartSize >= int64(MaxUploadParts) { - u.opts.PartSize = u.totalSize / int64(MaxUploadParts) - } - } -} - -// nextReader returns a seekable reader representing the next packet of data. 
-// This operation increases the shared u.readerPos counter, but note that it -// does not need to be wrapped in a mutex because nextReader is only called -// from the main thread. -func (u *uploader) nextReader() (io.ReadSeeker, error) { - switch r := u.in.Body.(type) { - case io.ReaderAt: - var err error - - n := u.opts.PartSize - if u.totalSize >= 0 { - bytesLeft := u.totalSize - u.readerPos - - if bytesLeft == 0 { - err = io.EOF - } else if bytesLeft <= u.opts.PartSize { - err = io.ErrUnexpectedEOF - n = bytesLeft - } - } - - buf := io.NewSectionReader(r, u.readerPos, n) - u.readerPos += n - - return buf, err - - default: - packet := make([]byte, u.opts.PartSize) - n, err := io.ReadFull(u.in.Body, packet) - u.readerPos += int64(n) - - return bytes.NewReader(packet[0:n]), err - } -} - -// singlePart contains upload logic for uploading a single chunk via -// a regular PutObject request. Multipart requests require at least two -// parts, or at least 5MB of data. -func (u *uploader) singlePart(buf io.ReadSeeker) (*UploadOutput, error) { - params := &s3.PutObjectInput{} - awsutil.Copy(params, u.in) - params.Body = buf - - req, _ := u.opts.S3.PutObjectRequest(params) - if err := req.Send(); err != nil { - return nil, err - } - - url := req.HTTPRequest.URL.String() - return &UploadOutput{Location: url}, nil -} - -// internal structure to manage a specific multipart upload to S3. -type multiuploader struct { - *uploader - wg sync.WaitGroup - m sync.Mutex - err error - uploadID string - parts completedParts -} - -// keeps track of a single chunk of data being sent to S3. -type chunk struct { - buf io.ReadSeeker - num int64 -} - -// completedParts is a wrapper to make parts sortable by their part number, -// since S3 required this list to be sent in sorted order. -type completedParts []*s3.CompletedPart - -func (a completedParts) Len() int { return len(a) } -func (a completedParts) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a completedParts) Less(i, j int) bool { return *a[i].PartNumber < *a[j].PartNumber } - -// upload will perform a multipart upload using the firstBuf buffer containing -// the first chunk of data. -func (u *multiuploader) upload(firstBuf io.ReadSeeker) (*UploadOutput, error) { - params := &s3.CreateMultipartUploadInput{} - awsutil.Copy(params, u.in) - - // Create the multipart - resp, err := u.opts.S3.CreateMultipartUpload(params) - if err != nil { - return nil, err - } - u.uploadID = *resp.UploadID - - // Create the workers - ch := make(chan chunk, u.opts.Concurrency) - for i := 0; i < u.opts.Concurrency; i++ { - u.wg.Add(1) - go u.readChunk(ch) - } - - // Send part 1 to the workers - var num int64 = 1 - ch <- chunk{buf: firstBuf, num: num} - - // Read and queue the rest of the parts - for u.geterr() == nil { - // This upload exceeded maximum number of supported parts, error now. - if num > int64(MaxUploadParts) { - msg := fmt.Sprintf("exceeded total allowed parts (%d). 
"+ - "Adjust PartSize to fit in this limit", MaxUploadParts) - u.seterr(awserr.New("TotalPartsExceeded", msg, nil)) - break - } - - num++ - - buf, err := u.nextReader() - if err == io.EOF { - break - } - - ch <- chunk{buf: buf, num: num} - - if err != nil && err != io.ErrUnexpectedEOF { - u.seterr(awserr.New( - "ReadRequestBody", - "read multipart upload data failed", - err)) - break - } - } - - // Close the channel, wait for workers, and complete upload - close(ch) - u.wg.Wait() - complete := u.complete() - - if err := u.geterr(); err != nil { - return nil, &multiUploadError{ - awsError: awserr.New( - "MultipartUpload", - "upload multipart failed", - err), - uploadID: u.uploadID, - } - } - return &UploadOutput{ - Location: *complete.Location, - UploadID: u.uploadID, - }, nil -} - -// readChunk runs in worker goroutines to pull chunks off of the ch channel -// and send() them as UploadPart requests. -func (u *multiuploader) readChunk(ch chan chunk) { - defer u.wg.Done() - for { - data, ok := <-ch - - if !ok { - break - } - - if u.geterr() == nil { - if err := u.send(data); err != nil { - u.seterr(err) - } - } - } -} - -// send performs an UploadPart request and keeps track of the completed -// part information. -func (u *multiuploader) send(c chunk) error { - resp, err := u.opts.S3.UploadPart(&s3.UploadPartInput{ - Bucket: u.in.Bucket, - Key: u.in.Key, - Body: c.buf, - UploadID: &u.uploadID, - PartNumber: &c.num, - }) - - if err != nil { - return err - } - - n := c.num - completed := &s3.CompletedPart{ETag: resp.ETag, PartNumber: &n} - - u.m.Lock() - u.parts = append(u.parts, completed) - u.m.Unlock() - - return nil -} - -// geterr is a thread-safe getter for the error object -func (u *multiuploader) geterr() error { - u.m.Lock() - defer u.m.Unlock() - - return u.err -} - -// seterr is a thread-safe setter for the error object -func (u *multiuploader) seterr(e error) { - u.m.Lock() - defer u.m.Unlock() - - u.err = e -} - -// fail will abort the multipart unless LeavePartsOnError is set to true. -func (u *multiuploader) fail() { - if u.opts.LeavePartsOnError { - return - } - - u.opts.S3.AbortMultipartUpload(&s3.AbortMultipartUploadInput{ - Bucket: u.in.Bucket, - Key: u.in.Key, - UploadID: &u.uploadID, - }) -} - -// complete successfully completes a multipart upload and returns the response. -func (u *multiuploader) complete() *s3.CompleteMultipartUploadOutput { - if u.geterr() != nil { - u.fail() - return nil - } - - // Parts must be sorted in PartNumber order. 
- sort.Sort(u.parts) - - resp, err := u.opts.S3.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{ - Bucket: u.in.Bucket, - Key: u.in.Key, - UploadID: &u.uploadID, - MultipartUpload: &s3.CompletedMultipartUpload{Parts: u.parts}, - }) - if err != nil { - u.seterr(err) - u.fail() - } - - return resp -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go deleted file mode 100644 index 9016419..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/s3manager/upload_test.go +++ /dev/null @@ -1,438 +0,0 @@ -package s3manager_test - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "sort" - "sync" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/internal/test/unit" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/aws/aws-sdk-go/service/s3/s3manager" - "github.com/stretchr/testify/assert" -) - -var _ = unit.Imported -var buf12MB = make([]byte, 1024*1024*12) -var buf2MB = make([]byte, 1024*1024*2) - -var emptyList = []string{} - -func val(i interface{}, s string) interface{} { - return awsutil.ValuesAtPath(i, s)[0] -} - -func contains(src []string, s string) bool { - for _, v := range src { - if s == v { - return true - } - } - return false -} - -func loggingSvc(ignoreOps []string) (*s3.S3, *[]string, *[]interface{}) { - var m sync.Mutex - partNum := 0 - names := []string{} - params := []interface{}{} - svc := s3.New(nil) - svc.Handlers.Unmarshal.Clear() - svc.Handlers.UnmarshalMeta.Clear() - svc.Handlers.UnmarshalError.Clear() - svc.Handlers.Send.Clear() - svc.Handlers.Send.PushBack(func(r *aws.Request) { - m.Lock() - defer m.Unlock() - - if !contains(ignoreOps, r.Operation.Name) { - names = append(names, r.Operation.Name) - params = append(params, r.Params) - } - - r.HTTPResponse = &http.Response{ - StatusCode: 200, - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - - switch data := r.Data.(type) { - case *s3.CreateMultipartUploadOutput: - data.UploadID = aws.String("UPLOAD-ID") - case *s3.UploadPartOutput: - partNum++ - data.ETag = aws.String(fmt.Sprintf("ETAG%d", partNum)) - case *s3.CompleteMultipartUploadOutput: - data.Location = aws.String("https://location") - } - }) - - return svc, &names, ¶ms -} - -func buflen(i interface{}) int { - r := i.(io.Reader) - b, _ := ioutil.ReadAll(r) - return len(b) -} - -func TestUploadOrderMulti(t *testing.T) { - s, ops, args := loggingSvc(emptyList) - mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s}) - resp, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: bytes.NewReader(buf12MB), - ServerSideEncryption: aws.String("AES256"), - ContentType: aws.String("content/type"), - }) - - assert.NoError(t, err) - assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) - assert.Equal(t, "https://location", resp.Location) - assert.Equal(t, "UPLOAD-ID", resp.UploadID) - - // Validate input values - - // UploadPart - assert.Equal(t, "UPLOAD-ID", val((*args)[1], "UploadID")) - assert.Equal(t, "UPLOAD-ID", val((*args)[2], "UploadID")) - assert.Equal(t, "UPLOAD-ID", val((*args)[3], "UploadID")) - - // CompleteMultipartUpload - assert.Equal(t, "UPLOAD-ID", val((*args)[4], "UploadID")) - assert.Equal(t, int64(1), val((*args)[4], 
"MultipartUpload.Parts[0].PartNumber")) - assert.Equal(t, int64(2), val((*args)[4], "MultipartUpload.Parts[1].PartNumber")) - assert.Equal(t, int64(3), val((*args)[4], "MultipartUpload.Parts[2].PartNumber")) - assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[0].ETag")) - assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[1].ETag")) - assert.Regexp(t, `^ETAG\d+$`, val((*args)[4], "MultipartUpload.Parts[2].ETag")) - - // Custom headers - assert.Equal(t, "AES256", val((*args)[0], "ServerSideEncryption")) - assert.Equal(t, "content/type", val((*args)[0], "ContentType")) -} - -func TestUploadOrderMultiDifferentPartSize(t *testing.T) { - s, ops, args := loggingSvc(emptyList) - mgr := s3manager.NewUploader(&s3manager.UploadOptions{ - S3: s, - PartSize: 1024 * 1024 * 7, - Concurrency: 1, - }) - _, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: bytes.NewReader(buf12MB), - }) - - assert.NoError(t, err) - assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) - - // Part lengths - assert.Equal(t, 1024*1024*7, buflen(val((*args)[1], "Body"))) - assert.Equal(t, 1024*1024*5, buflen(val((*args)[2], "Body"))) -} - -func TestUploadIncreasePartSize(t *testing.T) { - s3manager.MaxUploadParts = 2 - defer func() { s3manager.MaxUploadParts = 10000 }() - - s, ops, args := loggingSvc(emptyList) - opts := &s3manager.UploadOptions{S3: s, Concurrency: 1} - mgr := s3manager.NewUploader(opts) - _, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: bytes.NewReader(buf12MB), - }) - - assert.NoError(t, err) - assert.Equal(t, int64(0), opts.PartSize) // don't modify orig options - assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) - - // Part lengths - assert.Equal(t, 1024*1024*6, buflen(val((*args)[1], "Body"))) - assert.Equal(t, 1024*1024*6, buflen(val((*args)[2], "Body"))) -} - -func TestUploadFailIfPartSizeTooSmall(t *testing.T) { - opts := &s3manager.UploadOptions{PartSize: 5} - mgr := s3manager.NewUploader(opts) - resp, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: bytes.NewReader(buf12MB), - }) - - assert.Nil(t, resp) - assert.NotNil(t, err) - - aerr := err.(awserr.Error) - assert.Equal(t, "ConfigError", aerr.Code()) - assert.Contains(t, aerr.Message(), "part size must be at least") -} - -func TestUploadOrderSingle(t *testing.T) { - s, ops, args := loggingSvc(emptyList) - mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s}) - resp, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: bytes.NewReader(buf2MB), - ServerSideEncryption: aws.String("AES256"), - ContentType: aws.String("content/type"), - }) - - assert.NoError(t, err) - assert.Equal(t, []string{"PutObject"}, *ops) - assert.NotEqual(t, "", resp.Location) - assert.Equal(t, "", resp.UploadID) - assert.Equal(t, "AES256", val((*args)[0], "ServerSideEncryption")) - assert.Equal(t, "content/type", val((*args)[0], "ContentType")) -} - -func TestUploadOrderSingleFailure(t *testing.T) { - s, ops, _ := loggingSvc(emptyList) - s.Handlers.Send.PushBack(func(r *aws.Request) { - r.HTTPResponse.StatusCode = 400 - }) - mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s}) - resp, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: 
aws.String("Key"), - Body: bytes.NewReader(buf2MB), - }) - - assert.Error(t, err) - assert.Equal(t, []string{"PutObject"}, *ops) - assert.Nil(t, resp) -} - -func TestUploadOrderZero(t *testing.T) { - s, ops, args := loggingSvc(emptyList) - mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s}) - resp, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: bytes.NewReader(make([]byte, 0)), - }) - - assert.NoError(t, err) - assert.Equal(t, []string{"PutObject"}, *ops) - assert.NotEqual(t, "", resp.Location) - assert.Equal(t, "", resp.UploadID) - assert.Equal(t, 0, buflen(val((*args)[0], "Body"))) -} - -func TestUploadOrderMultiFailure(t *testing.T) { - s, ops, _ := loggingSvc(emptyList) - s.Handlers.Send.PushBack(func(r *aws.Request) { - switch t := r.Data.(type) { - case *s3.UploadPartOutput: - if *t.ETag == "ETAG2" { - r.HTTPResponse.StatusCode = 400 - } - } - }) - - mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s, Concurrency: 1}) - _, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: bytes.NewReader(buf12MB), - }) - - assert.Error(t, err) - assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "AbortMultipartUpload"}, *ops) -} - -func TestUploadOrderMultiFailureOnComplete(t *testing.T) { - s, ops, _ := loggingSvc(emptyList) - s.Handlers.Send.PushBack(func(r *aws.Request) { - switch r.Data.(type) { - case *s3.CompleteMultipartUploadOutput: - r.HTTPResponse.StatusCode = 400 - } - }) - - mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s, Concurrency: 1}) - _, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: bytes.NewReader(buf12MB), - }) - - assert.Error(t, err) - assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", - "UploadPart", "CompleteMultipartUpload", "AbortMultipartUpload"}, *ops) -} - -func TestUploadOrderMultiFailureOnCreate(t *testing.T) { - s, ops, _ := loggingSvc(emptyList) - s.Handlers.Send.PushBack(func(r *aws.Request) { - switch r.Data.(type) { - case *s3.CreateMultipartUploadOutput: - r.HTTPResponse.StatusCode = 400 - } - }) - - mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s}) - _, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: bytes.NewReader(make([]byte, 1024*1024*12)), - }) - - assert.Error(t, err) - assert.Equal(t, []string{"CreateMultipartUpload"}, *ops) -} - -func TestUploadOrderMultiFailureLeaveParts(t *testing.T) { - s, ops, _ := loggingSvc(emptyList) - s.Handlers.Send.PushBack(func(r *aws.Request) { - switch data := r.Data.(type) { - case *s3.UploadPartOutput: - if *data.ETag == "ETAG2" { - r.HTTPResponse.StatusCode = 400 - } - } - }) - - mgr := s3manager.NewUploader(&s3manager.UploadOptions{ - S3: s, - Concurrency: 1, - LeavePartsOnError: true, - }) - _, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: bytes.NewReader(make([]byte, 1024*1024*12)), - }) - - assert.Error(t, err) - assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart"}, *ops) -} - -type failreader struct { - times int - failCount int -} - -func (f *failreader) Read(b []byte) (int, error) { - f.failCount++ - if f.failCount >= f.times { - return 0, fmt.Errorf("random failure") - } - return len(b), nil -} - -func TestUploadOrderReadFail1(t *testing.T) { - s, ops, _ := loggingSvc(emptyList) - mgr := 
s3manager.NewUploader(&s3manager.UploadOptions{S3: s}) - _, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: &failreader{times: 1}, - }) - - assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code()) - assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure") - assert.Equal(t, []string{}, *ops) -} - -func TestUploadOrderReadFail2(t *testing.T) { - s, ops, _ := loggingSvc([]string{"UploadPart"}) - mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s, Concurrency: 1}) - _, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: &failreader{times: 2}, - }) - - assert.Equal(t, "ReadRequestBody", err.(awserr.Error).Code()) - assert.EqualError(t, err.(awserr.Error).OrigErr(), "random failure") - assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops) -} - -type sizedReader struct { - size int - cur int -} - -func (s *sizedReader) Read(p []byte) (n int, err error) { - if s.cur >= s.size { - return 0, io.EOF - } - - n = len(p) - s.cur += len(p) - if s.cur > s.size { - n -= s.cur - s.size - } - - return -} - -func TestUploadOrderMultiBufferedReader(t *testing.T) { - s, ops, args := loggingSvc(emptyList) - mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s}) - _, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: &sizedReader{size: 1024 * 1024 * 12}, - }) - - assert.NoError(t, err) - assert.Equal(t, []string{"CreateMultipartUpload", "UploadPart", "UploadPart", "UploadPart", "CompleteMultipartUpload"}, *ops) - - // Part lengths - parts := []int{ - buflen(val((*args)[1], "Body")), - buflen(val((*args)[2], "Body")), - buflen(val((*args)[3], "Body")), - } - sort.Ints(parts) - assert.Equal(t, []int{1024 * 1024 * 2, 1024 * 1024 * 5, 1024 * 1024 * 5}, parts) -} - -func TestUploadOrderMultiBufferedReaderExceedTotalParts(t *testing.T) { - s3manager.MaxUploadParts = 2 - defer func() { s3manager.MaxUploadParts = 10000 }() - s, ops, _ := loggingSvc([]string{"UploadPart"}) - mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s, Concurrency: 1}) - resp, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: &sizedReader{size: 1024 * 1024 * 12}, - }) - - assert.Error(t, err) - assert.Nil(t, resp) - assert.Equal(t, []string{"CreateMultipartUpload", "AbortMultipartUpload"}, *ops) - - aerr := err.(awserr.Error) - assert.Equal(t, "TotalPartsExceeded", aerr.Code()) - assert.Contains(t, aerr.Message(), "exceeded total allowed parts (2)") -} - -func TestUploadOrderSingleBufferedReader(t *testing.T) { - s, ops, _ := loggingSvc(emptyList) - mgr := s3manager.NewUploader(&s3manager.UploadOptions{S3: s}) - resp, err := mgr.Upload(&s3manager.UploadInput{ - Bucket: aws.String("Bucket"), - Key: aws.String("Key"), - Body: &sizedReader{size: 1024 * 1024 * 2}, - }) - - assert.NoError(t, err) - assert.Equal(t, []string{"PutObject"}, *ops) - assert.NotEqual(t, "", resp.Location) - assert.Equal(t, "", resp.UploadID) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go deleted file mode 100644 index 0d25fd2..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/service.go +++ /dev/null @@ -1,57 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
- -package s3 - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/internal/protocol/restxml" - "github.com/aws/aws-sdk-go/internal/signer/v4" -) - -// S3 is a client for Amazon S3. -type S3 struct { - *aws.Service -} - -// Used for custom service initialization logic -var initService func(*aws.Service) - -// Used for custom request initialization logic -var initRequest func(*aws.Request) - -// New returns a new S3 client. -func New(config *aws.Config) *S3 { - service := &aws.Service{ - Config: aws.DefaultConfig.Merge(config), - ServiceName: "s3", - APIVersion: "2006-03-01", - } - service.Initialize() - - // Handlers - service.Handlers.Sign.PushBack(v4.Sign) - service.Handlers.Build.PushBack(restxml.Build) - service.Handlers.Unmarshal.PushBack(restxml.Unmarshal) - service.Handlers.UnmarshalMeta.PushBack(restxml.UnmarshalMeta) - service.Handlers.UnmarshalError.PushBack(restxml.UnmarshalError) - - // Run custom service initialization if present - if initService != nil { - initService(service) - } - - return &S3{service} -} - -// newRequest creates a new request for a S3 operation and runs any -// custom request initialization. -func (c *S3) newRequest(op *aws.Operation, params, data interface{}) *aws.Request { - req := aws.NewRequest(c.Service, op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go deleted file mode 100644 index 4e6ecdc..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package s3_test - -import ( - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/internal/test/unit" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/stretchr/testify/assert" -) - -var _ = unit.Imported - -func TestSSECustomerKeyOverHTTPError(t *testing.T) { - s := s3.New(&aws.Config{DisableSSL: aws.Bool(true)}) - req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ - Bucket: aws.String("bucket"), - CopySource: aws.String("bucket/source"), - Key: aws.String("dest"), - SSECustomerKey: aws.String("key"), - }) - err := req.Build() - - assert.Error(t, err) - assert.Equal(t, "ConfigError", err.(awserr.Error).Code()) - assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP") -} - -func TestCopySourceSSECustomerKeyOverHTTPError(t *testing.T) { - s := s3.New(&aws.Config{DisableSSL: aws.Bool(true)}) - req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ - Bucket: aws.String("bucket"), - CopySource: aws.String("bucket/source"), - Key: aws.String("dest"), - CopySourceSSECustomerKey: aws.String("key"), - }) - err := req.Build() - - assert.Error(t, err) - assert.Equal(t, "ConfigError", err.(awserr.Error).Code()) - assert.Contains(t, err.(awserr.Error).Message(), "cannot send SSE keys over HTTP") -} - -func TestComputeSSEKeys(t *testing.T) { - s := s3.New(nil) - req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ - Bucket: aws.String("bucket"), - CopySource: aws.String("bucket/source"), - Key: aws.String("dest"), - SSECustomerKey: aws.String("key"), - CopySourceSSECustomerKey: aws.String("key"), - }) - err := req.Build() - - assert.NoError(t, err) - assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key")) - assert.Equal(t, "a2V5", 
req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key")) - assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5")) - assert.Equal(t, "PG4LipwVIkqCKLmpjKFTHQ==", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5")) -} - -func TestComputeSSEKeysShortcircuit(t *testing.T) { - s := s3.New(nil) - req, _ := s.CopyObjectRequest(&s3.CopyObjectInput{ - Bucket: aws.String("bucket"), - CopySource: aws.String("bucket/source"), - Key: aws.String("dest"), - SSECustomerKey: aws.String("key"), - CopySourceSSECustomerKey: aws.String("key"), - SSECustomerKeyMD5: aws.String("MD5"), - CopySourceSSECustomerKeyMD5: aws.String("MD5"), - }) - err := req.Build() - - assert.NoError(t, err) - assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key")) - assert.Equal(t, "a2V5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key")) - assert.Equal(t, "MD5", req.HTTPRequest.Header.Get("x-amz-server-side-encryption-customer-key-md5")) - assert.Equal(t, "MD5", req.HTTPRequest.Header.Get("x-amz-copy-source-server-side-encryption-customer-key-md5")) -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go deleted file mode 100644 index c27d434..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go +++ /dev/null @@ -1,42 +0,0 @@ -package s3 - -import ( - "encoding/xml" - "io" - "strings" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" -) - -type xmlErrorResponse struct { - XMLName xml.Name `xml:"Error"` - Code string `xml:"Code"` - Message string `xml:"Message"` -} - -func unmarshalError(r *aws.Request) { - defer r.HTTPResponse.Body.Close() - - if r.HTTPResponse.ContentLength == int64(0) { - // No body, use status code to generate an awserr.Error - r.Error = awserr.NewRequestFailure( - awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil), - r.HTTPResponse.StatusCode, - "", - ) - return - } - - resp := &xmlErrorResponse{} - err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) - if err != nil && err != io.EOF { - r.Error = awserr.New("SerializationError", "failed to decode S3 XML error response", nil) - } else { - r.Error = awserr.NewRequestFailure( - awserr.New(resp.Code, resp.Message, nil), - r.HTTPResponse.StatusCode, - "", - ) - } -} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go b/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go deleted file mode 100644 index ee08d62..0000000 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/unmarshal_error_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package s3_test - -import ( - "bytes" - "io/ioutil" - "net/http" - "testing" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/internal/test/unit" - "github.com/aws/aws-sdk-go/service/s3" - "github.com/stretchr/testify/assert" -) - -var _ = unit.Imported - -var s3StatusCodeErrorTests = []struct { - scode int - status string - body string - code string - message string -}{ - {301, "Moved Permanently", "", "MovedPermanently", "Moved Permanently"}, - {403, "Forbidden", "", "Forbidden", "Forbidden"}, - {400, "Bad Request", "", "BadRequest", "Bad Request"}, - {404, "Not Found", "", 
"NotFound", "Not Found"}, - {500, "Internal Error", "", "InternalError", "Internal Error"}, -} - -func TestStatusCodeError(t *testing.T) { - for _, test := range s3StatusCodeErrorTests { - s := s3.New(nil) - s.Handlers.Send.Clear() - s.Handlers.Send.PushBack(func(r *aws.Request) { - body := ioutil.NopCloser(bytes.NewReader([]byte(test.body))) - r.HTTPResponse = &http.Response{ - ContentLength: int64(len(test.body)), - StatusCode: test.scode, - Status: test.status, - Body: body, - } - }) - _, err := s.PutBucketACL(&s3.PutBucketACLInput{ - Bucket: aws.String("bucket"), ACL: aws.String("public-read"), - }) - - assert.Error(t, err) - assert.Equal(t, test.code, err.(awserr.Error).Code()) - assert.Equal(t, test.message, err.(awserr.Error).Message()) - } -} diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/.travis.yml b/Godeps/_workspace/src/github.com/spf13/cobra/.travis.yml deleted file mode 100644 index dc43afd..0000000 --- a/Godeps/_workspace/src/github.com/spf13/cobra/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go -go: - - 1.3 - - 1.4.2 - - tip -script: - - go test ./... - - go build diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/README.md b/Godeps/_workspace/src/github.com/spf13/cobra/README.md deleted file mode 100644 index b1fb088..0000000 --- a/Godeps/_workspace/src/github.com/spf13/cobra/README.md +++ /dev/null @@ -1,485 +0,0 @@ -# Cobra - -A Commander for modern go CLI interactions - -[![Build Status](https://travis-ci.org/spf13/cobra.svg)](https://travis-ci.org/spf13/cobra) - -## Overview - -Cobra is a commander providing a simple interface to create powerful modern CLI -interfaces similar to git & go tools. In addition to providing an interface, Cobra -simultaneously provides a controller to organize your application code. - -Inspired by go, go-Commander, gh and subcommand, Cobra improves on these by -providing **fully posix compliant flags** (including short & long versions), -**nesting commands**, and the ability to **define your own help and usage** for any or -all commands. - -Cobra has an exceptionally clean interface and simple design without needless -constructors or initialization methods. - -Applications built with Cobra commands are designed to be as user friendly as -possible. Flags can be placed before or after the command (as long as a -confusing space isn’t provided). Both short and long flags can be used. A -command need not even be fully typed. The shortest unambiguous string will -suffice. Help is automatically generated and available for the application or -for a specific command using either the help command or the --help flag. - -## Concepts - -Cobra is built on a structure of commands & flags. - -**Commands** represent actions and **Flags** are modifiers for those actions. - -In the following example 'server' is a command and 'port' is a flag. - - hugo server --port=1313 - -### Commands - -Command is the central point of the application. Each interaction that -the application supports will be contained in a Command. A command can -have children commands and optionally run an action. - -In the example above 'server' is the command - -A Command has the following structure: - - type Command struct { - Use string // The one-line usage message. - Short string // The short description shown in the 'help' output. - Long string // The long message shown in the 'help ' output. - Run func(cmd *Command, args []string) // Run runs the command. - } - -### Flags - -A Flag is a way to modify the behavior of an command. 
Cobra supports -fully posix compliant flags as well as the go flag package. -A Cobra command can define flags that persist through to children commands -and flags that are only available to that command. - -In the example above 'port' is the flag. - -Flag functionality is provided by the [pflag -library](https://github.com/ogier/pflag), a fork of the flag standard library -which maintains the same interface while adding posix compliance. - -## Usage - -Cobra works by creating a set of commands and then organizing them into a tree. -The tree defines the structure of the application. - -Once each command is defined with it's corresponding flags, then the -tree is assigned to the commander which is finally executed. - -### Installing -Using Cobra is easy. First use go get to install the latest version -of the library. - - $ go get github.com/spf13/cobra - -Next include cobra in your application. - - import "github.com/spf13/cobra" - -### Create the root command - -The root command represents your binary itself. - -Cobra doesn't require any special constructors. Simply create your commands. - - var HugoCmd = &cobra.Command{ - Use: "hugo", - Short: "Hugo is a very fast static site generator", - Long: `A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - Complete documentation is available at http://hugo.spf13.com`, - Run: func(cmd *cobra.Command, args []string) { - // Do Stuff Here - }, - } - -### Create additional commands - -Additional commands can be defined. - - var versionCmd = &cobra.Command{ - Use: "version", - Short: "Print the version number of Hugo", - Long: `All software has versions. This is Hugo's`, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") - }, - } - -### Attach command to its parent -In this example we are attaching it to the root, but commands can be attached at any level. - - HugoCmd.AddCommand(versionCmd) - -### Assign flags to a command - -Since the flags are defined and used in different locations, we need to -define a variable outside with the correct scope to assign the flag to -work with. - - var Verbose bool - var Source string - -There are two different approaches to assign a flag. - -#### Persistent Flags - -A flag can be 'persistent' meaning that this flag will be available to the -command it's assigned to as well as every command under that command. For -global flags assign a flag as a persistent flag on the root. - - HugoCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") - -#### Local Flags - -A flag can also be assigned locally which will only apply to that specific command. - - HugoCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") - -### Remove a command from its parent - -Removing a command is not a common action in simple programs but it allows 3rd parties to customize an existing command tree. - -In this example, we remove the existing `VersionCmd` command of an existing root command, and we replace it by our own version. - - mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd) - mainlib.RootCmd.AddCommand(versionCmd) - -### Once all commands and flags are defined, Execute the commands - -Execute should be run on the root for clarity, though it can be called on any command. - - HugoCmd.Execute() - -## Example - -In the example below we have defined three commands. Two are at the top level -and one (cmdTimes) is a child of one of the top commands. 
In this case the root -is not executable meaning that a subcommand is required. This is accomplished -by not providing a 'Run' for the 'rootCmd'. - -We have only defined one flag for a single command. - -More documentation about flags is available at https://github.com/spf13/pflag - - import( - "github.com/spf13/cobra" - "fmt" - "strings" - ) - - func main() { - - var echoTimes int - - var cmdPrint = &cobra.Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `print is for printing anything back to the screen. - For many years people have printed back to the screen. - `, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdEcho = &cobra.Command{ - Use: "echo [string to echo]", - Short: "Echo anything to the screen", - Long: `echo is for echoing anything back. - Echo works a lot like print, except it has a child command. - `, - Run: func(cmd *cobra.Command, args []string) { - fmt.Println("Print: " + strings.Join(args, " ")) - }, - } - - var cmdTimes = &cobra.Command{ - Use: "times [# times] [string to echo]", - Short: "Echo anything to the screen more times", - Long: `echo things multiple times back to the user by providing - a count and a string.`, - Run: func(cmd *cobra.Command, args []string) { - for i:=0; i < echoTimes; i++ { - fmt.Println("Echo: " + strings.Join(args, " ")) - } - }, - } - - cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") - - var rootCmd = &cobra.Command{Use: "app"} - rootCmd.AddCommand(cmdPrint, cmdEcho) - cmdEcho.AddCommand(cmdTimes) - rootCmd.Execute() - } - -For a more complete example of a larger application, please checkout [Hugo](http://hugo.spf13.com) - -## The Help Command - -Cobra automatically adds a help command to your application when you have subcommands. -This will be called when a user runs 'app help'. Additionally help will also -support all other commands as input. Say for instance you have a command called -'create' without any additional configuration cobra will work when 'app help -create' is called. Every command will automatically have the '--help' flag added. - -### Example - -The following output is automatically generated by cobra. Nothing beyond the -command and flag definitions are needed. - - > hugo help - - A Fast and Flexible Static Site Generator built with - love by spf13 and friends in Go. - - Complete documentation is available at http://hugo.spf13.com - - Usage: - hugo [flags] - hugo [command] - - Available Commands: - server :: Hugo runs it's own a webserver to render the files - version :: Print the version number of Hugo - check :: Check content in the source directory - benchmark :: Benchmark hugo by building a site a number of times - help [command] :: Help about any command - - Available Flags: - -b, --base-url="": hostname (and path) to the root eg. http://spf13.com/ - -D, --build-drafts=false: include content marked as draft - --config="": config file (default is path/config.yaml|json|toml) - -d, --destination="": filesystem path to write files to - -s, --source="": filesystem path to read files relative from - --stepAnalysis=false: display memory and timing of different steps of the program - --uglyurls=false: if true, use /filename.html instead of /filename/ - -v, --verbose=false: verbose output - -w, --watch=false: watch filesystem for changes and recreate as needed - - Use "hugo help [command]" for more information about that command. - - - -Help is just a command like any other. 
There is no special logic or behavior -around it. In fact you can provide your own if you want. - -### Defining your own help - -You can provide your own Help command or you own template for the default command to use. - -The default help command is - - func (c *Command) initHelp() { - if c.helpCommand == nil { - c.helpCommand = &Command{ - Use: "help [command]", - Short: "Help about any command", - Long: `Help provides help for any command in the application. - Simply type ` + c.Name() + ` help [path to command] for full details.`, - Run: c.HelpFunc(), - } - } - c.AddCommand(c.helpCommand) - } - -You can provide your own command, function or template through the following methods. - - command.SetHelpCommand(cmd *Command) - - command.SetHelpFunc(f func(*Command, []string)) - - command.SetHelpTemplate(s string) - -The latter two will also apply to any children commands. - -## Usage - -When the user provides an invalid flag or invalid command Cobra responds by -showing the user the 'usage' - -### Example -You may recognize this from the help above. That's because the default help -embeds the usage as part of it's output. - - Usage: - hugo [flags] - hugo [command] - - Available Commands: - server Hugo runs it's own a webserver to render the files - version Print the version number of Hugo - check Check content in the source directory - benchmark Benchmark hugo by building a site a number of times - help [command] Help about any command - - Available Flags: - -b, --base-url="": hostname (and path) to the root eg. http://spf13.com/ - -D, --build-drafts=false: include content marked as draft - --config="": config file (default is path/config.yaml|json|toml) - -d, --destination="": filesystem path to write files to - -s, --source="": filesystem path to read files relative from - --stepAnalysis=false: display memory and timing of different steps of the program - --uglyurls=false: if true, use /filename.html instead of /filename/ - -v, --verbose=false: verbose output - -w, --watch=false: watch filesystem for changes and recreate as needed - -### Defining your own usage -You can provide your own usage function or template for cobra to use. - -The default usage function is - - return func(c *Command) error { - err := tmpl(c.Out(), c.UsageTemplate(), c) - return err - } - -Like help the function and template are over ridable through public methods. - - command.SetUsageFunc(f func(*Command) error) - - command.SetUsageTemplate(s string) - -## PreRun or PostRun Hooks - -It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistendPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherrited by children if they do not declare their own. These function are run in the following order: - -- `PersistentPreRun` -- `PreRun` -- `Run` -- `PostRun` -- `PersistenPostRun` - -And example of two commands which use all of these features is below. 
When the subcommand in executed it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun` - -```go -package main - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -func main() { - - var rootCmd = &cobra.Command{ - Use: "root [sub]", - Short: "My root command", - PersistentPreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args) - }, - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args) - }, - } - - var subCmd = &cobra.Command{ - Use: "sub [no options!]", - Short: "My sub command", - PreRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PreRun with args: %v\n", args) - }, - Run: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd Run with args: %v\n", args) - }, - PostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PostRun with args: %v\n", args) - }, - PersistentPostRun: func(cmd *cobra.Command, args []string) { - fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args) - }, - } - - rootCmd.AddCommand(subCmd) - - rootCmd.SetArgs([]string{""}) - _ = rootCmd.Execute() - fmt.Print("\n") - rootCmd.SetArgs([]string{"sub", "arg1", "arg2"}) - _ = rootCmd.Execute() -} -``` - -## Generating markdown formatted documentation for your command - -Cobra can generate a markdown formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](md_docs.md) - -## Generating bash completions for your command - -Cobra can generate a bash completions file. If you add more information to your command these completions can be amazingly powerful and flexible. Read more about [Bash Completions](bash_completions.md) - -## Debugging - -Cobra provides a ‘DebugFlags’ method on a command which when called will print -out everything Cobra knows about the flags for each command - -### Example - - command.DebugFlags() - -## Release Notes -* **0.9.0** June 17, 2014 - * flags can appears anywhere in the args (provided they are unambiguous) - * --help prints usage screen for app or command - * Prefix matching for commands - * Cleaner looking help and usage output - * Extensive test suite -* **0.8.0** Nov 5, 2013 - * Reworked interface to remove commander completely - * Command now primary structure - * No initialization needed - * Usage & Help templates & functions definable at any level - * Updated Readme -* **0.7.0** Sept 24, 2013 - * Needs more eyes - * Test suite - * Support for automatic error messages - * Support for help command - * Support for printing to any io.Writer instead of os.Stderr - * Support for persistent flags which cascade down tree - * Ready for integration into Hugo -* **0.1.0** Sept 3, 2013 - * Implement first draft - -## ToDo -* Launch proper documentation site - -## Contributing - -1. Fork it -2. Create your feature branch (`git checkout -b my-new-feature`) -3. Commit your changes (`git commit -am 'Add some feature'`) -4. Push to the branch (`git push origin my-new-feature`) -5. 
Create new Pull Request - -## Contributors - -Names in no particular order: - -* [spf13](https://github.com/spf13) - -## License - -Cobra is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) - - -[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge") - diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/bash_completions.go b/Godeps/_workspace/src/github.com/spf13/cobra/bash_completions.go deleted file mode 100644 index 3a421bc..0000000 --- a/Godeps/_workspace/src/github.com/spf13/cobra/bash_completions.go +++ /dev/null @@ -1,370 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "os" - "sort" - "strings" - - "github.com/spf13/pflag" -) - -const ( - BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extentions" - BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" -) - -func preamble(out *bytes.Buffer) { - fmt.Fprintf(out, `#!/bin/bash - - -__debug() -{ - if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then - echo "$*" >> "${BASH_COMP_DEBUG_FILE}" - fi -} - -__index_of_word() -{ - local w word=$1 - shift - index=0 - for w in "$@"; do - [[ $w = "$word" ]] && return - index=$((index+1)) - done - index=-1 -} - -__contains_word() -{ - local w word=$1; shift - for w in "$@"; do - [[ $w = "$word" ]] && return - done - return 1 -} - -__handle_reply() -{ - __debug "${FUNCNAME}" - case $cur in - -*) - compopt -o nospace - local allflags - if [ ${#must_have_one_flag[@]} -ne 0 ]; then - allflags=("${must_have_one_flag[@]}") - else - allflags=("${flags[*]} ${two_word_flags[*]}") - fi - COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) - [[ $COMPREPLY == *= ]] || compopt +o nospace - return 0; - ;; - esac - - # check if we are handling a flag with special work handling - local index - __index_of_word "${prev}" "${flags_with_completion[@]}" - if [[ ${index} -ge 0 ]]; then - ${flags_completion[${index}]} - return - fi - - # we are parsing a flag and don't have a special handler, no completion - if [[ ${cur} != "${words[cword]}" ]]; then - return - fi - - local completions - if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then - completions=("${must_have_one_flag[@]}") - elif [[ ${#must_have_one_noun[@]} -ne 0 ]]; then - completions=("${must_have_one_noun[@]}") - else - completions=("${commands[@]}") - fi - COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) - - if [[ ${#COMPREPLY[@]} -eq 0 ]]; then - declare -F __custom_func >/dev/null && __custom_func - fi -} - -# The arguments should be in the form "ext1|ext2|extn" -__handle_filename_extension_flag() -{ - local ext="$1" - _filedir "@(${ext})" -} - -__handle_flag() -{ - __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" - - # if a command required a flag, and we found it, unset must_have_one_flag() - local flagname=${words[c]} - # if the word contained an = - if [[ ${words[c]} == *"="* ]]; then - flagname=${flagname%%=*} # strip everything after the = - flagname="${flagname}=" # but put the = back - fi - __debug "${FUNCNAME}: looking for ${flagname}" - if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then - must_have_one_flag=() - fi - - # skip the argument to a two word flag - if __contains_word "${words[c]}" "${two_word_flags[@]}"; then - c=$((c+1)) - # if we are looking for a flags value, don't show commands - if [[ $c -eq $cword ]]; then - commands=() - fi - fi - - # skip the flag itself - c=$((c+1)) - -} - -__handle_noun() -{ - __debug "${FUNCNAME}: c is $c 
words[c] is ${words[c]}" - - if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then - must_have_one_noun=() - fi - - nouns+=("${words[c]}") - c=$((c+1)) -} - -__handle_command() -{ - __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" - - local next_command - if [[ -n ${last_command} ]]; then - next_command="_${last_command}_${words[c]}" - else - next_command="_${words[c]}" - fi - c=$((c+1)) - __debug "${FUNCNAME}: looking for ${next_command}" - declare -F $next_command >/dev/null && $next_command -} - -__handle_word() -{ - if [[ $c -ge $cword ]]; then - __handle_reply - return - fi - __debug "${FUNCNAME}: c is $c words[c] is ${words[c]}" - if [[ "${words[c]}" == -* ]]; then - __handle_flag - elif __contains_word "${words[c]}" "${commands[@]}"; then - __handle_command - else - __handle_noun - fi - __handle_word -} - -`) -} - -func postscript(out *bytes.Buffer, name string) { - fmt.Fprintf(out, "__start_%s()\n", name) - fmt.Fprintf(out, `{ - local cur prev words cword - _init_completion -s || return - - local c=0 - local flags=() - local two_word_flags=() - local flags_with_completion=() - local flags_completion=() - local commands=("%s") - local must_have_one_flag=() - local must_have_one_noun=() - local last_command - local nouns=() - - __handle_word -} - -`, name) - fmt.Fprintf(out, "complete -F __start_%s %s\n", name, name) - fmt.Fprintf(out, "# ex: ts=4 sw=4 et filetype=sh\n") -} - -func writeCommands(cmd *Command, out *bytes.Buffer) { - fmt.Fprintf(out, " commands=()\n") - for _, c := range cmd.Commands() { - if len(c.Deprecated) > 0 { - continue - } - fmt.Fprintf(out, " commands+=(%q)\n", c.Name()) - } - fmt.Fprintf(out, "\n") -} - -func writeFlagHandler(name string, annotations map[string][]string, out *bytes.Buffer) { - for key, value := range annotations { - switch key { - case BashCompFilenameExt: - fmt.Fprintf(out, " flags_with_completion+=(%q)\n", name) - - if len(value) > 0 { - ext := "__handle_filename_extension_flag " + strings.Join(value, "|") - fmt.Fprintf(out, " flags_completion+=(%q)\n", ext) - } else { - ext := "_filedir" - fmt.Fprintf(out, " flags_completion+=(%q)\n", ext) - } - } - } -} - -func writeShortFlag(flag *pflag.Flag, out *bytes.Buffer) { - b := (flag.Value.Type() == "bool") - name := flag.Shorthand - format := " " - if !b { - format += "two_word_" - } - format += "flags+=(\"-%s\")\n" - fmt.Fprintf(out, format, name) - writeFlagHandler("-"+name, flag.Annotations, out) -} - -func writeFlag(flag *pflag.Flag, out *bytes.Buffer) { - b := (flag.Value.Type() == "bool") - name := flag.Name - format := " flags+=(\"--%s" - if !b { - format += "=" - } - format += "\")\n" - fmt.Fprintf(out, format, name) - writeFlagHandler("--"+name, flag.Annotations, out) -} - -func writeFlags(cmd *Command, out *bytes.Buffer) { - fmt.Fprintf(out, ` flags=() - two_word_flags=() - flags_with_completion=() - flags_completion=() - -`) - cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { - writeFlag(flag, out) - if len(flag.Shorthand) > 0 { - writeShortFlag(flag, out) - } - }) - - fmt.Fprintf(out, "\n") -} - -func writeRequiredFlag(cmd *Command, out *bytes.Buffer) { - fmt.Fprintf(out, " must_have_one_flag=()\n") - flags := cmd.NonInheritedFlags() - flags.VisitAll(func(flag *pflag.Flag) { - for key, _ := range flag.Annotations { - switch key { - case BashCompOneRequiredFlag: - format := " must_have_one_flag+=(\"--%s" - b := (flag.Value.Type() == "bool") - if !b { - format += "=" - } - format += "\")\n" - fmt.Fprintf(out, format, flag.Name) - - if len(flag.Shorthand) > 0 
{ - fmt.Fprintf(out, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand) - } - } - } - }) -} - -func writeRequiredNoun(cmd *Command, out *bytes.Buffer) { - fmt.Fprintf(out, " must_have_one_noun=()\n") - sort.Sort(sort.StringSlice(cmd.ValidArgs)) - for _, value := range cmd.ValidArgs { - fmt.Fprintf(out, " must_have_one_noun+=(%q)\n", value) - } -} - -func gen(cmd *Command, out *bytes.Buffer) { - for _, c := range cmd.Commands() { - if len(c.Deprecated) > 0 { - continue - } - gen(c, out) - } - commandName := cmd.CommandPath() - commandName = strings.Replace(commandName, " ", "_", -1) - fmt.Fprintf(out, "_%s()\n{\n", commandName) - fmt.Fprintf(out, " last_command=%q\n", commandName) - writeCommands(cmd, out) - writeFlags(cmd, out) - writeRequiredFlag(cmd, out) - writeRequiredNoun(cmd, out) - fmt.Fprintf(out, "}\n\n") -} - -func (cmd *Command) GenBashCompletion(out *bytes.Buffer) { - preamble(out) - if len(cmd.BashCompletionFunction) > 0 { - fmt.Fprintf(out, "%s\n", cmd.BashCompletionFunction) - } - gen(cmd, out) - postscript(out, cmd.Name()) -} - -func (cmd *Command) GenBashCompletionFile(filename string) error { - out := new(bytes.Buffer) - - cmd.GenBashCompletion(out) - - outFile, err := os.Create(filename) - if err != nil { - return err - } - defer outFile.Close() - - _, err = outFile.Write(out.Bytes()) - if err != nil { - return err - } - return nil -} - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists. -func (cmd *Command) MarkFlagRequired(name string) error { - return MarkFlagRequired(cmd.Flags(), name) -} - -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists. -func MarkFlagRequired(flags *pflag.FlagSet, name string) error { - return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func (cmd *Command) MarkFlagFilename(name string, extensions ...string) error { - return MarkFlagFilename(cmd.Flags(), name, extensions...) -} - -// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. -// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. -func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { - return flags.SetAnnotation(name, BashCompFilenameExt, extensions) -} diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/bash_completions_test.go b/Godeps/_workspace/src/github.com/spf13/cobra/bash_completions_test.go deleted file mode 100644 index acb6d81..0000000 --- a/Godeps/_workspace/src/github.com/spf13/cobra/bash_completions_test.go +++ /dev/null @@ -1,80 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "os" - "strings" - "testing" -) - -var _ = fmt.Println -var _ = os.Stderr - -func checkOmit(t *testing.T, found, unexpected string) { - if strings.Contains(found, unexpected) { - t.Errorf("Unexpected response.\nGot: %q\nBut should not have!\n", unexpected) - } -} - -func check(t *testing.T, found, expected string) { - if !strings.Contains(found, expected) { - t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) - } -} - -// World worst custom function, just keep telling you to enter hello! 
-const ( - bash_completion_func = `__custom_func() { -COMPREPLY=( "hello" ) -} -` -) - -func TestBashCompletions(t *testing.T) { - c := initializeWithRootCmd() - cmdEcho.AddCommand(cmdTimes) - c.AddCommand(cmdEcho, cmdPrint, cmdDeprecated) - - // custom completion function - c.BashCompletionFunction = bash_completion_func - - // required flag - c.MarkFlagRequired("introot") - - // valid nouns - validArgs := []string{"pods", "nodes", "services", "replicationControllers"} - c.ValidArgs = validArgs - - // filename - var flagval string - c.Flags().StringVar(&flagval, "filename", "", "Enter a filename") - c.MarkFlagFilename("filename", "json", "yaml", "yml") - - // filename extensions - var flagvalExt string - c.Flags().StringVar(&flagvalExt, "filename-ext", "", "Enter a filename (extension limited)") - c.MarkFlagFilename("filename-ext") - - out := new(bytes.Buffer) - c.GenBashCompletion(out) - str := out.String() - - check(t, str, "_cobra-test") - check(t, str, "_cobra-test_echo") - check(t, str, "_cobra-test_echo_times") - check(t, str, "_cobra-test_print") - - // check for required flags - check(t, str, `must_have_one_flag+=("--introot=")`) - // check for custom completion function - check(t, str, `COMPREPLY=( "hello" )`) - // check for required nouns - check(t, str, `must_have_one_noun+=("pods")`) - // check for filename extension flags - check(t, str, `flags_completion+=("_filedir")`) - // check for filename extension flags - check(t, str, `flags_completion+=("__handle_filename_extension_flag json|yaml|yml")`) - - checkOmit(t, str, cmdDeprecated.Name()) -} diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/cobra.go b/Godeps/_workspace/src/github.com/spf13/cobra/cobra.go deleted file mode 100644 index 78b92b0..0000000 --- a/Godeps/_workspace/src/github.com/spf13/cobra/cobra.go +++ /dev/null @@ -1,112 +0,0 @@ -// Copyright © 2013 Steve Francia . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Commands similar to git, go tools and other modern CLI tools -// inspired by go, go-Commander, gh and subcommand - -package cobra - -import ( - "fmt" - "io" - "reflect" - "strconv" - "strings" - "text/template" -) - -var initializers []func() - -// automatic prefix matching can be a dangerous thing to automatically enable in CLI tools. -// Set this to true to enable it -var EnablePrefixMatching bool = false - -// enables an information splash screen on Windows if the CLI is started from explorer.exe. -var EnableWindowsMouseTrap bool = true - -var MousetrapHelpText string = `This is a command line tool - -You need to open cmd.exe and run it from there. -` - -//OnInitialize takes a series of func() arguments and appends them to a slice of func(). -func OnInitialize(y ...func()) { - for _, x := range y { - initializers = append(initializers, x) - } -} - -//Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, -//Maps and Slices, Gt will compare their lengths. 
Ints are compared directly while strings are first parsed as -//ints and then compared. -func Gt(a interface{}, b interface{}) bool { - var left, right int64 - av := reflect.ValueOf(a) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - left = int64(av.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - left = av.Int() - case reflect.String: - left, _ = strconv.ParseInt(av.String(), 10, 64) - } - - bv := reflect.ValueOf(b) - - switch bv.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - right = int64(bv.Len()) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - right = bv.Int() - case reflect.String: - right, _ = strconv.ParseInt(bv.String(), 10, 64) - } - - return left > right -} - -//Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. -func Eq(a interface{}, b interface{}) bool { - av := reflect.ValueOf(a) - bv := reflect.ValueOf(b) - - switch av.Kind() { - case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: - panic("Eq called on unsupported type") - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return av.Int() == bv.Int() - case reflect.String: - return av.String() == bv.String() - } - return false -} - -//rpad adds padding to the right of a string -func rpad(s string, padding int) string { - template := fmt.Sprintf("%%-%ds", padding) - return fmt.Sprintf(template, s) -} - -// tmpl executes the given template text on data, writing the result to w. -func tmpl(w io.Writer, text string, data interface{}) error { - t := template.New("top") - t.Funcs(template.FuncMap{ - "trim": strings.TrimSpace, - "rpad": rpad, - "gt": Gt, - "eq": Eq, - }) - template.Must(t.Parse(text)) - return t.Execute(w, data) -} diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/cobra_test.go b/Godeps/_workspace/src/github.com/spf13/cobra/cobra_test.go deleted file mode 100644 index 7cb4917..0000000 --- a/Godeps/_workspace/src/github.com/spf13/cobra/cobra_test.go +++ /dev/null @@ -1,965 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "os" - "reflect" - "runtime" - "strings" - "testing" - - "github.com/spf13/pflag" -) - -var _ = fmt.Println -var _ = os.Stderr - -var tp, te, tt, t1, tr []string -var rootPersPre, echoPre, echoPersPre, timesPersPre []string -var flagb1, flagb2, flagb3, flagbr, flagbp bool -var flags1, flags2a, flags2b, flags3 string -var flagi1, flagi2, flagi3, flagir int -var globalFlag1 bool -var flagEcho, rootcalled bool -var versionUsed int - -const strtwoParentHelp = "help message for parent flag strtwo" -const strtwoChildHelp = "help message for child flag strtwo" - -var cmdPrint = &Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `an absolutely utterly useless command for testing.`, - Run: func(cmd *Command, args []string) { - tp = args - }, -} - -var cmdEcho = &Command{ - Use: "echo [string to echo]", - Aliases: []string{"say"}, - Short: "Echo anything to the screen", - Long: `an utterly useless command for testing.`, - Example: "Just run cobra-test echo", - PersistentPreRun: func(cmd *Command, args []string) { - echoPersPre = args - }, - PreRun: func(cmd *Command, args []string) { - echoPre = args - }, - Run: func(cmd *Command, args []string) { - te = args - }, -} - -var cmdEchoSub = &Command{ - Use: "echosub [string to print]", - Short: "second sub command for echo", - Long: `an absolutely utterly useless 
command for testing gendocs!.`, - Run: func(cmd *Command, args []string) { - }, -} - -var cmdDeprecated = &Command{ - Use: "deprecated [can't do anything here]", - Short: "A command which is deprecated", - Long: `an absolutely utterly useless command for testing deprecation!.`, - Deprecated: "Please use echo instead", - Run: func(cmd *Command, args []string) { - }, -} - -var cmdTimes = &Command{ - Use: "times [# times] [string to echo]", - Short: "Echo anything to the screen more times", - Long: `a slightly useless command for testing.`, - PersistentPreRun: func(cmd *Command, args []string) { - timesPersPre = args - }, - Run: func(cmd *Command, args []string) { - tt = args - }, -} - -var cmdRootNoRun = &Command{ - Use: "cobra-test", - Short: "The root can run it's own function", - Long: "The root description for help", - PersistentPreRun: func(cmd *Command, args []string) { - rootPersPre = args - }, -} - -var cmdRootSameName = &Command{ - Use: "print", - Short: "Root with the same name as a subcommand", - Long: "The root description for help", -} - -var cmdRootWithRun = &Command{ - Use: "cobra-test", - Short: "The root can run it's own function", - Long: "The root description for help", - Run: func(cmd *Command, args []string) { - tr = args - rootcalled = true - }, -} - -var cmdSubNoRun = &Command{ - Use: "subnorun", - Short: "A subcommand without a Run function", - Long: "A long output about a subcommand without a Run function", -} - -var cmdVersion1 = &Command{ - Use: "version", - Short: "Print the version number", - Long: `First version of the version command`, - Run: func(cmd *Command, args []string) { - versionUsed = 1 - }, -} - -var cmdVersion2 = &Command{ - Use: "version", - Short: "Print the version number", - Long: `Second version of the version command`, - Run: func(cmd *Command, args []string) { - versionUsed = 2 - }, -} - -func flagInit() { - cmdEcho.ResetFlags() - cmdPrint.ResetFlags() - cmdTimes.ResetFlags() - cmdRootNoRun.ResetFlags() - cmdRootSameName.ResetFlags() - cmdRootWithRun.ResetFlags() - cmdSubNoRun.ResetFlags() - cmdRootNoRun.PersistentFlags().StringVarP(&flags2a, "strtwo", "t", "two", strtwoParentHelp) - cmdEcho.Flags().IntVarP(&flagi1, "intone", "i", 123, "help message for flag intone") - cmdTimes.Flags().IntVarP(&flagi2, "inttwo", "j", 234, "help message for flag inttwo") - cmdPrint.Flags().IntVarP(&flagi3, "intthree", "i", 345, "help message for flag intthree") - cmdEcho.PersistentFlags().StringVarP(&flags1, "strone", "s", "one", "help message for flag strone") - cmdEcho.PersistentFlags().BoolVarP(&flagbp, "persistentbool", "p", false, "help message for flag persistentbool") - cmdTimes.PersistentFlags().StringVarP(&flags2b, "strtwo", "t", "2", strtwoChildHelp) - cmdPrint.PersistentFlags().StringVarP(&flags3, "strthree", "s", "three", "help message for flag strthree") - cmdEcho.Flags().BoolVarP(&flagb1, "boolone", "b", true, "help message for flag boolone") - cmdTimes.Flags().BoolVarP(&flagb2, "booltwo", "c", false, "help message for flag booltwo") - cmdPrint.Flags().BoolVarP(&flagb3, "boolthree", "b", true, "help message for flag boolthree") - cmdVersion1.ResetFlags() - cmdVersion2.ResetFlags() -} - -func commandInit() { - cmdEcho.ResetCommands() - cmdPrint.ResetCommands() - cmdTimes.ResetCommands() - cmdRootNoRun.ResetCommands() - cmdRootSameName.ResetCommands() - cmdRootWithRun.ResetCommands() - cmdSubNoRun.ResetCommands() -} - -func initialize() *Command { - tt, tp, te = nil, nil, nil - rootPersPre, echoPre, echoPersPre, timesPersPre = nil, nil, nil, nil - - 
var c = cmdRootNoRun - flagInit() - commandInit() - return c -} - -func initializeWithSameName() *Command { - tt, tp, te = nil, nil, nil - rootPersPre, echoPre, echoPersPre, timesPersPre = nil, nil, nil, nil - var c = cmdRootSameName - flagInit() - commandInit() - return c -} - -func initializeWithRootCmd() *Command { - cmdRootWithRun.ResetCommands() - tt, tp, te, tr, rootcalled = nil, nil, nil, nil, false - flagInit() - cmdRootWithRun.Flags().BoolVarP(&flagbr, "boolroot", "b", false, "help message for flag boolroot") - cmdRootWithRun.Flags().IntVarP(&flagir, "introot", "i", 321, "help message for flag introot") - commandInit() - return cmdRootWithRun -} - -type resulter struct { - Error error - Output string - Command *Command -} - -func fullSetupTest(input string) resulter { - c := initializeWithRootCmd() - - return fullTester(c, input) -} - -func noRRSetupTest(input string) resulter { - c := initialize() - - return fullTester(c, input) -} - -func rootOnlySetupTest(input string) resulter { - c := initializeWithRootCmd() - - return simpleTester(c, input) -} - -func simpleTester(c *Command, input string) resulter { - buf := new(bytes.Buffer) - // Testing flag with invalid input - c.SetOutput(buf) - c.SetArgs(strings.Split(input, " ")) - - err := c.Execute() - output := buf.String() - - return resulter{err, output, c} -} - -func fullTester(c *Command, input string) resulter { - buf := new(bytes.Buffer) - // Testing flag with invalid input - c.SetOutput(buf) - cmdEcho.AddCommand(cmdTimes) - c.AddCommand(cmdPrint, cmdEcho, cmdSubNoRun, cmdDeprecated) - c.SetArgs(strings.Split(input, " ")) - - err := c.Execute() - output := buf.String() - - return resulter{err, output, c} -} - -func logErr(t *testing.T, found, expected string) { - out := new(bytes.Buffer) - - _, _, line, ok := runtime.Caller(2) - if ok { - fmt.Fprintf(out, "Line: %d ", line) - } - fmt.Fprintf(out, "Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) - t.Errorf(out.String()) -} - -func checkResultContains(t *testing.T, x resulter, check string) { - if !strings.Contains(x.Output, check) { - logErr(t, x.Output, check) - } -} - -func checkResultOmits(t *testing.T, x resulter, check string) { - if strings.Contains(x.Output, check) { - logErr(t, x.Output, check) - } -} - -func checkOutputContains(t *testing.T, c *Command, check string) { - buf := new(bytes.Buffer) - c.SetOutput(buf) - c.Execute() - - if !strings.Contains(buf.String(), check) { - logErr(t, buf.String(), check) - } -} - -func TestSingleCommand(t *testing.T) { - noRRSetupTest("print one two") - - if te != nil || tt != nil { - t.Error("Wrong command called") - } - if tp == nil { - t.Error("Wrong command called") - } - if strings.Join(tp, " ") != "one two" { - t.Error("Command didn't parse correctly") - } -} - -func TestChildCommand(t *testing.T) { - noRRSetupTest("echo times one two") - - if te != nil || tp != nil { - t.Error("Wrong command called") - } - if tt == nil { - t.Error("Wrong command called") - } - if strings.Join(tt, " ") != "one two" { - t.Error("Command didn't parse correctly") - } -} - -func TestCommandAlias(t *testing.T) { - noRRSetupTest("say times one two") - - if te != nil || tp != nil { - t.Error("Wrong command called") - } - if tt == nil { - t.Error("Wrong command called") - } - if strings.Join(tt, " ") != "one two" { - t.Error("Command didn't parse correctly") - } -} - -func TestPrefixMatching(t *testing.T) { - EnablePrefixMatching = true - noRRSetupTest("ech times one two") - - if te != nil || tp != nil { - 
t.Error("Wrong command called") - } - if tt == nil { - t.Error("Wrong command called") - } - if strings.Join(tt, " ") != "one two" { - t.Error("Command didn't parse correctly") - } - - EnablePrefixMatching = false -} - -func TestNoPrefixMatching(t *testing.T) { - EnablePrefixMatching = false - - noRRSetupTest("ech times one two") - - if !(tt == nil && te == nil && tp == nil) { - t.Error("Wrong command called") - } -} - -func TestAliasPrefixMatching(t *testing.T) { - EnablePrefixMatching = true - noRRSetupTest("sa times one two") - - if te != nil || tp != nil { - t.Error("Wrong command called") - } - if tt == nil { - t.Error("Wrong command called") - } - if strings.Join(tt, " ") != "one two" { - t.Error("Command didn't parse correctly") - } - EnablePrefixMatching = false -} - -func TestChildSameName(t *testing.T) { - c := initializeWithSameName() - c.AddCommand(cmdPrint, cmdEcho) - c.SetArgs(strings.Split("print one two", " ")) - c.Execute() - - if te != nil || tt != nil { - t.Error("Wrong command called") - } - if tp == nil { - t.Error("Wrong command called") - } - if strings.Join(tp, " ") != "one two" { - t.Error("Command didn't parse correctly") - } -} - -func TestGrandChildSameName(t *testing.T) { - c := initializeWithSameName() - cmdTimes.AddCommand(cmdPrint) - c.AddCommand(cmdTimes) - c.SetArgs(strings.Split("times print one two", " ")) - c.Execute() - - if te != nil || tt != nil { - t.Error("Wrong command called") - } - if tp == nil { - t.Error("Wrong command called") - } - if strings.Join(tp, " ") != "one two" { - t.Error("Command didn't parse correctly") - } -} - -func TestFlagLong(t *testing.T) { - noRRSetupTest("echo --intone=13 something here") - - if strings.Join(te, " ") != "something here" { - t.Errorf("flags didn't leave proper args remaining..%s given", te) - } - if flagi1 != 13 { - t.Errorf("int flag didn't get correct value, had %d", flagi1) - } - if flagi2 != 234 { - t.Errorf("default flag value changed, 234 expected, %d given", flagi2) - } -} - -func TestFlagShort(t *testing.T) { - noRRSetupTest("echo -i13 something here") - - if strings.Join(te, " ") != "something here" { - t.Errorf("flags didn't leave proper args remaining..%s given", te) - } - if flagi1 != 13 { - t.Errorf("int flag didn't get correct value, had %d", flagi1) - } - if flagi2 != 234 { - t.Errorf("default flag value changed, 234 expected, %d given", flagi2) - } - - noRRSetupTest("echo -i 13 something here") - - if strings.Join(te, " ") != "something here" { - t.Errorf("flags didn't leave proper args remaining..%s given", te) - } - if flagi1 != 13 { - t.Errorf("int flag didn't get correct value, had %d", flagi1) - } - if flagi2 != 234 { - t.Errorf("default flag value changed, 234 expected, %d given", flagi2) - } - - noRRSetupTest("print -i99 one two") - - if strings.Join(tp, " ") != "one two" { - t.Errorf("flags didn't leave proper args remaining..%s given", tp) - } - if flagi3 != 99 { - t.Errorf("int flag didn't get correct value, had %d", flagi3) - } - if flagi1 != 123 { - t.Errorf("default flag value changed on different command with same shortname, 234 expected, %d given", flagi2) - } -} - -func TestChildCommandFlags(t *testing.T) { - noRRSetupTest("echo times -j 99 one two") - - if strings.Join(tt, " ") != "one two" { - t.Errorf("flags didn't leave proper args remaining..%s given", tt) - } - - // Testing with flag that shouldn't be persistent - r := noRRSetupTest("echo times -j 99 -i77 one two") - - if r.Error == nil { - t.Errorf("invalid flag should generate error") - } - - if 
!strings.Contains(r.Output, "unknown shorthand") { - t.Errorf("Wrong error message displayed, \n %s", r.Output) - } - - if flagi2 != 99 { - t.Errorf("flag value should be 99, %d given", flagi2) - } - - if flagi1 != 123 { - t.Errorf("unset flag should have default value, expecting 123, given %d", flagi1) - } - - // Testing with flag only existing on child - r = noRRSetupTest("echo -j 99 -i77 one two") - - if r.Error == nil { - t.Errorf("invalid flag should generate error") - } - - if !strings.Contains(r.Output, "unknown shorthand flag") { - t.Errorf("Wrong error message displayed, \n %s", r.Output) - } - - // Testing with persistent flag overwritten by child - noRRSetupTest("echo times --strtwo=child one two") - - if flags2b != "child" { - t.Errorf("flag value should be child, %s given", flags2b) - } - - if flags2a != "two" { - t.Errorf("unset flag should have default value, expecting two, given %s", flags2a) - } - - // Testing flag with invalid input - r = noRRSetupTest("echo -i10E") - - if r.Error == nil { - t.Errorf("invalid input should generate error") - } - - if !strings.Contains(r.Output, "invalid argument \"10E\" for i10E") { - t.Errorf("Wrong error message displayed, \n %s", r.Output) - } -} - -func TestTrailingCommandFlags(t *testing.T) { - x := fullSetupTest("echo two -x") - - if x.Error == nil { - t.Errorf("invalid flag should generate error") - } -} - -func TestInvalidSubcommandFlags(t *testing.T) { - cmd := initializeWithRootCmd() - cmd.AddCommand(cmdTimes) - - result := simpleTester(cmd, "times --inttwo=2 --badflag=bar") - - checkResultContains(t, result, "unknown flag: --badflag") - - if strings.Contains(result.Output, "unknown flag: --inttwo") { - t.Errorf("invalid --badflag flag shouldn't fail on 'unknown' --inttwo flag") - } - -} - -func TestSubcommandArgEvaluation(t *testing.T) { - cmd := initializeWithRootCmd() - - first := &Command{ - Use: "first", - Run: func(cmd *Command, args []string) { - }, - } - cmd.AddCommand(first) - - second := &Command{ - Use: "second", - Run: func(cmd *Command, args []string) { - fmt.Fprintf(cmd.Out(), "%v", args) - }, - } - first.AddCommand(second) - - result := simpleTester(cmd, "first second first third") - - expectedOutput := fmt.Sprintf("%v", []string{"first third"}) - if result.Output != expectedOutput { - t.Errorf("exptected %v, got %v", expectedOutput, result.Output) - } -} - -func TestPersistentFlags(t *testing.T) { - fullSetupTest("echo -s something -p more here") - - // persistentFlag should act like normal flag on it's own command - if strings.Join(te, " ") != "more here" { - t.Errorf("flags didn't leave proper args remaining..%s given", te) - } - if flags1 != "something" { - t.Errorf("string flag didn't get correct value, had %v", flags1) - } - if !flagbp { - t.Errorf("persistent bool flag not parsed correctly. Expected true, had %v", flagbp) - } - - // persistentFlag should act like normal flag on it's own command - fullSetupTest("echo times -s again -c -p test here") - - if strings.Join(tt, " ") != "test here" { - t.Errorf("flags didn't leave proper args remaining..%s given", tt) - } - - if flags1 != "again" { - t.Errorf("string flag didn't get correct value, had %v", flags1) - } - - if !flagb2 { - t.Errorf("local flag not parsed correctly. Expected true, had %v", flagb2) - } - if !flagbp { - t.Errorf("persistent bool flag not parsed correctly. 
Expected true, had %v", flagbp) - } -} - -func TestHelpCommand(t *testing.T) { - x := fullSetupTest("help") - checkResultContains(t, x, cmdRootWithRun.Long) - - x = fullSetupTest("help echo") - checkResultContains(t, x, cmdEcho.Long) - - x = fullSetupTest("help echo times") - checkResultContains(t, x, cmdTimes.Long) -} - -func TestChildCommandHelp(t *testing.T) { - c := noRRSetupTest("print --help") - checkResultContains(t, c, strtwoParentHelp) - r := noRRSetupTest("echo times --help") - checkResultContains(t, r, strtwoChildHelp) -} - -func TestNonRunChildHelp(t *testing.T) { - x := noRRSetupTest("subnorun") - checkResultContains(t, x, cmdSubNoRun.Long) -} - -func TestRunnableRootCommand(t *testing.T) { - fullSetupTest("") - - if rootcalled != true { - t.Errorf("Root Function was not called") - } -} - -func TestRunnableRootCommandNilInput(t *testing.T) { - empty_arg := make([]string, 0) - c := initializeWithRootCmd() - - buf := new(bytes.Buffer) - // Testing flag with invalid input - c.SetOutput(buf) - cmdEcho.AddCommand(cmdTimes) - c.AddCommand(cmdPrint, cmdEcho) - c.SetArgs(empty_arg) - - c.Execute() - - if rootcalled != true { - t.Errorf("Root Function was not called") - } -} - -func TestRunnableRootCommandEmptyInput(t *testing.T) { - args := make([]string, 3) - args[0] = "" - args[1] = "--introot=12" - args[2] = "" - c := initializeWithRootCmd() - - buf := new(bytes.Buffer) - // Testing flag with invalid input - c.SetOutput(buf) - cmdEcho.AddCommand(cmdTimes) - c.AddCommand(cmdPrint, cmdEcho) - c.SetArgs(args) - - c.Execute() - - if rootcalled != true { - t.Errorf("Root Function was not called.\n\nOutput was:\n\n%s\n", buf) - } -} - -func TestInvalidSubcommandWhenArgsAllowed(t *testing.T) { - fullSetupTest("echo invalid-sub") - - if te[0] != "invalid-sub" { - t.Errorf("Subcommand didn't work...") - } -} - -func TestRootFlags(t *testing.T) { - fullSetupTest("-i 17 -b") - - if flagbr != true { - t.Errorf("flag value should be true, %v given", flagbr) - } - - if flagir != 17 { - t.Errorf("flag value should be 17, %d given", flagir) - } -} - -func TestRootHelp(t *testing.T) { - x := fullSetupTest("--help") - - checkResultContains(t, x, "Available Commands:") - checkResultContains(t, x, "for more information about a command") - - if strings.Contains(x.Output, "unknown flag: --help") { - t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output) - } - - if strings.Contains(x.Output, cmdEcho.Use) { - t.Errorf("--help shouldn't display subcommand's usage, Got: \n %s", x.Output) - } - - x = fullSetupTest("echo --help") - - if strings.Contains(x.Output, cmdTimes.Use) { - t.Errorf("--help shouldn't display subsubcommand's usage, Got: \n %s", x.Output) - } - - checkResultContains(t, x, "Available Commands:") - checkResultContains(t, x, "for more information about a command") - - if strings.Contains(x.Output, "unknown flag: --help") { - t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output) - } - -} - -func TestFlagAccess(t *testing.T) { - initialize() - - local := cmdTimes.LocalFlags() - inherited := cmdTimes.InheritedFlags() - - for _, f := range []string{"inttwo", "strtwo", "booltwo"} { - if local.Lookup(f) == nil { - t.Errorf("LocalFlags expected to contain %s, Got: nil", f) - } - } - if inherited.Lookup("strone") == nil { - t.Errorf("InheritedFlags expected to contain strone, Got: nil") - } - if inherited.Lookup("strtwo") != nil { - t.Errorf("InheritedFlags shouldn not contain overwritten flag strtwo") - - } -} - -func TestNoNRunnableRootCommandNilInput(t *testing.T) { - 
args := make([]string, 0) - c := initialize() - - buf := new(bytes.Buffer) - // Testing flag with invalid input - c.SetOutput(buf) - cmdEcho.AddCommand(cmdTimes) - c.AddCommand(cmdPrint, cmdEcho) - c.SetArgs(args) - - c.Execute() - - if !strings.Contains(buf.String(), cmdRootNoRun.Long) { - t.Errorf("Expected to get help output, Got: \n %s", buf) - } -} - -func TestRootNoCommandHelp(t *testing.T) { - x := rootOnlySetupTest("--help") - - checkResultOmits(t, x, "Available Commands:") - checkResultOmits(t, x, "for more information about a command") - - if strings.Contains(x.Output, "unknown flag: --help") { - t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output) - } - - x = rootOnlySetupTest("echo --help") - - checkResultOmits(t, x, "Available Commands:") - checkResultOmits(t, x, "for more information about a command") - - if strings.Contains(x.Output, "unknown flag: --help") { - t.Errorf("--help shouldn't trigger an error, Got: \n %s", x.Output) - } -} - -func TestRootUnknownCommand(t *testing.T) { - r := noRRSetupTest("bogus") - s := "Error: unknown command \"bogus\" for \"cobra-test\"\nRun 'cobra-test --help' for usage.\n" - - if r.Output != s { - t.Errorf("Unexpected response.\nExpecting to be:\n %q\nGot:\n %q\n", s, r.Output) - } - - r = noRRSetupTest("--strtwo=a bogus") - if r.Output != s { - t.Errorf("Unexpected response.\nExpecting to be:\n %q\nGot:\n %q\n", s, r.Output) - } -} - -func TestFlagsBeforeCommand(t *testing.T) { - // short without space - x := fullSetupTest("-i10 echo") - if x.Error != nil { - t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) - } - - // short (int) with equals - // It appears that pflags doesn't support this... - // Commenting out until support can be added - - //x = noRRSetupTest("echo -i=10") - //if x.Error != nil { - //t.Errorf("Valid Input shouldn't have errors, got:\n %s", x.Error) - //} - - // long with equals - x = noRRSetupTest("--intone=123 echo one two") - if x.Error != nil { - t.Errorf("Valid Input shouldn't have errors, got:\n %s", x.Error) - } - - // With parsing error properly reported - x = fullSetupTest("-i10E echo") - if !strings.Contains(x.Output, "invalid argument \"10E\" for i10E") { - t.Errorf("Wrong error message displayed, \n %s", x.Output) - } - - //With quotes - x = fullSetupTest("-s=\"walking\" echo") - if x.Error != nil { - t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) - } - - //With quotes and space - x = fullSetupTest("-s=\"walking fast\" echo") - if x.Error != nil { - t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) - } - - //With inner quote - x = fullSetupTest("-s=\"walking \\\"Inner Quote\\\" fast\" echo") - if x.Error != nil { - t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) - } - - //With quotes and space - x = fullSetupTest("-s=\"walking \\\"Inner Quote\\\" fast\" echo") - if x.Error != nil { - t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) - } - -} - -func TestRemoveCommand(t *testing.T) { - versionUsed = 0 - c := initializeWithRootCmd() - c.AddCommand(cmdVersion1) - c.RemoveCommand(cmdVersion1) - x := fullTester(c, "version") - if x.Error == nil { - t.Errorf("Removed command should not have been called\n") - return - } -} - -func TestCommandWithoutSubcommands(t *testing.T) { - c := initializeWithRootCmd() - - x := simpleTester(c, "") - if x.Error != nil { - t.Errorf("Calling command without subcommands should not have error: %v", x.Error) - return - } -} - -func TestCommandWithoutSubcommandsWithArg(t *testing.T) { - 
c := initializeWithRootCmd() - expectedArgs := []string{"arg"} - - x := simpleTester(c, "arg") - if x.Error != nil { - t.Errorf("Calling command without subcommands but with arg should not have error: %v", x.Error) - return - } - if !reflect.DeepEqual(expectedArgs, tr) { - t.Errorf("Calling command without subcommands but with arg has wrong args: expected: %v, actual: %v", expectedArgs, tr) - return - } -} - -func TestReplaceCommandWithRemove(t *testing.T) { - versionUsed = 0 - c := initializeWithRootCmd() - c.AddCommand(cmdVersion1) - c.RemoveCommand(cmdVersion1) - c.AddCommand(cmdVersion2) - x := fullTester(c, "version") - if x.Error != nil { - t.Errorf("Valid Input shouldn't have errors, got:\n %q", x.Error) - return - } - if versionUsed == 1 { - t.Errorf("Removed command shouldn't be called\n") - } - if versionUsed != 2 { - t.Errorf("Replacing command should have been called but didn't\n") - } -} - -func TestDeprecatedSub(t *testing.T) { - c := fullSetupTest("deprecated") - - checkResultContains(t, c, cmdDeprecated.Deprecated) -} - -func TestPreRun(t *testing.T) { - noRRSetupTest("echo one two") - if echoPre == nil || echoPersPre == nil { - t.Error("PreRun or PersistentPreRun not called") - } - if rootPersPre != nil || timesPersPre != nil { - t.Error("Wrong *Pre functions called!") - } - - noRRSetupTest("echo times one two") - if timesPersPre == nil { - t.Error("PreRun or PersistentPreRun not called") - } - if echoPre != nil || echoPersPre != nil || rootPersPre != nil { - t.Error("Wrong *Pre functions called!") - } - - noRRSetupTest("print one two") - if rootPersPre == nil { - t.Error("Parent PersistentPreRun not called but should not have been") - } - if echoPre != nil || echoPersPre != nil || timesPersPre != nil { - t.Error("Wrong *Pre functions called!") - } -} - -// Check if cmdEchoSub gets PersistentPreRun from rootCmd even if is added last -func TestPeristentPreRunPropagation(t *testing.T) { - rootCmd := initialize() - - // First add the cmdEchoSub to cmdPrint - cmdPrint.AddCommand(cmdEchoSub) - // Now add cmdPrint to rootCmd - rootCmd.AddCommand(cmdPrint) - - rootCmd.SetArgs(strings.Split("print echosub lala", " ")) - rootCmd.Execute() - - if rootPersPre == nil || len(rootPersPre) == 0 || rootPersPre[0] != "lala" { - t.Error("RootCmd PersistentPreRun not called but should have been") - } -} - -func TestGlobalNormFuncPropagation(t *testing.T) { - normFunc := func(f *pflag.FlagSet, name string) pflag.NormalizedName { - return pflag.NormalizedName(name) - } - - rootCmd := initialize() - rootCmd.SetGlobalNormalizationFunc(normFunc) - if reflect.ValueOf(normFunc) != reflect.ValueOf(rootCmd.GlobalNormalizationFunc()) { - t.Error("rootCmd seems to have a wrong normalization function") - } - - // First add the cmdEchoSub to cmdPrint - cmdPrint.AddCommand(cmdEchoSub) - if cmdPrint.GlobalNormalizationFunc() != nil && cmdEchoSub.GlobalNormalizationFunc() != nil { - t.Error("cmdPrint and cmdEchoSub should had no normalization functions") - } - - // Now add cmdPrint to rootCmd - rootCmd.AddCommand(cmdPrint) - if reflect.ValueOf(cmdPrint.GlobalNormalizationFunc()).Pointer() != reflect.ValueOf(rootCmd.GlobalNormalizationFunc()).Pointer() || - reflect.ValueOf(cmdEchoSub.GlobalNormalizationFunc()).Pointer() != reflect.ValueOf(rootCmd.GlobalNormalizationFunc()).Pointer() { - t.Error("cmdPrint and cmdEchoSub should had the normalization function of rootCmd") - } -} diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/command_test.go 
b/Godeps/_workspace/src/github.com/spf13/cobra/command_test.go deleted file mode 100644 index 477d84e..0000000 --- a/Godeps/_workspace/src/github.com/spf13/cobra/command_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package cobra - -import ( - "reflect" - "testing" -) - -func TestStripFlags(t *testing.T) { - tests := []struct { - input []string - output []string - }{ - { - []string{"foo", "bar"}, - []string{"foo", "bar"}, - }, - { - []string{"foo", "--bar", "-b"}, - []string{"foo"}, - }, - { - []string{"-b", "foo", "--bar", "bar"}, - []string{}, - }, - { - []string{"-i10", "echo"}, - []string{"echo"}, - }, - { - []string{"-i=10", "echo"}, - []string{"echo"}, - }, - { - []string{"--int=100", "echo"}, - []string{"echo"}, - }, - { - []string{"-ib", "echo", "-bfoo", "baz"}, - []string{"echo", "baz"}, - }, - { - []string{"-i=baz", "bar", "-i", "foo", "blah"}, - []string{"bar", "blah"}, - }, - { - []string{"--int=baz", "-bbar", "-i", "foo", "blah"}, - []string{"blah"}, - }, - { - []string{"--cat", "bar", "-i", "foo", "blah"}, - []string{"bar", "blah"}, - }, - { - []string{"-c", "bar", "-i", "foo", "blah"}, - []string{"bar", "blah"}, - }, - { - []string{"--persist", "bar"}, - []string{"bar"}, - }, - { - []string{"-p", "bar"}, - []string{"bar"}, - }, - } - - cmdPrint := &Command{ - Use: "print [string to print]", - Short: "Print anything to the screen", - Long: `an utterly useless command for testing.`, - Run: func(cmd *Command, args []string) { - tp = args - }, - } - - var flagi int - var flagstr string - var flagbool bool - cmdPrint.PersistentFlags().BoolVarP(&flagbool, "persist", "p", false, "help for persistent one") - cmdPrint.Flags().IntVarP(&flagi, "int", "i", 345, "help message for flag int") - cmdPrint.Flags().StringVarP(&flagstr, "bar", "b", "bar", "help message for flag string") - cmdPrint.Flags().BoolVarP(&flagbool, "cat", "c", false, "help message for flag bool") - - for _, test := range tests { - output := stripFlags(test.input, cmdPrint) - if !reflect.DeepEqual(test.output, output) { - t.Errorf("expected: %v, got: %v", test.output, output) - } - } -} diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/md_docs.go b/Godeps/_workspace/src/github.com/spf13/cobra/md_docs.go deleted file mode 100644 index 6092c85..0000000 --- a/Godeps/_workspace/src/github.com/spf13/cobra/md_docs.go +++ /dev/null @@ -1,138 +0,0 @@ -//Copyright 2015 Red Hat Inc. All rights reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package cobra - -import ( - "bytes" - "fmt" - "os" - "sort" - "strings" - "time" -) - -func printOptions(out *bytes.Buffer, cmd *Command, name string) { - flags := cmd.NonInheritedFlags() - flags.SetOutput(out) - if flags.HasFlags() { - fmt.Fprintf(out, "### Options\n\n```\n") - flags.PrintDefaults() - fmt.Fprintf(out, "```\n\n") - } - - parentFlags := cmd.InheritedFlags() - parentFlags.SetOutput(out) - if parentFlags.HasFlags() { - fmt.Fprintf(out, "### Options inherited from parent commands\n\n```\n") - parentFlags.PrintDefaults() - fmt.Fprintf(out, "```\n\n") - } -} - -type byName []*Command - -func (s byName) Len() int { return len(s) } -func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() } - -func GenMarkdown(cmd *Command, out *bytes.Buffer) { - GenMarkdownCustom(cmd, out, func(s string) string { return s }) -} - -func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) { - name := cmd.CommandPath() - - short := cmd.Short - long := cmd.Long - if len(long) == 0 { - long = short - } - - fmt.Fprintf(out, "## %s\n\n", name) - fmt.Fprintf(out, "%s\n\n", short) - fmt.Fprintf(out, "### Synopsis\n\n") - fmt.Fprintf(out, "\n%s\n\n", long) - - if cmd.Runnable() { - fmt.Fprintf(out, "```\n%s\n```\n\n", cmd.UseLine()) - } - - if len(cmd.Example) > 0 { - fmt.Fprintf(out, "### Examples\n\n") - fmt.Fprintf(out, "```\n%s\n```\n\n", cmd.Example) - } - - printOptions(out, cmd, name) - - if len(cmd.Commands()) > 0 || cmd.HasParent() { - fmt.Fprintf(out, "### SEE ALSO\n") - if cmd.HasParent() { - parent := cmd.Parent() - pname := parent.CommandPath() - link := pname + ".md" - link = strings.Replace(link, " ", "_", -1) - fmt.Fprintf(out, "* [%s](%s)\t - %s\n", pname, linkHandler(link), parent.Short) - } - - children := cmd.Commands() - sort.Sort(byName(children)) - - for _, child := range children { - if len(child.Deprecated) > 0 { - continue - } - cname := name + " " + child.Name() - link := cname + ".md" - link = strings.Replace(link, " ", "_", -1) - fmt.Fprintf(out, "* [%s](%s)\t - %s\n", cname, linkHandler(link), child.Short) - } - fmt.Fprintf(out, "\n") - } - - fmt.Fprintf(out, "###### Auto generated by spf13/cobra at %s\n", time.Now().UTC()) -} - -func GenMarkdownTree(cmd *Command, dir string) { - identity := func(s string) string { return s } - emptyStr := func(s string) string { return "" } - GenMarkdownTreeCustom(cmd, dir, emptyStr, identity) -} - -func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string) string) { - for _, c := range cmd.Commands() { - GenMarkdownTreeCustom(c, dir, filePrepender, linkHandler) - } - out := new(bytes.Buffer) - - GenMarkdownCustom(cmd, out, linkHandler) - - filename := cmd.CommandPath() - filename = dir + strings.Replace(filename, " ", "_", -1) + ".md" - outFile, err := os.Create(filename) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - defer outFile.Close() - _, err = outFile.WriteString(filePrepender(filename)) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - _, err = outFile.Write(out.Bytes()) - if err != nil { - fmt.Println(err) - os.Exit(1) - } -} diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/md_docs.md b/Godeps/_workspace/src/github.com/spf13/cobra/md_docs.md deleted file mode 100644 index 3a0d55a..0000000 --- a/Godeps/_workspace/src/github.com/spf13/cobra/md_docs.md +++ /dev/null @@ -1,81 +0,0 @@ -# Generating Markdown Docs For Your Own cobra.Command - -## Generate 
markdown docs for the entire command tree - -This program can actually generate docs for the kubectl command in the kubernetes project - -```go -package main - -import ( - "io/ioutil" - "os" - - "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd" - "github.com/spf13/cobra" -) - -func main() { - kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard) - cobra.GenMarkdownTree(kubectl, "./") -} -``` - -This will generate a whole series of files, one for each command in the tree, in the directory specified (in this case "./") - -## Generate markdown docs for a single command - -You may wish to have more control over the output, or only generate for a single command, instead of the entire command tree. If this is the case you may prefer to `GenMarkdown` instead of `GenMarkdownTree` - -```go - out := new(bytes.Buffer) - cobra.GenMarkdown(cmd, out) -``` - -This will write the markdown doc for ONLY "cmd" into the out, buffer. - -## Customize the output - -Both `GenMarkdown` and `GenMarkdownTree` have alternate versions with callbacks to get some control of the output: - -```go -func GenMarkdownTreeCustom(cmd *Command, dir string, filePrepender func(string) string, linkHandler func(string) string) { - //... -} -``` - -```go -func GenMarkdownCustom(cmd *Command, out *bytes.Buffer, linkHandler func(string) string) { - //... -} -``` - -The `filePrepender` will prepend the return value given the full filepath to the rendered Markdown file. A common use case is to add front matter to use the generated documentation with [Hugo](http://gohugo.io/): - -```go -const fmTemplate = `--- -date: %s -title: "%s" -slug: %s -url: %s ---- -` - -filePrepender := func(filename string) string { - now := time.Now().Format(time.RFC3339) - name := filepath.Base(filename) - base := strings.TrimSuffix(name, path.Ext(name)) - url := "/commands/" + strings.ToLower(base) + "/" - return fmt.Sprintf(fmTemplate, now, strings.Replace(base, "_", " ", -1), base, url) -} -``` - -The `linkHandler` can be used to customize the rendered internal links to the commands, given a filename: - -```go -linkHandler := func(name string) string { - base := strings.TrimSuffix(name, path.Ext(name)) - return "/commands/" + strings.ToLower(base) + "/" -} -``` - diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/md_docs_test.go b/Godeps/_workspace/src/github.com/spf13/cobra/md_docs_test.go deleted file mode 100644 index defc941..0000000 --- a/Godeps/_workspace/src/github.com/spf13/cobra/md_docs_test.go +++ /dev/null @@ -1,67 +0,0 @@ -package cobra - -import ( - "bytes" - "fmt" - "os" - "strings" - "testing" -) - -var _ = fmt.Println -var _ = os.Stderr - -func TestGenMdDoc(t *testing.T) { - c := initializeWithRootCmd() - // Need two commands to run the command alphabetical sort - cmdEcho.AddCommand(cmdTimes, cmdEchoSub, cmdDeprecated) - c.AddCommand(cmdPrint, cmdEcho) - cmdRootWithRun.PersistentFlags().StringVarP(&flags2a, "rootflag", "r", "two", strtwoParentHelp) - - out := new(bytes.Buffer) - - // We generate on s subcommand so we have both subcommands and parents - GenMarkdown(cmdEcho, out) - found := out.String() - - // Our description - expected := cmdEcho.Long - if !strings.Contains(found, expected) { - t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) - } - - // Better have our example - expected = cmdEcho.Example - if !strings.Contains(found, expected) { - t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) - } - - 
// A local flag - expected = "boolone" - if !strings.Contains(found, expected) { - t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) - } - - // persistent flag on parent - expected = "rootflag" - if !strings.Contains(found, expected) { - t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) - } - - // We better output info about our parent - expected = cmdRootWithRun.Short - if !strings.Contains(found, expected) { - t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) - } - - // And about subcommands - expected = cmdEchoSub.Short - if !strings.Contains(found, expected) { - t.Errorf("Unexpected response.\nExpecting to contain: \n %q\nGot:\n %q\n", expected, found) - } - - unexpected := cmdDeprecated.Short - if strings.Contains(found, unexpected) { - t.Errorf("Unexpected response.\nFound: %v\nBut should not have!!\n", unexpected) - } -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/.travis.yml b/Godeps/_workspace/src/github.com/spf13/pflag/.travis.yml deleted file mode 100644 index c4d88e3..0000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -sudo: false - -language: go - -go: - - 1.3 - - 1.4 - - tip diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/bool_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/bool_test.go deleted file mode 100644 index febf667..0000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/bool_test.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - "bytes" - "fmt" - "strconv" - "testing" -) - -// This value can be a boolean ("true", "false") or "maybe" -type triStateValue int - -const ( - triStateFalse triStateValue = 0 - triStateTrue triStateValue = 1 - triStateMaybe triStateValue = 2 -) - -const strTriStateMaybe = "maybe" - -func (v *triStateValue) IsBoolFlag() bool { - return true -} - -func (v *triStateValue) Get() interface{} { - return triStateValue(*v) -} - -func (v *triStateValue) Set(s string) error { - if s == strTriStateMaybe { - *v = triStateMaybe - return nil - } - boolVal, err := strconv.ParseBool(s) - if boolVal { - *v = triStateTrue - } else { - *v = triStateFalse - } - return err -} - -func (v *triStateValue) String() string { - if *v == triStateMaybe { - return strTriStateMaybe - } - return fmt.Sprintf("%v", bool(*v == triStateTrue)) -} - -// The type of the flag as requred by the pflag.Value interface -func (v *triStateValue) Type() string { - return "version" -} - -func setUpFlagSet(tristate *triStateValue) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - *tristate = triStateFalse - flag := f.VarPF(tristate, "tristate", "t", "tristate value (true, maybe or false)") - flag.NoOptDefVal = "true" - return f -} - -func TestExplicitTrue(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"--tristate=true"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateTrue { - t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") - } -} - -func TestImplicitTrue(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"--tristate"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateTrue { - 
t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") - } -} - -func TestShortFlag(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"-t"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateTrue { - t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") - } -} - -func TestShortFlagExtraArgument(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - // The"maybe"turns into an arg, since short boolean options will only do true/false - err := f.Parse([]string{"-t", "maybe"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateTrue { - t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead") - } - args := f.Args() - if len(args) != 1 || args[0] != "maybe" { - t.Fatal("expected an extra 'maybe' argument to stick around") - } -} - -func TestExplicitMaybe(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"--tristate=maybe"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateMaybe { - t.Fatal("expected", triStateMaybe, "(triStateMaybe) but got", tristate, "instead") - } -} - -func TestExplicitFalse(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{"--tristate=false"}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateFalse { - t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead") - } -} - -func TestImplicitFalse(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - err := f.Parse([]string{}) - if err != nil { - t.Fatal("expected no error; got", err) - } - if tristate != triStateFalse { - t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead") - } -} - -func TestInvalidValue(t *testing.T) { - var tristate triStateValue - f := setUpFlagSet(&tristate) - var buf bytes.Buffer - f.SetOutput(&buf) - err := f.Parse([]string{"--tristate=invalid"}) - if err == nil { - t.Fatal("expected an error but did not get any, tristate has value", tristate) - } -} - -func TestBoolP(t *testing.T) { - b := BoolP("bool", "b", false, "bool value in CommandLine") - c := BoolP("c", "c", false, "other bool value") - args := []string{"--bool"} - if err := CommandLine.Parse(args); err != nil { - t.Error("expected no error, got ", err) - } - if *b != true { - t.Errorf("expected b=true got b=%s", b) - } - if *c != false { - t.Errorf("expect c=false got c=%s", c) - } -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/example_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/example_test.go deleted file mode 100644 index 9be7a49..0000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/example_test.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// These examples demonstrate more intricate uses of the flag package. -package pflag_test - -import ( - "errors" - "fmt" - "strings" - "time" - - flag "github.com/spf13/pflag" -) - -// Example 1: A single string flag called "species" with default value "gopher". -var species = flag.String("species", "gopher", "the species we are studying") - -// Example 2: A flag with a shorthand letter. 
-var gopherType = flag.StringP("gopher_type", "g", "pocket", "the variety of gopher") - -// Example 3: A user-defined flag type, a slice of durations. -type interval []time.Duration - -// String is the method to format the flag's value, part of the flag.Value interface. -// The String method's output will be used in diagnostics. -func (i *interval) String() string { - return fmt.Sprint(*i) -} - -func (i *interval) Type() string { - return "interval" -} - -// Set is the method to set the flag value, part of the flag.Value interface. -// Set's argument is a string to be parsed to set the flag. -// It's a comma-separated list, so we split it. -func (i *interval) Set(value string) error { - // If we wanted to allow the flag to be set multiple times, - // accumulating values, we would delete this if statement. - // That would permit usages such as - // -deltaT 10s -deltaT 15s - // and other combinations. - if len(*i) > 0 { - return errors.New("interval flag already set") - } - for _, dt := range strings.Split(value, ",") { - duration, err := time.ParseDuration(dt) - if err != nil { - return err - } - *i = append(*i, duration) - } - return nil -} - -// Define a flag to accumulate durations. Because it has a special type, -// we need to use the Var function and therefore create the flag during -// init. - -var intervalFlag interval - -func init() { - // Tie the command-line flag to the intervalFlag variable and - // set a usage message. - flag.Var(&intervalFlag, "deltaT", "comma-separated list of intervals to use between events") -} - -func Example() { - // All the interesting pieces are with the variables declared above, but - // to enable the flag package to see the flags defined there, one must - // execute, typically at the start of main (not init!): - // flag.Parse() - // We don't run it here because this is not a main function and - // the testing suite has already parsed the flags. -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/export_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/export_test.go deleted file mode 100644 index 9318fee..0000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/export_test.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - "io/ioutil" - "os" -) - -// Additional routines compiled into the package only during testing. - -// ResetForTesting clears all flag state and sets the usage function as directed. -// After calling ResetForTesting, parse errors in flag handling will not -// exit the program. -func ResetForTesting(usage func()) { - CommandLine = &FlagSet{ - name: os.Args[0], - errorHandling: ContinueOnError, - output: ioutil.Discard, - } - Usage = usage -} - -// GetCommandLine returns the default FlagSet. -func GetCommandLine() *FlagSet { - return CommandLine -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go deleted file mode 100644 index a5928e5..0000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go +++ /dev/null @@ -1,755 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pflag - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "reflect" - "sort" - "strings" - "testing" - "time" -) - -var ( - test_bool = Bool("test_bool", false, "bool value") - test_int = Int("test_int", 0, "int value") - test_int64 = Int64("test_int64", 0, "int64 value") - test_uint = Uint("test_uint", 0, "uint value") - test_uint64 = Uint64("test_uint64", 0, "uint64 value") - test_string = String("test_string", "0", "string value") - test_float64 = Float64("test_float64", 0, "float64 value") - test_duration = Duration("test_duration", 0, "time.Duration value") - test_optional_int = Int("test_optional_int", 0, "optional int value") - normalizeFlagNameInvocations = 0 -) - -func boolString(s string) string { - if s == "0" { - return "false" - } - return "true" -} - -func TestEverything(t *testing.T) { - m := make(map[string]*Flag) - desired := "0" - visitor := func(f *Flag) { - if len(f.Name) > 5 && f.Name[0:5] == "test_" { - m[f.Name] = f - ok := false - switch { - case f.Value.String() == desired: - ok = true - case f.Name == "test_bool" && f.Value.String() == boolString(desired): - ok = true - case f.Name == "test_duration" && f.Value.String() == desired+"s": - ok = true - } - if !ok { - t.Error("Visit: bad value", f.Value.String(), "for", f.Name) - } - } - } - VisitAll(visitor) - if len(m) != 9 { - t.Error("VisitAll misses some flags") - for k, v := range m { - t.Log(k, *v) - } - } - m = make(map[string]*Flag) - Visit(visitor) - if len(m) != 0 { - t.Errorf("Visit sees unset flags") - for k, v := range m { - t.Log(k, *v) - } - } - // Now set all flags - Set("test_bool", "true") - Set("test_int", "1") - Set("test_int64", "1") - Set("test_uint", "1") - Set("test_uint64", "1") - Set("test_string", "1") - Set("test_float64", "1") - Set("test_duration", "1s") - Set("test_optional_int", "1") - desired = "1" - Visit(visitor) - if len(m) != 9 { - t.Error("Visit fails after set") - for k, v := range m { - t.Log(k, *v) - } - } - // Now test they're visited in sort order. 
- var flagNames []string - Visit(func(f *Flag) { flagNames = append(flagNames, f.Name) }) - if !sort.StringsAreSorted(flagNames) { - t.Errorf("flag names not sorted: %v", flagNames) - } -} - -func TestUsage(t *testing.T) { - called := false - ResetForTesting(func() { called = true }) - if GetCommandLine().Parse([]string{"--x"}) == nil { - t.Error("parse did not fail for unknown flag") - } - if !called { - t.Error("did not call Usage for unknown flag") - } -} - -func TestAnnotation(t *testing.T) { - f := NewFlagSet("shorthand", ContinueOnError) - - if err := f.SetAnnotation("missing-flag", "key", nil); err == nil { - t.Errorf("Expected error setting annotation on non-existent flag") - } - - f.StringP("stringa", "a", "", "string value") - if err := f.SetAnnotation("stringa", "key", nil); err != nil { - t.Errorf("Unexpected error setting new nil annotation: %v", err) - } - if annotation := f.Lookup("stringa").Annotations["key"]; annotation != nil { - t.Errorf("Unexpected annotation: %v", annotation) - } - - f.StringP("stringb", "b", "", "string2 value") - if err := f.SetAnnotation("stringb", "key", []string{"value1"}); err != nil { - t.Errorf("Unexpected error setting new annotation: %v", err) - } - if annotation := f.Lookup("stringb").Annotations["key"]; !reflect.DeepEqual(annotation, []string{"value1"}) { - t.Errorf("Unexpected annotation: %v", annotation) - } - - if err := f.SetAnnotation("stringb", "key", []string{"value2"}); err != nil { - t.Errorf("Unexpected error updating annotation: %v", err) - } - if annotation := f.Lookup("stringb").Annotations["key"]; !reflect.DeepEqual(annotation, []string{"value2"}) { - t.Errorf("Unexpected annotation: %v", annotation) - } -} - -func testParse(f *FlagSet, t *testing.T) { - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - boolFlag := f.Bool("bool", false, "bool value") - bool2Flag := f.Bool("bool2", false, "bool2 value") - bool3Flag := f.Bool("bool3", false, "bool3 value") - intFlag := f.Int("int", 0, "int value") - int8Flag := f.Int8("int8", 0, "int value") - int32Flag := f.Int32("int32", 0, "int value") - int64Flag := f.Int64("int64", 0, "int64 value") - uintFlag := f.Uint("uint", 0, "uint value") - uint8Flag := f.Uint8("uint8", 0, "uint value") - uint16Flag := f.Uint16("uint16", 0, "uint value") - uint32Flag := f.Uint32("uint32", 0, "uint value") - uint64Flag := f.Uint64("uint64", 0, "uint64 value") - stringFlag := f.String("string", "0", "string value") - float32Flag := f.Float32("float32", 0, "float32 value") - float64Flag := f.Float64("float64", 0, "float64 value") - ipFlag := f.IP("ip", net.ParseIP("127.0.0.1"), "ip value") - maskFlag := f.IPMask("mask", ParseIPv4Mask("0.0.0.0"), "mask value") - durationFlag := f.Duration("duration", 5*time.Second, "time.Duration value") - optionalIntNoValueFlag := f.Int("optional-int-no-value", 0, "int value") - f.Lookup("optional-int-no-value").NoOptDefVal = "9" - optionalIntWithValueFlag := f.Int("optional-int-with-value", 0, "int value") - f.Lookup("optional-int-no-value").NoOptDefVal = "9" - extra := "one-extra-argument" - args := []string{ - "--bool", - "--bool2=true", - "--bool3=false", - "--int=22", - "--int8=-8", - "--int32=-32", - "--int64=0x23", - "--uint", "24", - "--uint8=8", - "--uint16=16", - "--uint32=32", - "--uint64=25", - "--string=hello", - "--float32=-172e12", - "--float64=2718e28", - "--ip=10.11.12.13", - "--mask=255.255.255.0", - "--duration=2m", - "--optional-int-no-value", - "--optional-int-with-value=42", - extra, - } - if err := f.Parse(args); err != nil { - 
t.Fatal(err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *boolFlag != true { - t.Error("bool flag should be true, is ", *boolFlag) - } - if v, err := f.GetBool("bool"); err != nil || v != *boolFlag { - t.Error("GetBool does not work.") - } - if *bool2Flag != true { - t.Error("bool2 flag should be true, is ", *bool2Flag) - } - if *bool3Flag != false { - t.Error("bool3 flag should be false, is ", *bool2Flag) - } - if *intFlag != 22 { - t.Error("int flag should be 22, is ", *intFlag) - } - if v, err := f.GetInt("int"); err != nil || v != *intFlag { - t.Error("GetInt does not work.") - } - if *int8Flag != -8 { - t.Error("int8 flag should be 0x23, is ", *int8Flag) - } - if v, err := f.GetInt8("int8"); err != nil || v != *int8Flag { - t.Error("GetInt8 does not work.") - } - if *int32Flag != -32 { - t.Error("int32 flag should be 0x23, is ", *int32Flag) - } - if v, err := f.GetInt32("int32"); err != nil || v != *int32Flag { - t.Error("GetInt32 does not work.") - } - if *int64Flag != 0x23 { - t.Error("int64 flag should be 0x23, is ", *int64Flag) - } - if v, err := f.GetInt64("int64"); err != nil || v != *int64Flag { - t.Error("GetInt64 does not work.") - } - if *uintFlag != 24 { - t.Error("uint flag should be 24, is ", *uintFlag) - } - if v, err := f.GetUint("uint"); err != nil || v != *uintFlag { - t.Error("GetUint does not work.") - } - if *uint8Flag != 8 { - t.Error("uint8 flag should be 8, is ", *uint8Flag) - } - if v, err := f.GetUint8("uint8"); err != nil || v != *uint8Flag { - t.Error("GetUint8 does not work.") - } - if *uint16Flag != 16 { - t.Error("uint16 flag should be 16, is ", *uint16Flag) - } - if v, err := f.GetUint16("uint16"); err != nil || v != *uint16Flag { - t.Error("GetUint16 does not work.") - } - if *uint32Flag != 32 { - t.Error("uint32 flag should be 32, is ", *uint32Flag) - } - if v, err := f.GetUint32("uint32"); err != nil || v != *uint32Flag { - t.Error("GetUint32 does not work.") - } - if *uint64Flag != 25 { - t.Error("uint64 flag should be 25, is ", *uint64Flag) - } - if v, err := f.GetUint64("uint64"); err != nil || v != *uint64Flag { - t.Error("GetUint64 does not work.") - } - if *stringFlag != "hello" { - t.Error("string flag should be `hello`, is ", *stringFlag) - } - if v, err := f.GetString("string"); err != nil || v != *stringFlag { - t.Error("GetString does not work.") - } - if *float32Flag != -172e12 { - t.Error("float32 flag should be -172e12, is ", *float32Flag) - } - if v, err := f.GetFloat32("float32"); err != nil || v != *float32Flag { - t.Errorf("GetFloat32 returned %v but float32Flag was %v", v, *float32Flag) - } - if *float64Flag != 2718e28 { - t.Error("float64 flag should be 2718e28, is ", *float64Flag) - } - if v, err := f.GetFloat64("float64"); err != nil || v != *float64Flag { - t.Errorf("GetFloat64 returned %v but float64Flag was %v", v, *float64Flag) - } - if !(*ipFlag).Equal(net.ParseIP("10.11.12.13")) { - t.Error("ip flag should be 10.11.12.13, is ", *ipFlag) - } - if v, err := f.GetIP("ip"); err != nil || !v.Equal(*ipFlag) { - t.Errorf("GetIP returned %v but ipFlag was %v", v, *ipFlag) - } - if (*maskFlag).String() != ParseIPv4Mask("255.255.255.0").String() { - t.Error("mask flag should be 255.255.255.0, is ", (*maskFlag).String()) - } - if v, err := f.GetIPv4Mask("mask"); err != nil || v.String() != (*maskFlag).String() { - t.Errorf("GetIP returned %v but maskFlag was %v", v, *maskFlag, err) - } - if *durationFlag != 2*time.Minute { - t.Error("duration flag should be 2m, is ", *durationFlag) - } - if v, err := 
f.GetDuration("duration"); err != nil || v != *durationFlag { - t.Error("GetDuration does not work.") - } - if _, err := f.GetInt("duration"); err == nil { - t.Error("GetInt parsed a time.Duration?!?!") - } - if *optionalIntNoValueFlag != 9 { - t.Error("optional int flag should be the default value, is ", *optionalIntNoValueFlag) - } - if *optionalIntWithValueFlag != 42 { - t.Error("optional int flag should be 42, is ", *optionalIntWithValueFlag) - } - if len(f.Args()) != 1 { - t.Error("expected one argument, got", len(f.Args())) - } else if f.Args()[0] != extra { - t.Errorf("expected argument %q got %q", extra, f.Args()[0]) - } -} - -func TestShorthand(t *testing.T) { - f := NewFlagSet("shorthand", ContinueOnError) - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - boolaFlag := f.BoolP("boola", "a", false, "bool value") - boolbFlag := f.BoolP("boolb", "b", false, "bool2 value") - boolcFlag := f.BoolP("boolc", "c", false, "bool3 value") - booldFlag := f.BoolP("boold", "d", false, "bool4 value") - stringaFlag := f.StringP("stringa", "s", "0", "string value") - stringzFlag := f.StringP("stringz", "z", "0", "string value") - extra := "interspersed-argument" - notaflag := "--i-look-like-a-flag" - args := []string{ - "-ab", - extra, - "-cs", - "hello", - "-z=something", - "-d=true", - "--", - notaflag, - } - f.SetOutput(ioutil.Discard) - if err := f.Parse(args); err != nil { - t.Error("expected no error, got ", err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *boolaFlag != true { - t.Error("boola flag should be true, is ", *boolaFlag) - } - if *boolbFlag != true { - t.Error("boolb flag should be true, is ", *boolbFlag) - } - if *boolcFlag != true { - t.Error("boolc flag should be true, is ", *boolcFlag) - } - if *booldFlag != true { - t.Error("boold flag should be true, is ", *booldFlag) - } - if *stringaFlag != "hello" { - t.Error("stringa flag should be `hello`, is ", *stringaFlag) - } - if *stringzFlag != "something" { - t.Error("stringz flag should be `something`, is ", *stringzFlag) - } - if len(f.Args()) != 2 { - t.Error("expected one argument, got", len(f.Args())) - } else if f.Args()[0] != extra { - t.Errorf("expected argument %q got %q", extra, f.Args()[0]) - } else if f.Args()[1] != notaflag { - t.Errorf("expected argument %q got %q", notaflag, f.Args()[1]) - } -} - -func TestParse(t *testing.T) { - ResetForTesting(func() { t.Error("bad parse") }) - testParse(GetCommandLine(), t) -} - -func TestFlagSetParse(t *testing.T) { - testParse(NewFlagSet("test", ContinueOnError), t) -} - -func replaceSeparators(name string, from []string, to string) string { - result := name - for _, sep := range from { - result = strings.Replace(result, sep, to, -1) - } - // Type convert to indicate normalization has been done. - return result -} - -func wordSepNormalizeFunc(f *FlagSet, name string) NormalizedName { - seps := []string{"-", "_"} - name = replaceSeparators(name, seps, ".") - normalizeFlagNameInvocations++ - - return NormalizedName(name) -} - -func testWordSepNormalizedNames(args []string, t *testing.T) { - f := NewFlagSet("normalized", ContinueOnError) - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - withDashFlag := f.Bool("with-dash-flag", false, "bool value") - // Set this after some flags have been added and before others. 
- f.SetNormalizeFunc(wordSepNormalizeFunc) - withUnderFlag := f.Bool("with_under_flag", false, "bool value") - withBothFlag := f.Bool("with-both_flag", false, "bool value") - if err := f.Parse(args); err != nil { - t.Fatal(err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *withDashFlag != true { - t.Error("withDashFlag flag should be true, is ", *withDashFlag) - } - if *withUnderFlag != true { - t.Error("withUnderFlag flag should be true, is ", *withUnderFlag) - } - if *withBothFlag != true { - t.Error("withBothFlag flag should be true, is ", *withBothFlag) - } -} - -func TestWordSepNormalizedNames(t *testing.T) { - args := []string{ - "--with-dash-flag", - "--with-under-flag", - "--with-both-flag", - } - testWordSepNormalizedNames(args, t) - - args = []string{ - "--with_dash_flag", - "--with_under_flag", - "--with_both_flag", - } - testWordSepNormalizedNames(args, t) - - args = []string{ - "--with-dash_flag", - "--with-under_flag", - "--with-both_flag", - } - testWordSepNormalizedNames(args, t) -} - -func aliasAndWordSepFlagNames(f *FlagSet, name string) NormalizedName { - seps := []string{"-", "_"} - - oldName := replaceSeparators("old-valid_flag", seps, ".") - newName := replaceSeparators("valid-flag", seps, ".") - - name = replaceSeparators(name, seps, ".") - switch name { - case oldName: - name = newName - break - } - - return NormalizedName(name) -} - -func TestCustomNormalizedNames(t *testing.T) { - f := NewFlagSet("normalized", ContinueOnError) - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - - validFlag := f.Bool("valid-flag", false, "bool value") - f.SetNormalizeFunc(aliasAndWordSepFlagNames) - someOtherFlag := f.Bool("some-other-flag", false, "bool value") - - args := []string{"--old_valid_flag", "--some-other_flag"} - if err := f.Parse(args); err != nil { - t.Fatal(err) - } - - if *validFlag != true { - t.Errorf("validFlag is %v even though we set the alias --old_valid_falg", *validFlag) - } - if *someOtherFlag != true { - t.Error("someOtherFlag should be true, is ", *someOtherFlag) - } -} - -// Every flag we add, the name (displayed also in usage) should normalized -func TestNormalizationFuncShouldChangeFlagName(t *testing.T) { - // Test normalization after addition - f := NewFlagSet("normalized", ContinueOnError) - - f.Bool("valid_flag", false, "bool value") - if f.Lookup("valid_flag").Name != "valid_flag" { - t.Error("The new flag should have the name 'valid_flag' instead of ", f.Lookup("valid_flag").Name) - } - - f.SetNormalizeFunc(wordSepNormalizeFunc) - if f.Lookup("valid_flag").Name != "valid.flag" { - t.Error("The new flag should have the name 'valid.flag' instead of ", f.Lookup("valid_flag").Name) - } - - // Test normalization before addition - f = NewFlagSet("normalized", ContinueOnError) - f.SetNormalizeFunc(wordSepNormalizeFunc) - - f.Bool("valid_flag", false, "bool value") - if f.Lookup("valid_flag").Name != "valid.flag" { - t.Error("The new flag should have the name 'valid.flag' instead of ", f.Lookup("valid_flag").Name) - } -} - -// Declare a user-defined flag type. 
-type flagVar []string - -func (f *flagVar) String() string { - return fmt.Sprint([]string(*f)) -} - -func (f *flagVar) Set(value string) error { - *f = append(*f, value) - return nil -} - -func (f *flagVar) Type() string { - return "flagVar" -} - -func TestUserDefined(t *testing.T) { - var flags FlagSet - flags.Init("test", ContinueOnError) - var v flagVar - flags.VarP(&v, "v", "v", "usage") - if err := flags.Parse([]string{"--v=1", "-v2", "-v", "3"}); err != nil { - t.Error(err) - } - if len(v) != 3 { - t.Fatal("expected 3 args; got ", len(v)) - } - expect := "[1 2 3]" - if v.String() != expect { - t.Errorf("expected value %q got %q", expect, v.String()) - } -} - -func TestSetOutput(t *testing.T) { - var flags FlagSet - var buf bytes.Buffer - flags.SetOutput(&buf) - flags.Init("test", ContinueOnError) - flags.Parse([]string{"--unknown"}) - if out := buf.String(); !strings.Contains(out, "--unknown") { - t.Logf("expected output mentioning unknown; got %q", out) - } -} - -// This tests that one can reset the flags. This still works but not well, and is -// superseded by FlagSet. -func TestChangingArgs(t *testing.T) { - ResetForTesting(func() { t.Fatal("bad parse") }) - oldArgs := os.Args - defer func() { os.Args = oldArgs }() - os.Args = []string{"cmd", "--before", "subcmd"} - before := Bool("before", false, "") - if err := GetCommandLine().Parse(os.Args[1:]); err != nil { - t.Fatal(err) - } - cmd := Arg(0) - os.Args = []string{"subcmd", "--after", "args"} - after := Bool("after", false, "") - Parse() - args := Args() - - if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { - t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) - } -} - -// Test that -help invokes the usage message and returns ErrHelp. -func TestHelp(t *testing.T) { - var helpCalled = false - fs := NewFlagSet("help test", ContinueOnError) - fs.Usage = func() { helpCalled = true } - var flag bool - fs.BoolVar(&flag, "flag", false, "regular flag") - // Regular flag invocation should work - err := fs.Parse([]string{"--flag=true"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - if !flag { - t.Error("flag was not set by --flag") - } - if helpCalled { - t.Error("help called for regular flag") - helpCalled = false // reset for next test - } - // Help flag should work as expected. - err = fs.Parse([]string{"--help"}) - if err == nil { - t.Fatal("error expected") - } - if err != ErrHelp { - t.Fatal("expected ErrHelp; got ", err) - } - if !helpCalled { - t.Fatal("help was not called") - } - // If we define a help flag, that should override. 
- var help bool - fs.BoolVar(&help, "help", false, "help flag") - helpCalled = false - err = fs.Parse([]string{"--help"}) - if err != nil { - t.Fatal("expected no error for defined --help; got ", err) - } - if helpCalled { - t.Fatal("help was called; should not have been for defined help flag") - } -} - -func TestNoInterspersed(t *testing.T) { - f := NewFlagSet("test", ContinueOnError) - f.SetInterspersed(false) - f.Bool("true", true, "always true") - f.Bool("false", false, "always false") - err := f.Parse([]string{"--true", "break", "--false"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - args := f.Args() - if len(args) != 2 || args[0] != "break" || args[1] != "--false" { - t.Fatal("expected interspersed options/non-options to fail") - } -} - -func TestTermination(t *testing.T) { - f := NewFlagSet("termination", ContinueOnError) - boolFlag := f.BoolP("bool", "l", false, "bool value") - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - arg1 := "ls" - arg2 := "-l" - args := []string{ - "--", - arg1, - arg2, - } - f.SetOutput(ioutil.Discard) - if err := f.Parse(args); err != nil { - t.Fatal("expected no error; got ", err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *boolFlag { - t.Error("expected boolFlag=false, got true") - } - if len(f.Args()) != 2 { - t.Errorf("expected 2 arguments, got %d: %v", len(f.Args()), f.Args()) - } - if f.Args()[0] != arg1 { - t.Errorf("expected argument %q got %q", arg1, f.Args()[0]) - } - if f.Args()[1] != arg2 { - t.Errorf("expected argument %q got %q", arg2, f.Args()[1]) - } -} - -func TestDeprecatedFlagInDocs(t *testing.T) { - f := NewFlagSet("bob", ContinueOnError) - f.Bool("badflag", true, "always true") - f.MarkDeprecated("badflag", "use --good-flag instead") - - out := new(bytes.Buffer) - f.SetOutput(out) - f.PrintDefaults() - - if strings.Contains(out.String(), "badflag") { - t.Errorf("found deprecated flag in usage!") - } -} - -func parseReturnStderr(t *testing.T, f *FlagSet, args []string) (string, error) { - oldStderr := os.Stderr - r, w, _ := os.Pipe() - os.Stderr = w - - err := f.Parse(args) - - outC := make(chan string) - // copy the output in a separate goroutine so printing can't block indefinitely - go func() { - var buf bytes.Buffer - io.Copy(&buf, r) - outC <- buf.String() - }() - - w.Close() - os.Stderr = oldStderr - out := <-outC - - return out, err -} - -func TestDeprecatedFlagUsage(t *testing.T) { - f := NewFlagSet("bob", ContinueOnError) - f.Bool("badflag", true, "always true") - usageMsg := "use --good-flag instead" - f.MarkDeprecated("badflag", usageMsg) - - args := []string{"--badflag"} - out, err := parseReturnStderr(t, f, args) - if err != nil { - t.Fatal("expected no error; got ", err) - } - - if !strings.Contains(out, usageMsg) { - t.Errorf("usageMsg not printed when using a deprecated flag!") - } -} - -func TestDeprecatedFlagUsageNormalized(t *testing.T) { - f := NewFlagSet("bob", ContinueOnError) - f.Bool("bad-double_flag", true, "always true") - f.SetNormalizeFunc(wordSepNormalizeFunc) - usageMsg := "use --good-flag instead" - f.MarkDeprecated("bad_double-flag", usageMsg) - - args := []string{"--bad_double_flag"} - out, err := parseReturnStderr(t, f, args) - if err != nil { - t.Fatal("expected no error; got ", err) - } - - if !strings.Contains(out, usageMsg) { - t.Errorf("usageMsg not printed when using a deprecated flag!") - } -} - -// Name normalization function should be called only once on flag addition -func TestMultipleNormalizeFlagNameInvocations(t 
*testing.T) { - normalizeFlagNameInvocations = 0 - - f := NewFlagSet("normalized", ContinueOnError) - f.SetNormalizeFunc(wordSepNormalizeFunc) - f.Bool("with_under_flag", false, "bool value") - - if normalizeFlagNameInvocations != 1 { - t.Fatal("Expected normalizeFlagNameInvocations to be 1; got ", normalizeFlagNameInvocations) - } -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int_slice_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/int_slice_test.go deleted file mode 100644 index d162e86..0000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/int_slice_test.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package pflag - -import ( - "fmt" - "strconv" - "strings" - "testing" -) - -func setUpISFlagSet(isp *[]int) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.IntSliceVar(isp, "is", []int{}, "Command seperated list!") - return f -} - -func TestIS(t *testing.T) { - var is []int - f := setUpISFlagSet(&is) - - vals := []string{"1", "2", "4", "3"} - arg := fmt.Sprintf("--is=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range is { - d, err := strconv.Atoi(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if d != v { - t.Fatalf("expected is[%d] to be %s but got: %d", i, vals[i], v) - } - } - getIS, err := f.GetIntSlice("is") - for i, v := range getIS { - d, err := strconv.Atoi(vals[i]) - if err != nil { - t.Fatalf("got error: %v", err) - } - if d != v { - t.Fatalf("expected is[%d] to be %s but got: %d from GetIntSlice", i, vals[i], v) - } - } -} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/string_slice_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/string_slice_test.go deleted file mode 100644 index c37a91a..0000000 --- a/Godeps/_workspace/src/github.com/spf13/pflag/string_slice_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package pflag - -import ( - "fmt" - "strings" - "testing" -) - -func setUpSSFlagSet(ssp *[]string) *FlagSet { - f := NewFlagSet("test", ContinueOnError) - f.StringSliceVar(ssp, "ss", []string{}, "Command seperated list!") - return f -} - -func TestSS(t *testing.T) { - var ss []string - f := setUpSSFlagSet(&ss) - - vals := []string{"one", "two", "4", "3"} - arg := fmt.Sprintf("--ss=%s", strings.Join(vals, ",")) - err := f.Parse([]string{arg}) - if err != nil { - t.Fatal("expected no error; got", err) - } - for i, v := range ss { - if vals[i] != v { - t.Fatal("expected ss[%d] to be %s but got: %s", i, vals[i], v) - } - } - - getSS, err := f.GetStringSlice("ss") - if err != nil { - t.Fatal("got an error from GetStringSlice(): %v", err) - } - for i, v := range getSS { - if vals[i] != v { - t.Fatal("expected ss[%d] to be %s from GetStringSlice but got: %s", i, vals[i], v) - } - } -} diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE b/Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE deleted file mode 100644 index 968b453..0000000 --- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/LICENSE +++ /dev/null @@ -1,14 +0,0 @@ -Copyright (c) 2013 Vaughan Newton - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated -documentation files (the "Software"), to deal in the Software without restriction, including without limitation the -rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit -persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the -Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR -OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/README.md b/Godeps/_workspace/src/github.com/vaughan0/go-ini/README.md deleted file mode 100644 index d5cd4e7..0000000 --- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/README.md +++ /dev/null @@ -1,70 +0,0 @@ -go-ini -====== - -INI parsing library for Go (golang). - -View the API documentation [here](http://godoc.org/github.com/vaughan0/go-ini). - -Usage ------ - -Parse an INI file: - -```go -import "github.com/vaughan0/go-ini" - -file, err := ini.LoadFile("myfile.ini") -``` - -Get data from the parsed file: - -```go -name, ok := file.Get("person", "name") -if !ok { - panic("'name' variable missing from 'person' section") -} -``` - -Iterate through values in a section: - -```go -for key, value := range file["mysection"] { - fmt.Printf("%s => %s\n", key, value) -} -``` - -Iterate through sections in a file: - -```go -for name, section := range file { - fmt.Printf("Section name: %s\n", name) -} -``` - -File Format ------------ - -INI files are parsed by go-ini line-by-line. Each line may be one of the following: - - * A section definition: [section-name] - * A property: key = value - * A comment: #blahblah _or_ ;blahblah - * Blank. The line will be ignored. 
- -Properties defined before any section headers are placed in the default section, which has -the empty string as it's key. - -Example: - -```ini -# I am a comment -; So am I! - -[apples] -colour = red or green -shape = applish - -[oranges] -shape = square -colour = blue -``` diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go deleted file mode 100644 index 81aeb32..0000000 --- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini.go +++ /dev/null @@ -1,123 +0,0 @@ -// Package ini provides functions for parsing INI configuration files. -package ini - -import ( - "bufio" - "fmt" - "io" - "os" - "regexp" - "strings" -) - -var ( - sectionRegex = regexp.MustCompile(`^\[(.*)\]$`) - assignRegex = regexp.MustCompile(`^([^=]+)=(.*)$`) -) - -// ErrSyntax is returned when there is a syntax error in an INI file. -type ErrSyntax struct { - Line int - Source string // The contents of the erroneous line, without leading or trailing whitespace -} - -func (e ErrSyntax) Error() string { - return fmt.Sprintf("invalid INI syntax on line %d: %s", e.Line, e.Source) -} - -// A File represents a parsed INI file. -type File map[string]Section - -// A Section represents a single section of an INI file. -type Section map[string]string - -// Returns a named Section. A Section will be created if one does not already exist for the given name. -func (f File) Section(name string) Section { - section := f[name] - if section == nil { - section = make(Section) - f[name] = section - } - return section -} - -// Looks up a value for a key in a section and returns that value, along with a boolean result similar to a map lookup. -func (f File) Get(section, key string) (value string, ok bool) { - if s := f[section]; s != nil { - value, ok = s[key] - } - return -} - -// Loads INI data from a reader and stores the data in the File. -func (f File) Load(in io.Reader) (err error) { - bufin, ok := in.(*bufio.Reader) - if !ok { - bufin = bufio.NewReader(in) - } - return parseFile(bufin, f) -} - -// Loads INI data from a named file and stores the data in the File. -func (f File) LoadFile(file string) (err error) { - in, err := os.Open(file) - if err != nil { - return - } - defer in.Close() - return f.Load(in) -} - -func parseFile(in *bufio.Reader, file File) (err error) { - section := "" - lineNum := 0 - for done := false; !done; { - var line string - if line, err = in.ReadString('\n'); err != nil { - if err == io.EOF { - done = true - } else { - return - } - } - lineNum++ - line = strings.TrimSpace(line) - if len(line) == 0 { - // Skip blank lines - continue - } - if line[0] == ';' || line[0] == '#' { - // Skip comments - continue - } - - if groups := assignRegex.FindStringSubmatch(line); groups != nil { - key, val := groups[1], groups[2] - key, val = strings.TrimSpace(key), strings.TrimSpace(val) - file.Section(section)[key] = val - } else if groups := sectionRegex.FindStringSubmatch(line); groups != nil { - name := strings.TrimSpace(groups[1]) - section = name - // Create the section if it does not exist - file.Section(section) - } else { - return ErrSyntax{lineNum, line} - } - - } - return nil -} - -// Loads and returns a File from a reader. -func Load(in io.Reader) (File, error) { - file := make(File) - err := file.Load(in) - return file, err -} - -// Loads and returns an INI File from a file on disk. 
-func LoadFile(filename string) (File, error) { - file := make(File) - err := file.LoadFile(filename) - return file, err -} diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go deleted file mode 100644 index 38a6f00..0000000 --- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_linux_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package ini - -import ( - "reflect" - "syscall" - "testing" -) - -func TestLoadFile(t *testing.T) { - originalOpenFiles := numFilesOpen(t) - - file, err := LoadFile("test.ini") - if err != nil { - t.Fatal(err) - } - - if originalOpenFiles != numFilesOpen(t) { - t.Error("test.ini not closed") - } - - if !reflect.DeepEqual(file, File{"default": {"stuff": "things"}}) { - t.Error("file not read correctly") - } -} - -func numFilesOpen(t *testing.T) (num uint64) { - var rlimit syscall.Rlimit - err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &rlimit) - if err != nil { - t.Fatal(err) - } - maxFds := int(rlimit.Cur) - - var stat syscall.Stat_t - for i := 0; i < maxFds; i++ { - if syscall.Fstat(i, &stat) == nil { - num++ - } else { - return - } - } - return -} diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go b/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go deleted file mode 100644 index 06a4d05..0000000 --- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/ini_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package ini - -import ( - "reflect" - "strings" - "testing" -) - -func TestLoad(t *testing.T) { - src := ` - # Comments are ignored - - herp = derp - - [foo] - hello=world - whitespace should = not matter - ; sneaky semicolon-style comment - multiple = equals = signs - - [bar] - this = that` - - file, err := Load(strings.NewReader(src)) - if err != nil { - t.Fatal(err) - } - check := func(section, key, expect string) { - if value, _ := file.Get(section, key); value != expect { - t.Errorf("Get(%q, %q): expected %q, got %q", section, key, expect, value) - } - } - - check("", "herp", "derp") - check("foo", "hello", "world") - check("foo", "whitespace should", "not matter") - check("foo", "multiple", "equals = signs") - check("bar", "this", "that") -} - -func TestSyntaxError(t *testing.T) { - src := ` - # Line 2 - [foo] - bar = baz - # Here's an error on line 6: - wut? - herp = derp` - _, err := Load(strings.NewReader(src)) - t.Logf("%T: %v", err, err) - if err == nil { - t.Fatal("expected an error, got nil") - } - syntaxErr, ok := err.(ErrSyntax) - if !ok { - t.Fatal("expected an error of type ErrSyntax") - } - if syntaxErr.Line != 6 { - t.Fatal("incorrect line number") - } - if syntaxErr.Source != "wut?" 
{ - t.Fatal("incorrect source") - } -} - -func TestDefinedSectionBehaviour(t *testing.T) { - check := func(src string, expect File) { - file, err := Load(strings.NewReader(src)) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(file, expect) { - t.Errorf("expected %v, got %v", expect, file) - } - } - // No sections for an empty file - check("", File{}) - // Default section only if there are actually values for it - check("foo=bar", File{"": {"foo": "bar"}}) - // User-defined sections should always be present, even if empty - check("[a]\n[b]\nfoo=bar", File{ - "a": {}, - "b": {"foo": "bar"}, - }) - check("foo=bar\n[a]\nthis=that", File{ - "": {"foo": "bar"}, - "a": {"this": "that"}, - }) -} diff --git a/Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini b/Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini deleted file mode 100644 index d13c999..0000000 --- a/Godeps/_workspace/src/github.com/vaughan0/go-ini/test.ini +++ /dev/null @@ -1,2 +0,0 @@ -[default] -stuff = things diff --git a/s3.go b/s3.go index e08907a..eb1d9e1 100644 --- a/s3.go +++ b/s3.go @@ -11,6 +11,7 @@ import ( "time" "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/s3" ) @@ -20,8 +21,9 @@ type s3Provider struct { } func newS3Provider() (*s3Provider, error) { + sess := session.Must(session.NewSession()) return &s3Provider{ - conn: s3.New(&aws.Config{}), + conn: s3.New(sess), }, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt new file mode 100644 index 0000000..5f14d11 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -0,0 +1,3 @@ +AWS SDK for Go +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2014-2015 Stripe, Inc. 
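Editor's note: the only first-party change in this patch is the `s3.go` hunk above, which swaps `s3.New(&aws.Config{})` for the session-based constructor. A minimal sketch of that pattern from a caller's point of view, assuming default credentials are available; the region is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// session.Must panics if the session cannot be built, mirroring the
	// constructor change in s3.go above. "eu-west-1" is only an example.
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("eu-west-1"),
	}))
	svc := s3.New(sess)

	out, err := svc.ListBuckets(&s3.ListBucketsInput{})
	if err != nil {
		log.Fatal(err)
	}
	for _, b := range out.Buckets {
		fmt.Println(aws.StringValue(b.Name))
	}
}
```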
diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go similarity index 66% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go rename to vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go index a52743b..56fdfc2 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -14,13 +14,13 @@ package awserr // if err != nil { // if awsErr, ok := err.(awserr.Error); ok { // // Get error details -// log.Println("Error:", err.Code(), err.Message()) +// log.Println("Error:", awsErr.Code(), awsErr.Message()) // // // Prints out full error message, including original error if there was one. -// log.Println("Error:", err.Error()) +// log.Println("Error:", awsErr.Error()) // // // Get original error -// if origErr := err.Err(); origErr != nil { +// if origErr := awsErr.OrigErr(); origErr != nil { // // operate on original error. // } // } else { @@ -42,15 +42,55 @@ type Error interface { OrigErr() error } +// BatchError is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Deprecated: Replaced with BatchedErrors. Only defined for backwards +// compatibility. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + +// BatchedErrors is a batch of errors which also wraps lower level errors with +// code, message, and original errors. Calling Error() will include all errors +// that occurred in the batch. +// +// Replaces BatchError +type BatchedErrors interface { + // Satisfy the base Error interface. + Error + + // Returns the original error if one was set. Nil is returned if not set. + OrigErrs() []error +} + // New returns an Error object described by the code, message, and origErr. // // If origErr satisfies the Error interface it will not be wrapped within a new // Error object and will instead be returned. func New(code, message string, origErr error) Error { - if e, ok := origErr.(Error); ok && e != nil { - return e + var errs []error + if origErr != nil { + errs = append(errs, origErr) } - return newBaseError(code, message, origErr) + return newBaseError(code, message, errs) +} + +// NewBatchError returns an BatchedErrors with a collection of errors as an +// array of errors. 
+func NewBatchError(code, message string, errs []error) BatchedErrors { + return newBaseError(code, message, errs) } // A RequestFailure is an interface to extract request failure information from @@ -63,9 +103,9 @@ func New(code, message string, origErr error) Error { // output, err := s3manage.Upload(svc, input, opts) // if err != nil { // if reqerr, ok := err.(RequestFailure); ok { -// log.Printf("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) +// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) // } else { -// log.Printf("Error:", err.Error() +// log.Println("Error:", err.Error()) // } // } // diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go similarity index 63% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go rename to vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go index 418fc4c..0202a00 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -31,23 +31,27 @@ type baseError struct { // Optional original error this error is based off of. Allows building // chained errors. - origErr error + errs []error } -// newBaseError returns an error object for the code, message, and err. +// newBaseError returns an error object for the code, message, and errors. // // code is a short no whitespace phrase depicting the classification of // the error that is being created. // -// message is the free flow string containing detailed information about the error. +// message is the free flow string containing detailed information about the +// error. // -// origErr is the error object which will be nested under the new error to be returned. -func newBaseError(code, message string, origErr error) *baseError { - return &baseError{ +// origErrs is the error objects which will be nested under the new errors to +// be returned. +func newBaseError(code, message string, origErrs []error) *baseError { + b := &baseError{ code: code, message: message, - origErr: origErr, + errs: origErrs, } + + return b } // Error returns the string representation of the error. @@ -56,7 +60,12 @@ func newBaseError(code, message string, origErr error) *baseError { // // Satisfies the error interface. func (b baseError) Error() string { - return SprintError(b.code, b.message, "", b.origErr) + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) } // String returns the string representation of the error. @@ -75,10 +84,28 @@ func (b baseError) Message() string { return b.message } -// OrigErr returns the original error if one was set. Nil is returned if no error -// was set. +// OrigErr returns the original error if one was set. Nil is returned if no +// error was set. This only returns the first element in the list. If the full +// list is needed, use BatchedErrors. func (b baseError) OrigErr() error { - return b.origErr + switch len(b.errs) { + case 0: + return nil + case 1: + return b.errs[0] + default: + if err, ok := b.errs[0].(Error); ok { + return NewBatchError(err.Code(), err.Message(), b.errs[1:]) + } + return NewBatchError("BatchedErrors", + "multiple errors occurred", b.errs) + } +} + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. 
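Editor's note: the `awserr` changes above replace the single wrapped error with a slice and add `NewBatchError` and `OrigErrs`. A rough sketch of that surface, with made-up error codes and messages:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awserr"
)

func main() {
	// Error codes and messages are illustrative only.
	batch := awserr.NewBatchError("BatchedErrors", "multiple errors occurred", []error{
		awserr.New("Throttling", "rate exceeded", nil),
		awserr.New("InternalError", "backend unavailable", nil),
	})

	fmt.Println(batch.Code(), "-", batch.Message())

	// OrigErrs exposes every wrapped error; OrigErr (kept for backwards
	// compatibility) only surfaces the first one.
	for _, err := range batch.OrigErrs() {
		fmt.Println(" *", err)
	}
}
```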
+func (b baseError) OrigErrs() []error { + return b.errs } // So that the Error interface type can be included as an anonymous field @@ -94,8 +121,8 @@ type requestError struct { requestID string } -// newRequestError returns a wrapped error with additional information for request -// status code, and service requestID. +// newRequestError returns a wrapped error with additional information for +// request status code, and service requestID. // // Should be used to wrap all request which involve service requests. Even if // the request failed without a service response, but had an HTTP status code @@ -113,7 +140,7 @@ func newRequestError(err Error, statusCode int, requestID string) *requestError // Error returns the string representation of the error. // Satisfies the error interface. func (r requestError) Error() string { - extra := fmt.Sprintf("status code: %d, request id: [%s]", + extra := fmt.Sprintf("status code: %d, request id: %s", r.statusCode, r.requestID) return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) } @@ -133,3 +160,35 @@ func (r requestError) StatusCode() int { func (r requestError) RequestID() string { return r.requestID } + +// OrigErrs returns the original errors if one was set. An empty slice is +// returned if no error was set. +func (r requestError) OrigErrs() []error { + if b, ok := r.awsError.(BatchedErrors); ok { + return b.OrigErrs() + } + return []error{r.OrigErr()} +} + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice. + // If it is, then we append a newline. We do this, because unit tests + // could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go similarity index 85% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go rename to vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go index f91743c..1a3d106 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/copy.go @@ -3,6 +3,7 @@ package awsutil import ( "io" "reflect" + "time" ) // Copy deeply copies a src structure to dst. 
Useful for copying request and @@ -49,7 +50,14 @@ func rcopy(dst, src reflect.Value, root bool) { } else { e := src.Type().Elem() if dst.CanSet() && !src.IsNil() { - dst.Set(reflect.New(e)) + if _, ok := src.Interface().(*time.Time); !ok { + dst.Set(reflect.New(e)) + } else { + tempValue := reflect.New(e) + tempValue.Elem().Set(src.Elem()) + // Sets time.Time's unexported values + dst.Set(tempValue) + } } if src.Elem().IsValid() { // Keep the current root state since the depth hasn't changed @@ -57,16 +65,13 @@ func rcopy(dst, src reflect.Value, root bool) { } } case reflect.Struct: - if !root { - dst.Set(reflect.New(src.Type()).Elem()) - } - t := dst.Type() for i := 0; i < t.NumField(); i++ { name := t.Field(i).Name - srcval := src.FieldByName(name) - if srcval.IsValid() { - rcopy(dst.FieldByName(name), srcval, false) + srcVal := src.FieldByName(name) + dstVal := dst.FieldByName(name) + if srcVal.IsValid() && dstVal.CanSet() { + rcopy(dstVal, srcVal, false) } } case reflect.Slice: diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go new file mode 100644 index 0000000..59fa4a5 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/equal.go @@ -0,0 +1,27 @@ +package awsutil + +import ( + "reflect" +) + +// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. +// In addition to this, this method will also dereference the input values if +// possible so the DeepEqual performed will not fail if one parameter is a +// pointer and the other is not. +// +// DeepEqual will not perform indirection of nested values of the input parameters. +func DeepEqual(a, b interface{}) bool { + ra := reflect.Indirect(reflect.ValueOf(a)) + rb := reflect.Indirect(reflect.ValueOf(b)) + + if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { + // If the elements are both nil, and of the same type the are equal + // If they are of different types they are not equal + return reflect.TypeOf(a) == reflect.TypeOf(b) + } else if raValid != rbValid { + // Both values must be valid to be equal + return false + } + + return reflect.DeepEqual(ra.Interface(), rb.Interface()) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go similarity index 57% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go rename to vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go index 905d823..11c52c3 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go @@ -5,18 +5,20 @@ import ( "regexp" "strconv" "strings" + + "github.com/jmespath/go-jmespath" ) var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) // rValuesAtPath returns a slice of values found in value v. The values // in v are explored recursively so all nested values are collected. 
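Editor's note: the new `awsutil/equal.go` file added above introduces `DeepEqual`, which indirects pointer arguments before comparing. A small illustration with arbitrary values:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/awsutil"
)

func main() {
	s := "hello"

	// A *string and a plain string compare equal after indirection.
	fmt.Println(awsutil.DeepEqual(&s, "hello")) // true
	fmt.Println(awsutil.DeepEqual(&s, "world")) // false

	// Two invalid (nil) inputs are treated as equal.
	fmt.Println(awsutil.DeepEqual(nil, nil)) // true
}
```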
-func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) []reflect.Value { +func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { pathparts := strings.Split(path, "||") if len(pathparts) > 1 { for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, create, caseSensitive) - if vals != nil && len(vals) > 0 { + vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) + if len(vals) > 0 { return vals } } @@ -74,7 +76,16 @@ func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) return false }) - if create && value.Kind() == reflect.Ptr && value.IsNil() { + if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { + if !value.IsNil() { + value.Set(reflect.Zero(value.Type())) + } + return []reflect.Value{value} + } + + if createPath && value.Kind() == reflect.Ptr && value.IsNil() { + // TODO if the value is the terminus it should not be created + // if the value to be set to its position is nil. value.Set(reflect.New(value.Type().Elem())) value = value.Elem() } else { @@ -82,7 +93,7 @@ func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) } if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !create && value.IsNil() { + if !createPath && value.IsNil() { value = reflect.ValueOf(nil) } } @@ -95,8 +106,8 @@ func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) if indexStar || index != nil { nextvals = []reflect.Value{} - for _, value := range values { - value := reflect.Indirect(value) + for _, valItem := range values { + value := reflect.Indirect(valItem) if value.Kind() != reflect.Slice { continue } @@ -114,7 +125,7 @@ func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) // pull out index i := int(*index) if i >= value.Len() { // check out of bounds - if create { + if createPath { // TODO resize slice } else { continue @@ -125,7 +136,7 @@ func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) value = reflect.Indirect(value.Index(i)) if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !create && value.IsNil() { + if !createPath && value.IsNil() { value = reflect.ValueOf(nil) } } @@ -142,46 +153,70 @@ func rValuesAtPath(v interface{}, path string, create bool, caseSensitive bool) return values } -// ValuesAtPath returns a list of objects at the lexical path inside of a structure -func ValuesAtPath(i interface{}, path string) []interface{} { - if rvals := rValuesAtPath(i, path, false, true); rvals != nil { - vals := make([]interface{}, len(rvals)) - for i, rval := range rvals { - vals[i] = rval.Interface() - } - return vals +// ValuesAtPath returns a list of values at the case insensitive lexical +// path inside of a structure. 
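Editor's note: with this hunk, `ValuesAtPath` evaluates the path via go-jmespath and returns an error alongside the values, while `SetValueAtPath` keeps the reflection-based walker. A sketch against a throwaway struct; the type and field names are invented for the example:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
)

type example struct {
	Name *string
	Tags []*string
}

func main() {
	v := &example{
		Name: aws.String("demo"),
		Tags: []*string{aws.String("a"), aws.String("b")},
	}

	// Each matched value comes back as an element of the slice.
	vals, err := awsutil.ValuesAtPath(v, "Tags")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(vals)) // 2

	// SetValueAtPath mutates the struct in place.
	awsutil.SetValueAtPath(v, "Name", aws.String("renamed"))
	fmt.Println(aws.StringValue(v.Name)) // renamed
}
```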
+func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { + result, err := jmespath.Search(path, i) + if err != nil { + return nil, err } - return nil + + v := reflect.ValueOf(result) + if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { + return nil, nil + } + if s, ok := result.([]interface{}); ok { + return s, err + } + if v.Kind() == reflect.Map && v.Len() == 0 { + return nil, nil + } + if v.Kind() == reflect.Slice { + out := make([]interface{}, v.Len()) + for i := 0; i < v.Len(); i++ { + out[i] = v.Index(i).Interface() + } + return out, nil + } + + return []interface{}{result}, nil } -// ValuesAtAnyPath returns a list of objects at the case-insensitive lexical -// path inside of a structure -func ValuesAtAnyPath(i interface{}, path string) []interface{} { - if rvals := rValuesAtPath(i, path, false, false); rvals != nil { - vals := make([]interface{}, len(rvals)) - for i, rval := range rvals { - vals[i] = rval.Interface() - } - return vals - } - return nil -} - -// SetValueAtPath sets an object at the lexical path inside of a structure +// SetValueAtPath sets a value at the case insensitive lexical path inside +// of a structure. func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, true); rvals != nil { + if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { for _, rval := range rvals { - rval.Set(reflect.ValueOf(v)) + if rval.Kind() == reflect.Ptr && rval.IsNil() { + continue + } + setValue(rval, v) } } } -// SetValueAtAnyPath sets an object at the case insensitive lexical path inside -// of a structure -func SetValueAtAnyPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false); rvals != nil { - for _, rval := range rvals { - rval.Set(reflect.ValueOf(v)) - } +func setValue(dstVal reflect.Value, src interface{}) { + if dstVal.Kind() == reflect.Ptr { + dstVal = reflect.Indirect(dstVal) } + srcVal := reflect.ValueOf(src) + + if !srcVal.IsValid() { // src is literal nil + if dstVal.CanAddr() { + // Convert to pointer so that pointer's value can be nil'ed + // dstVal = dstVal.Addr() + } + dstVal.Set(reflect.Zero(dstVal.Type())) + + } else if srcVal.Kind() == reflect.Ptr { + if srcVal.IsNil() { + srcVal = reflect.Zero(dstVal.Type()) + } else { + srcVal = reflect.ValueOf(src).Elem() + } + dstVal.Set(srcVal) + } else { + dstVal.Set(srcVal) + } + } diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go similarity index 92% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go rename to vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go index 0de3eaa..710eb43 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go @@ -61,6 +61,12 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") case reflect.Slice: + strtype := v.Type().String() + if strtype == "[]uint8" { + fmt.Fprintf(buf, " len %d", v.Len()) + break + } + nl, id, id2 := "", "", "" if v.Len() > 3 { nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) @@ -91,6 +97,10 @@ func prettify(v reflect.Value, indent int, buf *bytes.Buffer) { buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") default: + if !v.IsValid() { + fmt.Fprint(buf, "") + return + } format := "%v" switch v.Interface().(type) 
{ case string: diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go new file mode 100644 index 0000000..b6432f1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go @@ -0,0 +1,89 @@ +package awsutil + +import ( + "bytes" + "fmt" + "reflect" + "strings" +) + +// StringValue returns the string representation of a value. +func StringValue(i interface{}) string { + var buf bytes.Buffer + stringValue(reflect.ValueOf(i), 0, &buf) + return buf.String() +} + +func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { + for v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + buf.WriteString("{\n") + + names := []string{} + for i := 0; i < v.Type().NumField(); i++ { + name := v.Type().Field(i).Name + f := v.Field(i) + if name[0:1] == strings.ToLower(name[0:1]) { + continue // ignore unexported fields + } + if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { + continue // ignore unset fields + } + names = append(names, name) + } + + for i, n := range names { + val := v.FieldByName(n) + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(n + ": ") + stringValue(val, indent+2, buf) + + if i < len(names)-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + case reflect.Slice: + nl, id, id2 := "", "", "" + if v.Len() > 3 { + nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) + } + buf.WriteString("[" + nl) + for i := 0; i < v.Len(); i++ { + buf.WriteString(id2) + stringValue(v.Index(i), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString("," + nl) + } + } + + buf.WriteString(nl + id + "]") + case reflect.Map: + buf.WriteString("{\n") + + for i, k := range v.MapKeys() { + buf.WriteString(strings.Repeat(" ", indent+2)) + buf.WriteString(k.String() + ": ") + stringValue(v.MapIndex(k), indent+2, buf) + + if i < v.Len()-1 { + buf.WriteString(",\n") + } + } + + buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") + default: + format := "%v" + switch v.Interface().(type) { + case string: + format = "%q" + } + fmt.Fprintf(buf, format, v.Interface()) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go new file mode 100644 index 0000000..788fe6e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -0,0 +1,90 @@ +package client + +import ( + "fmt" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Config provides configuration to a service client instance. +type Config struct { + Config *aws.Config + Handlers request.Handlers + Endpoint string + SigningRegion string + SigningName string +} + +// ConfigProvider provides a generic way for a service client to receive +// the ClientConfig without circular dependencies. +type ConfigProvider interface { + ClientConfig(serviceName string, cfgs ...*aws.Config) Config +} + +// ConfigNoResolveEndpointProvider same as ConfigProvider except it will not +// resolve the endpoint automatically. The service client's endpoint must be +// provided via the aws.Config.Endpoint field. +type ConfigNoResolveEndpointProvider interface { + ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) Config +} + +// A Client implements the base client request and response handling +// used by all service clients. 
+type Client struct { + request.Retryer + metadata.ClientInfo + + Config aws.Config + Handlers request.Handlers +} + +// New will return a pointer to a new initialized service client. +func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { + svc := &Client{ + Config: cfg, + ClientInfo: info, + Handlers: handlers.Copy(), + } + + switch retryer, ok := cfg.Retryer.(request.Retryer); { + case ok: + svc.Retryer = retryer + case cfg.Retryer != nil && cfg.Logger != nil: + s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) + cfg.Logger.Log(s) + fallthrough + default: + maxRetries := aws.IntValue(cfg.MaxRetries) + if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { + maxRetries = 3 + } + svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} + } + + svc.AddDebugHandlers() + + for _, option := range options { + option(svc) + } + + return svc +} + +// NewRequest returns a new Request pointer for the service API +// operation and parameters. +func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { + return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) +} + +// AddDebugHandlers injects debug logging handlers into the service to log request +// debug information. +func (c *Client) AddDebugHandlers() { + if !c.Config.LogLevel.AtLeast(aws.LogDebug) { + return + } + + c.Handlers.Send.PushFrontNamed(request.NamedHandler{Name: "awssdk.client.LogRequest", Fn: logRequest}) + c.Handlers.Send.PushBackNamed(request.NamedHandler{Name: "awssdk.client.LogResponse", Fn: logResponse}) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go new file mode 100644 index 0000000..1313478 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/default_retryer.go @@ -0,0 +1,96 @@ +package client + +import ( + "math/rand" + "sync" + "time" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// DefaultRetryer implements basic retry logic using exponential backoff for +// most services. If you want to implement custom retry logic, implement the +// request.Retryer interface or create a structure type that composes this +// struct and override the specific methods. For example, to override only +// the MaxRetries method: +// +// type retryer struct { +// service.DefaultRetryer +// } +// +// // This implementation always has 100 max retries +// func (d retryer) MaxRetries() uint { return 100 } +type DefaultRetryer struct { + NumMaxRetries int +} + +// MaxRetries returns the number of maximum returns the service will use to make +// an individual API request. 
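Editor's note: the `DefaultRetryer` comment above describes overriding retry behaviour by composing the struct. A compact sketch of that pattern wired into a session; the retry limit of 10 is arbitrary:

```go
package main

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/client"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

// patientRetryer composes client.DefaultRetryer and overrides only
// MaxRetries, as the DefaultRetryer documentation suggests.
type patientRetryer struct {
	client.DefaultRetryer
}

func (r patientRetryer) MaxRetries() int { return 10 } // illustrative limit

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Retryer: patientRetryer{client.DefaultRetryer{NumMaxRetries: 10}},
	}))
	_ = s3.New(sess) // the client picks up the custom retryer from the config
}
```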
+func (d DefaultRetryer) MaxRetries() int { + return d.NumMaxRetries +} + +var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) + +// RetryRules returns the delay duration before retrying this request again +func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { + // Set the upper limit of delay in retrying at ~five minutes + minTime := 30 + throttle := d.shouldThrottle(r) + if throttle { + minTime = 500 + } + + retryCount := r.RetryCount + if retryCount > 13 { + retryCount = 13 + } else if throttle && retryCount > 8 { + retryCount = 8 + } + + delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) + return time.Duration(delay) * time.Millisecond +} + +// ShouldRetry returns true if the request should be retried. +func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable != nil { + return *r.Retryable + } + + if r.HTTPResponse.StatusCode >= 500 { + return true + } + return r.IsErrorRetryable() || d.shouldThrottle(r) +} + +// ShouldThrottle returns true if the request should be throttled. +func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { + if r.HTTPResponse.StatusCode == 502 || + r.HTTPResponse.StatusCode == 503 || + r.HTTPResponse.StatusCode == 504 { + return true + } + return r.IsErrorThrottle() +} + +// lockedSource is a thread-safe implementation of rand.Source +type lockedSource struct { + lk sync.Mutex + src rand.Source +} + +func (r *lockedSource) Int63() (n int64) { + r.lk.Lock() + n = r.src.Int63() + r.lk.Unlock() + return +} + +func (r *lockedSource) Seed(seed int64) { + r.lk.Lock() + r.src.Seed(seed) + r.lk.Unlock() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go new file mode 100644 index 0000000..1f39c91 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/logger.go @@ -0,0 +1,108 @@ +package client + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http/httputil" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +const logReqMsg = `DEBUG: Request %s/%s Details: +---[ REQUEST POST-SIGN ]----------------------------- +%s +-----------------------------------------------------` + +const logReqErrMsg = `DEBUG ERROR: Request %s/%s: +---[ REQUEST DUMP ERROR ]----------------------------- +%s +------------------------------------------------------` + +type logWriter struct { + // Logger is what we will use to log the payload of a response. + Logger aws.Logger + // buf stores the contents of what has been read + buf *bytes.Buffer +} + +func (logger *logWriter) Write(b []byte) (int, error) { + return logger.buf.Write(b) +} + +type teeReaderCloser struct { + // io.Reader will be a tee reader that is used during logging. + // This structure will read from a body and write the contents to a logger. + io.Reader + // Source is used just to close when we are done reading. 
+ Source io.ReadCloser +} + +func (reader *teeReaderCloser) Close() error { + return reader.Source.Close() +} + +func logRequest(r *request.Request) { + logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) + dumpedBody, err := httputil.DumpRequestOut(r.HTTPRequest, logBody) + if err != nil { + r.Config.Logger.Log(fmt.Sprintf(logReqErrMsg, r.ClientInfo.ServiceName, r.Operation.Name, err)) + return + } + + if logBody { + // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's + // Body as a NoOpCloser and will not be reset after read by the HTTP + // client reader. + r.ResetBody() + } + + r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) +} + +const logRespMsg = `DEBUG: Response %s/%s Details: +---[ RESPONSE ]-------------------------------------- +%s +-----------------------------------------------------` + +const logRespErrMsg = `DEBUG ERROR: Response %s/%s: +---[ RESPONSE DUMP ERROR ]----------------------------- +%s +-----------------------------------------------------` + +func logResponse(r *request.Request) { + lw := &logWriter{r.Config.Logger, bytes.NewBuffer(nil)} + r.HTTPResponse.Body = &teeReaderCloser{ + Reader: io.TeeReader(r.HTTPResponse.Body, lw), + Source: r.HTTPResponse.Body, + } + + handlerFn := func(req *request.Request) { + body, err := httputil.DumpResponse(req.HTTPResponse, false) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + + b, err := ioutil.ReadAll(lw.buf) + if err != nil { + lw.Logger.Log(fmt.Sprintf(logRespErrMsg, req.ClientInfo.ServiceName, req.Operation.Name, err)) + return + } + lw.Logger.Log(fmt.Sprintf(logRespMsg, req.ClientInfo.ServiceName, req.Operation.Name, string(body))) + if req.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) { + lw.Logger.Log(string(b)) + } + } + + const handlerName = "awsdk.client.LogResponse.ResponseBody" + + r.Handlers.Unmarshal.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) + r.Handlers.UnmarshalError.SetBackNamed(request.NamedHandler{ + Name: handlerName, Fn: handlerFn, + }) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go new file mode 100644 index 0000000..4778056 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go @@ -0,0 +1,12 @@ +package metadata + +// ClientInfo wraps immutable data from the client.Client structure. +type ClientInfo struct { + ServiceName string + APIVersion string + Endpoint string + SigningName string + SigningRegion string + JSONVersion string + TargetPrefix string +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go new file mode 100644 index 0000000..d1f31f1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -0,0 +1,470 @@ +package aws + +import ( + "net/http" + "time" + + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/endpoints" +) + +// UseServiceDefaultRetries instructs the config to use the service's own +// default number of retries. This will be the default action if +// Config.MaxRetries is nil also. +const UseServiceDefaultRetries = -1 + +// RequestRetryer is an alias for a type that implements the request.Retryer +// interface. +type RequestRetryer interface{} + +// A Config provides service configuration for service clients. 
By default, +// all clients will use the defaults.DefaultConfig tructure. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(&aws.Config{ +// MaxRetries: aws.Int(3), +// })) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, &aws.Config{ +// Region: aws.String("us-west-2"), +// }) +type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to + // retrieve credentials. + CredentialsChainVerboseErrors *bool + + // The credentials object to use when signing requests. Defaults to a + // chain of credential providers to search for credentials in environment + // variables, shared credential file, and EC2 Instance Roles. + Credentials *credentials.Credentials + + // An optional endpoint URL (hostname only or fully qualified URI) + // that overrides the default generated endpoint for a client. Set this + // to `""` to use the default generated endpoint. + // + // @note You must still provide a `Region` value when specifying an + // endpoint for a client. + Endpoint *string + + // The resolver to use for looking up endpoints for AWS service clients + // to use based on region. + EndpointResolver endpoints.Resolver + + // EnforceShouldRetryCheck is used in the AfterRetryHandler to always call + // ShouldRetry regardless of whether or not if request.Retryable is set. + // This will utilize ShouldRetry method of custom retryers. If EnforceShouldRetryCheck + // is not set, then ShouldRetry will only be called if request.Retryable is nil. + // Proper handling of the request.Retryable field is important when setting this field. + EnforceShouldRetryCheck *bool + + // The region to send requests to. This parameter is required and must + // be configured globally or on a per-client basis unless otherwise + // noted. A full list of regions is found in the "Regions and Endpoints" + // document. + // + // @see http://docs.aws.amazon.com/general/latest/gr/rande.html + // AWS Regions and Endpoints + Region *string + + // Set this to `true` to disable SSL when sending requests. Defaults + // to `false`. + DisableSSL *bool + + // The HTTP client to use when sending requests. Defaults to + // `http.DefaultClient`. + HTTPClient *http.Client + + // An integer value representing the logging level. The default log level + // is zero (LogOff), which represents no logging. To enable logging set + // to a LogLevel Value. + LogLevel *LogLevelType + + // The logger writer interface to write logging messages to. Defaults to + // standard out. + Logger Logger + + // The maximum number of times that a request will be retried for failures. + // Defaults to -1, which defers the max retry setting to the service + // specific configuration. + MaxRetries *int + + // Retryer guides how HTTP requests should be retried in case of + // recoverable failures. + // + // When nil or the value does not implement the request.Retryer interface, + // the request.DefaultRetryer will be used. + // + // When both Retryer and MaxRetries are non-nil, the former is used and + // the latter ignored. 
+ // + // To set the Retryer field in a type-safe manner and with chaining, use + // the request.WithRetryer helper function: + // + // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) + // + Retryer RequestRetryer + + // Disables semantic parameter validation, which validates input for + // missing required fields and/or other semantic request input errors. + DisableParamValidation *bool + + // Disables the computation of request and response checksums, e.g., + // CRC32 checksums in Amazon DynamoDB. + DisableComputeChecksums *bool + + // Set this to `true` to force the request to use path-style addressing, + // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client + // will use virtual hosted bucket addressing when possible + // (`http://BUCKET.s3.amazonaws.com/KEY`). + // + // @note This configuration option is specific to the Amazon S3 service. + // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html + // Amazon S3: Virtual Hosting of Buckets + S3ForcePathStyle *bool + + // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` + // header to PUT requests over 2MB of content. 100-Continue instructs the + // HTTP client not to send the body until the service responds with a + // `continue` status. This is useful to prevent sending the request body + // until after the request is authenticated, and validated. + // + // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html + // + // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s + // `ExpectContinueTimeout` for information on adjusting the continue wait + // timeout. https://golang.org/pkg/net/http/#Transport + // + // You should use this flag to disble 100-Continue if you experience issues + // with proxies or third party S3 compatible services. + S3Disable100Continue *bool + + // Set this to `true` to enable S3 Accelerate feature. For all operations + // compatible with S3 Accelerate will use the accelerate endpoint for + // requests. Requests not compatible will fall back to normal S3 requests. + // + // The bucket must be enable for accelerate to be used with S3 client with + // accelerate enabled. If the bucket is not enabled for accelerate an error + // will be returned. The bucket name must be DNS compatible to also work + // with accelerate. + S3UseAccelerate *bool + + // Set this to `true` to disable the EC2Metadata client from overriding the + // default http.Client's Timeout. This is helpful if you do not want the + // EC2Metadata client to create a new http.Client. This options is only + // meaningful if you're not already using a custom HTTP client with the + // SDK. Enabled by default. + // + // Must be set and provided to the session.NewSession() in order to disable + // the EC2Metadata overriding the timeout for default credentials chain. + // + // Example: + // sess := session.Must(session.NewSession(aws.NewConfig() + // .WithEC2MetadataDiableTimeoutOverride(true))) + // + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + + // Instructs the endpiont to be generated for a service client to + // be the dual stack endpoint. The dual stack endpoint will support + // both IPv4 and IPv6 addressing. + // + // Setting this for a service which does not support dual stack will fail + // to make requets. It is not recommended to set this value on the session + // as it will apply to all service clients created with the session. Even + // services which don't support dual stack endpoints. 
+ // + // If the Endpoint config value is also provided the UseDualStack flag + // will be ignored. + // + // Only supported with. + // + // sess := session.Must(session.NewSession()) + // + // svc := s3.New(sess, &aws.Config{ + // UseDualStack: aws.Bool(true), + // }) + UseDualStack *bool + + // SleepDelay is an override for the func the SDK will call when sleeping + // during the lifecycle of a request. Specifically this will be used for + // request delays. This value should only be used for testing. To adjust + // the delay of a request see the aws/client.DefaultRetryer and + // aws/request.Retryer. + // + // SleepDelay will prevent any Context from being used for canceling retry + // delay of an API operation. It is recommended to not use SleepDelay at all + // and specify a Retryer instead. + SleepDelay func(time.Duration) + + // DisableRestProtocolURICleaning will not clean the URL path when making rest protocol requests. + // Will default to false. This would only be used for empty directory names in s3 requests. + // + // Example: + // sess := session.Must(session.NewSession(&aws.Config{ + // DisableRestProtocolURICleaning: aws.Bool(true), + // })) + // + // svc := s3.New(sess) + // out, err := svc.GetObject(&s3.GetObjectInput { + // Bucket: aws.String("bucketname"), + // Key: aws.String("//foo//bar//moo"), + // }) + DisableRestProtocolURICleaning *bool +} + +// NewConfig returns a new Config pointer that can be chained with builder +// methods to set multiple configuration values inline without using pointers. +// +// // Create Session with MaxRetry configuration to be shared by multiple +// // service clients. +// sess := session.Must(session.NewSession(aws.NewConfig(). +// WithMaxRetries(3), +// )) +// +// // Create S3 service client with a specific Region. +// svc := s3.New(sess, aws.NewConfig(). +// WithRegion("us-west-2"), +// ) +func NewConfig() *Config { + return &Config{} +} + +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + +// WithCredentials sets a config Credentials value returning a Config pointer +// for chaining. +func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { + c.Credentials = creds + return c +} + +// WithEndpoint sets a config Endpoint value returning a Config pointer for +// chaining. +func (c *Config) WithEndpoint(endpoint string) *Config { + c.Endpoint = &endpoint + return c +} + +// WithEndpointResolver sets a config EndpointResolver value returning a +// Config pointer for chaining. +func (c *Config) WithEndpointResolver(resolver endpoints.Resolver) *Config { + c.EndpointResolver = resolver + return c +} + +// WithRegion sets a config Region value returning a Config pointer for +// chaining. +func (c *Config) WithRegion(region string) *Config { + c.Region = ®ion + return c +} + +// WithDisableSSL sets a config DisableSSL value returning a Config pointer +// for chaining. +func (c *Config) WithDisableSSL(disable bool) *Config { + c.DisableSSL = &disable + return c +} + +// WithHTTPClient sets a config HTTPClient value returning a Config pointer +// for chaining. +func (c *Config) WithHTTPClient(client *http.Client) *Config { + c.HTTPClient = client + return c +} + +// WithMaxRetries sets a config MaxRetries value returning a Config pointer +// for chaining. 
+func (c *Config) WithMaxRetries(max int) *Config { + c.MaxRetries = &max + return c +} + +// WithDisableParamValidation sets a config DisableParamValidation value +// returning a Config pointer for chaining. +func (c *Config) WithDisableParamValidation(disable bool) *Config { + c.DisableParamValidation = &disable + return c +} + +// WithDisableComputeChecksums sets a config DisableComputeChecksums value +// returning a Config pointer for chaining. +func (c *Config) WithDisableComputeChecksums(disable bool) *Config { + c.DisableComputeChecksums = &disable + return c +} + +// WithLogLevel sets a config LogLevel value returning a Config pointer for +// chaining. +func (c *Config) WithLogLevel(level LogLevelType) *Config { + c.LogLevel = &level + return c +} + +// WithLogger sets a config Logger value returning a Config pointer for +// chaining. +func (c *Config) WithLogger(logger Logger) *Config { + c.Logger = logger + return c +} + +// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config +// pointer for chaining. +func (c *Config) WithS3ForcePathStyle(force bool) *Config { + c.S3ForcePathStyle = &force + return c +} + +// WithS3Disable100Continue sets a config S3Disable100Continue value returning +// a Config pointer for chaining. +func (c *Config) WithS3Disable100Continue(disable bool) *Config { + c.S3Disable100Continue = &disable + return c +} + +// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config +// pointer for chaining. +func (c *Config) WithS3UseAccelerate(enable bool) *Config { + c.S3UseAccelerate = &enable + return c +} + +// WithUseDualStack sets a config UseDualStack value returning a Config +// pointer for chaining. +func (c *Config) WithUseDualStack(enable bool) *Config { + c.UseDualStack = &enable + return c +} + +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + +// WithSleepDelay overrides the function used to sleep while waiting for the +// next retry. Defaults to time.Sleep. +func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { + c.SleepDelay = fn + return c +} + +// MergeIn merges the passed in configs into the existing config object. 
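// A minimal sketch of how the Config.MergeIn method defined below is typically
// used: only fields that are set (non-nil) on the overriding Config are copied,
// so the base Region survives while MaxRetries is overridden. The region and
// retry values here are illustrative assumptions, not taken from this patch.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	base := aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(3)
	override := aws.NewConfig().WithMaxRetries(10)

	// Copy only the fields set on override into base.
	base.MergeIn(override)

	fmt.Println(*base.Region)     // "us-west-2" (unchanged, override.Region was nil)
	fmt.Println(*base.MaxRetries) // 10 (taken from override)
}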
+func (c *Config) MergeIn(cfgs ...*Config) { + for _, other := range cfgs { + mergeInConfig(c, other) + } +} + +func mergeInConfig(dst *Config, other *Config) { + if other == nil { + return + } + + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + + if other.Credentials != nil { + dst.Credentials = other.Credentials + } + + if other.Endpoint != nil { + dst.Endpoint = other.Endpoint + } + + if other.EndpointResolver != nil { + dst.EndpointResolver = other.EndpointResolver + } + + if other.Region != nil { + dst.Region = other.Region + } + + if other.DisableSSL != nil { + dst.DisableSSL = other.DisableSSL + } + + if other.HTTPClient != nil { + dst.HTTPClient = other.HTTPClient + } + + if other.LogLevel != nil { + dst.LogLevel = other.LogLevel + } + + if other.Logger != nil { + dst.Logger = other.Logger + } + + if other.MaxRetries != nil { + dst.MaxRetries = other.MaxRetries + } + + if other.Retryer != nil { + dst.Retryer = other.Retryer + } + + if other.DisableParamValidation != nil { + dst.DisableParamValidation = other.DisableParamValidation + } + + if other.DisableComputeChecksums != nil { + dst.DisableComputeChecksums = other.DisableComputeChecksums + } + + if other.S3ForcePathStyle != nil { + dst.S3ForcePathStyle = other.S3ForcePathStyle + } + + if other.S3Disable100Continue != nil { + dst.S3Disable100Continue = other.S3Disable100Continue + } + + if other.S3UseAccelerate != nil { + dst.S3UseAccelerate = other.S3UseAccelerate + } + + if other.UseDualStack != nil { + dst.UseDualStack = other.UseDualStack + } + + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + + if other.SleepDelay != nil { + dst.SleepDelay = other.SleepDelay + } + + if other.DisableRestProtocolURICleaning != nil { + dst.DisableRestProtocolURICleaning = other.DisableRestProtocolURICleaning + } + + if other.EnforceShouldRetryCheck != nil { + dst.EnforceShouldRetryCheck = other.EnforceShouldRetryCheck + } +} + +// Copy will return a shallow copy of the Config object. If any additional +// configurations are provided they will be merged into the new config returned. +func (c *Config) Copy(cfgs ...*Config) *Config { + dst := &Config{} + dst.MergeIn(c) + + for _, cfg := range cfgs { + dst.MergeIn(cfg) + } + + return dst +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context.go b/vendor/github.com/aws/aws-sdk-go/aws/context.go new file mode 100644 index 0000000..79f4268 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context.go @@ -0,0 +1,71 @@ +package aws + +import ( + "time" +) + +// Context is an copy of the Go v1.7 stdlib's context.Context interface. +// It is represented as a SDK interface to enable you to use the "WithContext" +// API methods with Go v1.6 and a Context type such as golang.org/x/net/context. +// +// See https://golang.org/pkg/context on how to use contexts. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. 
+ Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key return the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + Value(key interface{}) interface{} +} + +// BackgroundContext returns a context that will never be canceled, has no +// values, and no deadline. This context is used by the SDK to provide +// backwards compatibility with non-context API operations and functionality. +// +// Go 1.6 and before: +// This context function is equivalent to context.Background in the Go stdlib. +// +// Go 1.7 and later: +// The context returned will be the value returned by context.Background(). +// +// See https://golang.org/pkg/context for more information on Contexts. +func BackgroundContext() Context { + return backgroundCtx +} + +// SleepWithContext will wait for the timer duration to expire, or for the context +// to be canceled, whichever happens first. If the context is canceled the Context's +// error will be returned. +// +// Expects Context to always return a non-nil error if the Done channel is closed. +func SleepWithContext(ctx Context, dur time.Duration) error { + t := time.NewTimer(dur) + defer t.Stop() + + select { + case <-t.C: + break + case <-ctx.Done(): + return ctx.Err() + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go new file mode 100644 index 0000000..e8cf93d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_6.go @@ -0,0 +1,41 @@ +// +build !go1.7 + +package aws + +import "time" + +// An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This +// is copied to provide a 1.6 and 1.5 safe version of context that is compatible +// with Go 1.7's Context. +// +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses.
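// A small sketch of how the SleepWithContext helper defined above behaves,
// assuming Go 1.7+ so a stdlib context.Context satisfies aws.Context directly.
// The durations used are illustrative assumptions, not part of this patch.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// The context is canceled after 50ms, well before the 1s sleep elapses,
	// so SleepWithContext returns the context's error instead of waiting.
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	if err := aws.SleepWithContext(ctx, time.Second); err != nil {
		fmt.Println("sleep interrupted:", err) // context.DeadlineExceeded
	}
}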
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case backgroundCtx: + return "aws.BackgroundContext" + } + return "unknown empty Context" +} + +var ( + backgroundCtx = new(emptyCtx) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go new file mode 100644 index 0000000..064f75c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/context_1_7.go @@ -0,0 +1,9 @@ +// +build go1.7 + +package aws + +import "context" + +var ( + backgroundCtx = context.Background() +) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convutil.go b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go similarity index 89% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convutil.go rename to vendor/github.com/aws/aws-sdk-go/aws/convert_types.go index d6a7b08..3b73a7d 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/convutil.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/convert_types.go @@ -2,7 +2,7 @@ package aws import "time" -// String returns a pointer to of the string value passed in. +// String returns a pointer to the string value passed in. func String(v string) *string { return &v } @@ -61,7 +61,7 @@ func StringValueMap(src map[string]*string) map[string]string { return dst } -// Bool returns a pointer to of the bool value passed in. +// Bool returns a pointer to the bool value passed in. func Bool(v bool) *bool { return &v } @@ -120,7 +120,7 @@ func BoolValueMap(src map[string]*bool) map[string]bool { return dst } -// Int returns a pointer to of the int value passed in. +// Int returns a pointer to the int value passed in. func Int(v int) *int { return &v } @@ -179,7 +179,7 @@ func IntValueMap(src map[string]*int) map[string]int { return dst } -// Int64 returns a pointer to of the int64 value passed in. +// Int64 returns a pointer to the int64 value passed in. func Int64(v int64) *int64 { return &v } @@ -238,7 +238,7 @@ func Int64ValueMap(src map[string]*int64) map[string]int64 { return dst } -// Float64 returns a pointer to of the float64 value passed in. +// Float64 returns a pointer to the float64 value passed in. func Float64(v float64) *float64 { return &v } @@ -297,7 +297,7 @@ func Float64ValueMap(src map[string]*float64) map[string]float64 { return dst } -// Time returns a pointer to of the time.Time value passed in. +// Time returns a pointer to the time.Time value passed in. func Time(v time.Time) *time.Time { return &v } @@ -311,6 +311,18 @@ func TimeValue(v *time.Time) time.Time { return time.Time{} } +// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". +// The result is undefined if the Unix time cannot be represented by an int64. +// Which includes calling TimeUnixMilli on a zero Time is undefined. +// +// This utility is useful for service API's such as CloudWatch Logs which require +// their unix time values to be in milliseconds. +// +// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
+func TimeUnixMilli(t time.Time) int64 { + return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) +} + // TimeSlice converts a slice of time.Time values into a slice of // time.Time pointers func TimeSlice(src []time.Time) []*time.Time { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go new file mode 100644 index 0000000..495e3ef --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go @@ -0,0 +1,242 @@ +package corehandlers + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "regexp" + "runtime" + "strconv" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// Interface for matching types which also have a Len method. +type lener interface { + Len() int +} + +// BuildContentLengthHandler builds the content length of a request based on the body, +// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable +// to determine request body length and no "Content-Length" was specified it will panic. +// +// The Content-Length will only be added to the request if the length of the body +// is greater than 0. If the body is empty or the current `Content-Length` +// header is <= 0, the header will also be stripped. +var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { + var length int64 + + if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { + length, _ = strconv.ParseInt(slength, 10, 64) + } else { + switch body := r.Body.(type) { + case nil: + length = 0 + case lener: + length = int64(body.Len()) + case io.Seeker: + r.BodyStart, _ = body.Seek(0, 1) + end, _ := body.Seek(0, 2) + body.Seek(r.BodyStart, 0) // make sure to seek back to original location + length = end - r.BodyStart + default: + panic("Cannot get length of body, must provide `ContentLength`") + } + } + + if length > 0 { + r.HTTPRequest.ContentLength = length + r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) + } else { + r.HTTPRequest.ContentLength = 0 + r.HTTPRequest.Header.Del("Content-Length") + } +}} + +// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. +var SDKVersionUserAgentHandler = request.NamedHandler{ + Name: "core.SDKVersionUserAgentHandler", + Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, + runtime.Version(), runtime.GOOS, runtime.GOARCH), +} + +var reStatusCode = regexp.MustCompile(`^(\d{3})`) + +// ValidateReqSigHandler is a request handler to ensure that the request's +// signature doesn't expire before it is sent. This can happen when a request +// is built and signed significantly before it is sent. Or significant delays +// occur when retrying requests that would cause the signature to expire. +var ValidateReqSigHandler = request.NamedHandler{ + Name: "core.ValidateReqSigHandler", + Fn: func(r *request.Request) { + // Unsigned requests are not signed + if r.Config.Credentials == credentials.AnonymousCredentials { + return + } + + signedTime := r.Time + if !r.LastSignedAt.IsZero() { + signedTime = r.LastSignedAt + } + + // 10 minutes to allow for some clock skew/delays in transmission. 
+ // Would be improved with aws/aws-sdk-go#423 + if signedTime.Add(10 * time.Minute).After(time.Now()) { + return + } + + fmt.Println("request expired, resigning") + r.Sign() + }, +} + +// SendHandler is a request handler to send service request using HTTP client. +var SendHandler = request.NamedHandler{ + Name: "core.SendHandler", + Fn: func(r *request.Request) { + sender := sendFollowRedirects + if r.DisableFollowRedirects { + sender = sendWithoutFollowRedirects + } + + if request.NoBody == r.HTTPRequest.Body { + // Strip off the request body if the NoBody reader was used as a + // place holder for a request body. This prevents the SDK from + // making requests with a request body when it would be invalid + // to do so. + // + // Use a shallow copy of the http.Request to ensure the race condition + // of transport on Body will not trigger + reqOrig, reqCopy := r.HTTPRequest, *r.HTTPRequest + reqCopy.Body = nil + r.HTTPRequest = &reqCopy + defer func() { + r.HTTPRequest = reqOrig + }() + } + + var err error + r.HTTPResponse, err = sender(r) + if err != nil { + handleSendError(r, err) + } + }, +} + +func sendFollowRedirects(r *request.Request) (*http.Response, error) { + return r.Config.HTTPClient.Do(r.HTTPRequest) +} + +func sendWithoutFollowRedirects(r *request.Request) (*http.Response, error) { + transport := r.Config.HTTPClient.Transport + if transport == nil { + transport = http.DefaultTransport + } + + return transport.RoundTrip(r.HTTPRequest) +} + +func handleSendError(r *request.Request, err error) { + // Prevent leaking if an HTTPResponse was returned. Clean up + // the body. + if r.HTTPResponse != nil { + r.HTTPResponse.Body.Close() + } + // Capture the case where url.Error is returned for error processing + // response. e.g. 301 without location header comes back as string + // error and r.HTTPResponse is nil. Other URL redirect errors will + // comeback in a similar method. + if e, ok := err.(*url.Error); ok && e.Err != nil { + if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { + code, _ := strconv.ParseInt(s[1], 10, 64) + r.HTTPResponse = &http.Response{ + StatusCode: int(code), + Status: http.StatusText(int(code)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + return + } + } + if r.HTTPResponse == nil { + // Add a dummy request response object to ensure the HTTPResponse + // value is consistent. + r.HTTPResponse = &http.Response{ + StatusCode: int(0), + Status: http.StatusText(int(0)), + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + // Catch all other request errors. + r.Error = awserr.New("RequestError", "send request failed", err) + r.Retryable = aws.Bool(true) // network errors are retryable + + // Override the error with a context canceled error, if that was canceled. + ctx := r.Context() + select { + case <-ctx.Done(): + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", ctx.Err()) + r.Retryable = aws.Bool(false) + default: + } +} + +// ValidateResponseHandler is a request handler to validate service response. +var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { + if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { + // this may be replaced by an UnmarshalError handler + r.Error = awserr.New("UnknownError", "unknown error", nil) + } +}} + +// AfterRetryHandler performs final checks to determine if the request should +// be retried and how long to delay. 
+var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { + // If one of the other handlers already set the retry state + // we don't want to override it based on the service's state + if r.Retryable == nil || aws.BoolValue(r.Config.EnforceShouldRetryCheck) { + r.Retryable = aws.Bool(r.ShouldRetry(r)) + } + + if r.WillRetry() { + r.RetryDelay = r.RetryRules(r) + + if sleepFn := r.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(r.RetryDelay) + } else if err := aws.SleepWithContext(r.Context(), r.RetryDelay); err != nil { + r.Error = awserr.New(request.CanceledErrorCode, + "request context canceled", err) + r.Retryable = aws.Bool(false) + return + } + + // when the expired token exception occurs the credentials + // need to be expired locally so that the next request to + // get credentials will trigger a credentials refresh. + if r.IsErrorExpired() { + r.Config.Credentials.Expire() + } + + r.RetryCount++ + r.Error = nil + } +}} + +// ValidateEndpointHandler is a request handler to validate a request had the +// appropriate Region and Endpoint set. Will set r.Error if the endpoint or +// region is not valid. +var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { + if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { + r.Error = aws.ErrMissingRegion + } else if r.ClientInfo.Endpoint == "" { + r.Error = aws.ErrMissingEndpoint + } +}} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go new file mode 100644 index 0000000..7d50b15 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go @@ -0,0 +1,17 @@ +package corehandlers + +import "github.com/aws/aws-sdk-go/aws/request" + +// ValidateParametersHandler is a request handler to validate the input parameters. +// Validating parameters only has meaning if done prior to the request being sent. +var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { + if !r.ParamsFilled() { + return + } + + if v, ok := r.Params.(request.Validator); ok { + if err := v.Validate(); err != nil { + r.Error = err + } + } +}} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go similarity index 69% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go rename to vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go index 7f509ca..f298d65 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -8,8 +8,14 @@ var ( // ErrNoValidProvidersFoundInChain Is returned when there are no valid // providers in the ChainProvider. // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true + // // @readonly - ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", "no valid providers in chain", nil) + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. 
+ For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) ) // A ChainProvider will search for a provider which returns credentials @@ -28,23 +34,28 @@ var ( // // Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. // In this example EnvProvider will first check if any credentials are available -// vai the environment variables. If there are none ChainProvider will check +// via the environment variables. If there are none ChainProvider will check // the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider // does not return any credentials ChainProvider will return the error // ErrNoValidProvidersFoundInChain // -// creds := NewChainCredentials( -// []Provider{ -// &EnvProvider{}, -// &EC2RoleProvider{}, +// creds := credentials.NewChainCredentials( +// []credentials.Provider{ +// &credentials.EnvProvider{}, +// &ec2rolecreds.EC2RoleProvider{ +// Client: ec2metadata.New(sess), +// }, // }) // // // Usage of ChainCredentials with aws.Config -// svc := ec2.New(&aws.Config{Credentials: creds}) +// svc := ec2.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: creds, +// }))) // type ChainProvider struct { - Providers []Provider - curr Provider + Providers []Provider + curr Provider + VerboseErrors bool } // NewChainCredentials returns a pointer to a new Credentials object @@ -61,17 +72,23 @@ func NewChainCredentials(providers []Provider) *Credentials { // If a provider is found it will be cached and any calls to IsExpired() // will return the expired state of the cached provider. func (c *ChainProvider) Retrieve() (Value, error) { + var errs []error for _, p := range c.Providers { - if creds, err := p.Retrieve(); err == nil { + creds, err := p.Retrieve() + if err == nil { c.curr = p return creds, nil } + errs = append(errs, err) } c.curr = nil - // TODO better error reporting. maybe report error for each failed retrieve? - - return Value{}, ErrNoValidProvidersFoundInChain + var err error + err = ErrNoValidProvidersFoundInChain + if c.VerboseErrors { + err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + } + return Value{}, err } // IsExpired will returned the expired state of the currently cached provider diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go similarity index 84% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go rename to vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go index 2834a08..42416fc 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/credentials.go @@ -14,7 +14,7 @@ // // Example of using the environment variable credentials. // -// creds := NewEnvCredentials() +// creds := credentials.NewEnvCredentials() // // // Retrieve the credentials value // credValue, err := creds.Get() @@ -26,7 +26,7 @@ // This may be helpful to proactively expire credentials and refresh them sooner // than they would naturally expire on their own. // -// creds := NewCredentials(&EC2RoleProvider{}) +// creds := credentials.NewCredentials(&ec2rolecreds.EC2RoleProvider{}) // creds.Expire() // credsValue, err := creds.Get() // // New credentials will be retrieved instead of from cache. 
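// A brief sketch of the verbose-error behaviour added to ChainProvider above:
// with CredentialsChainVerboseErrors enabled, a failed chain reports each
// provider's individual error instead of the generic deprecated message.
// Assumes no credentials are available in the environment; illustrative only.
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		CredentialsChainVerboseErrors: aws.Bool(true),
	}))

	if _, err := sess.Config.Credentials.Get(); err != nil {
		// With verbose errors enabled this is a batched error listing why
		// each provider in the default chain failed to return credentials.
		fmt.Println(err)
	}
}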
@@ -43,7 +43,7 @@ // func (m *MyProvider) Retrieve() (Value, error) {...} // func (m *MyProvider) IsExpired() bool {...} // -// creds := NewCredentials(&MyProvider{}) +// creds := credentials.NewCredentials(&MyProvider{}) // credValue, err := creds.Get() // package credentials @@ -53,14 +53,16 @@ import ( "time" ) -// Create an empty Credential object that can be used as dummy placeholder -// credentials for requests that do not need signed. +// AnonymousCredentials is an empty Credential object that can be used as +// dummy placeholder credentials for requests that do not need to be signed. // // This Credentials can be used to configure a service to not sign requests // when making service API calls. For example, when accessing public // s3 buckets. // -// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) +// svc := s3.New(session.Must(session.NewSession(&aws.Config{ +// Credentials: credentials.AnonymousCredentials, +// }))) // // Access public S3 buckets. // // @readonly @@ -76,6 +78,9 @@ type Value struct { // AWS Session Token SessionToken string + + // Provider used to get credentials + ProviderName string } // A Provider is the interface for any component which will provide credentials @@ -85,7 +90,7 @@ type Value struct { // The Provider should not need to implement its own mutexes, because // that will be managed by Credentials. type Provider interface { - // Refresh returns nil if it successfully retrieved the value. + // Retrieve returns nil if it successfully retrieved the value. // Error is returned if the value were not obtainable, or empty. Retrieve() (Value, error) @@ -94,6 +99,27 @@ type Provider interface { IsExpired() bool } +// An ErrorProvider is a stub credentials provider that always returns an error. +// It is used by the SDK when construction of a known provider is not possible +// due to an error. +type ErrorProvider struct { + // The error to be returned from Retrieve + Err error + + // The provider name to set on the Value returned by Retrieve + ProviderName string +} + +// Retrieve will always return the error that the ErrorProvider was created with. +func (p ErrorProvider) Retrieve() (Value, error) { + return Value{ProviderName: p.ProviderName}, p.Err +} + +// IsExpired will always return not expired. +func (p ErrorProvider) IsExpired() bool { + return false +} + // A Expiry provides shared expiration logic to be used by credentials // providers to implement expiry functionality. // diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go new file mode 100644 index 0000000..c397495 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go @@ -0,0 +1,178 @@ +package ec2rolecreds + +import ( + "bufio" + "encoding/json" + "fmt" + "path" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/ec2metadata" +) + +// ProviderName provides the name of the EC2Role provider +const ProviderName = "EC2RoleProvider" + +// An EC2RoleProvider retrieves credentials from the EC2 service, and keeps track of +// whether those credentials are expired. +// +// Example of how to configure the EC2RoleProvider with a custom http Client, Endpoint +// or ExpiryWindow +// +// p := &ec2rolecreds.EC2RoleProvider{ +// // Pass in a custom timeout to be used when requesting +// // IAM EC2 Role credentials.
+// Client: ec2metadata.New(sess, aws.Config{ +// HTTPClient: &http.Client{Timeout: 10 * time.Second}, +// }), +// +// // Do not use early expiry of credentials. If a non zero value is +// // specified the credentials will be expired early +// ExpiryWindow: 0, +// } +type EC2RoleProvider struct { + credentials.Expiry + + // Required EC2Metadata client to use when connecting to EC2 metadata service. + Client *ec2metadata.EC2Metadata + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. +// The ConfigProvider is satisfied by the session.Session type. +func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: ec2metadata.New(c), + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping +// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 +// metadata service. +func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { + p := &EC2RoleProvider{ + Client: client, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve retrieves credentials from the EC2 service. +// Error will be returned if the request fails, or unable to extract +// the desired credentials. +func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { + credsList, err := requestCredList(m.Client) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + if len(credsList) == 0 { + return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) + } + credsName := credsList[0] + + roleCreds, err := requestCred(m.Client, credsName) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: roleCreds.AccessKeyID, + SecretAccessKey: roleCreds.SecretAccessKey, + SessionToken: roleCreds.Token, + ProviderName: ProviderName, + }, nil +} + +// A ec2RoleCredRespBody provides the shape for unmarshaling credential +// request responses. +type ec2RoleCredRespBody struct { + // Success State + Expiration time.Time + AccessKeyID string + SecretAccessKey string + Token string + + // Error state + Code string + Message string +} + +const iamSecurityCredsPath = "/iam/security-credentials" + +// requestCredList requests a list of credentials from the EC2 service. 
+// If there are no credentials, or there is an error making or receiving the request +func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { + resp, err := client.GetMetadata(iamSecurityCredsPath) + if err != nil { + return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) + } + + credsList := []string{} + s := bufio.NewScanner(strings.NewReader(resp)) + for s.Scan() { + credsList = append(credsList, s.Text()) + } + + if err := s.Err(); err != nil { + return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) + } + + return credsList, nil +} + +// requestCred requests the credentials for a specific credentials from the EC2 service. +// +// If the credentials cannot be found, or there is an error reading the response +// and error will be returned. +func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { + resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) + if err != nil { + return ec2RoleCredRespBody{}, + awserr.New("EC2RoleRequestError", + fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), + err) + } + + respCreds := ec2RoleCredRespBody{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { + return ec2RoleCredRespBody{}, + awserr.New("SerializationError", + fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), + err) + } + + if respCreds.Code != "Success" { + // If an error code was returned something failed requesting the role. + return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) + } + + return respCreds, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go new file mode 100644 index 0000000..a4cec5c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/endpointcreds/provider.go @@ -0,0 +1,191 @@ +// Package endpointcreds provides support for retrieving credentials from an +// arbitrary HTTP endpoint. +// +// The credentials endpoint Provider can receive both static and refreshable +// credentials that will expire. Credentials are static when an "Expiration" +// value is not provided in the endpoint's response. +// +// Static credentials will never expire once they have been retrieved. The format +// of the static credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// } +// +// Refreshable credentials will expire within the "ExpiryWindow" of the Expiration +// value in the response. The format of the refreshable credentials response: +// { +// "AccessKeyId" : "MUA...", +// "SecretAccessKey" : "/7PC5om....", +// "Token" : "AQoDY....=", +// "Expiration" : "2016-02-25T06:03:31Z" +// } +// +// Errors should be returned in the following format and only returned with 400 +// or 500 HTTP status codes. +// { +// "code": "ErrorCode", +// "message": "Helpful error message." +// } +package endpointcreds + +import ( + "encoding/json" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" +) + +// ProviderName is the name of the credentials provider. 
+const ProviderName = `CredentialsEndpointProvider` + +// Provider satisfies the credentials.Provider interface, and is a client to +// retrieve credentials from an arbitrary endpoint. +type Provider struct { + staticCreds bool + credentials.Expiry + + // Requires a AWS Client to make HTTP requests to the endpoint with. + // the Endpoint the request will be made to is provided by the aws.Config's + // Endpoint value. + Client *client.Client + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewProviderClient returns a credentials Provider for retrieving AWS credentials +// from arbitrary endpoint. +func NewProviderClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) credentials.Provider { + p := &Provider{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: "CredentialsEndpoint", + Endpoint: endpoint, + }, + handlers, + ), + } + + p.Client.Handlers.Unmarshal.PushBack(unmarshalHandler) + p.Client.Handlers.UnmarshalError.PushBack(unmarshalError) + p.Client.Handlers.Validate.Clear() + p.Client.Handlers.Validate.PushBack(validateEndpointHandler) + + for _, option := range options { + option(p) + } + + return p +} + +// NewCredentialsClient returns a Credentials wrapper for retrieving credentials +// from an arbitrary endpoint concurrently. The client will request the +func NewCredentialsClient(cfg aws.Config, handlers request.Handlers, endpoint string, options ...func(*Provider)) *credentials.Credentials { + return credentials.NewCredentials(NewProviderClient(cfg, handlers, endpoint, options...)) +} + +// IsExpired returns true if the credentials retrieved are expired, or not yet +// retrieved. +func (p *Provider) IsExpired() bool { + if p.staticCreds { + return false + } + return p.Expiry.IsExpired() +} + +// Retrieve will attempt to request the credentials from the endpoint the Provider +// was configured for. And error will be returned if the retrieval fails. 
+func (p *Provider) Retrieve() (credentials.Value, error) { + resp, err := p.getCredentials() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("CredentialsEndpointError", "failed to load credentials", err) + } + + if resp.Expiration != nil { + p.SetExpiration(*resp.Expiration, p.ExpiryWindow) + } else { + p.staticCreds = true + } + + return credentials.Value{ + AccessKeyID: resp.AccessKeyID, + SecretAccessKey: resp.SecretAccessKey, + SessionToken: resp.Token, + ProviderName: ProviderName, + }, nil +} + +type getCredentialsOutput struct { + Expiration *time.Time + AccessKeyID string + SecretAccessKey string + Token string +} + +type errorOutput struct { + Code string `json:"code"` + Message string `json:"message"` +} + +func (p *Provider) getCredentials() (*getCredentialsOutput, error) { + op := &request.Operation{ + Name: "GetCredentials", + HTTPMethod: "GET", + } + + out := &getCredentialsOutput{} + req := p.Client.NewRequest(op, nil, out) + req.HTTPRequest.Header.Set("Accept", "application/json") + + return out, req.Send() +} + +func validateEndpointHandler(r *request.Request) { + if len(r.ClientInfo.Endpoint) == 0 { + r.Error = aws.ErrMissingEndpoint + } +} + +func unmarshalHandler(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + out := r.Data.(*getCredentialsOutput) + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&out); err != nil { + r.Error = awserr.New("SerializationError", + "failed to decode endpoint credentials", + err, + ) + } +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + var errOut errorOutput + if err := json.NewDecoder(r.HTTPResponse.Body).Decode(&errOut); err != nil { + r.Error = awserr.New("SerializationError", + "failed to decode endpoint credentials", + err, + ) + } + + // Response body format is not consistent between metadata endpoints. + // Grab the error message as a string and include that as the source error + r.Error = awserr.New(errOut.Code, errOut.Message, nil) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go similarity index 86% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go rename to vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go index 043e861..c14231a 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go @@ -6,6 +6,9 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" ) +// EnvProviderName provides a name of Env provider +const EnvProviderName = "EnvProvider" + var ( // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be // found in the process's environment. 
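// A short sketch of the ProviderName plumbing introduced in this patch: a
// retrieved credentials Value now reports which provider produced it, e.g.
// "EnvProvider" when the environment variables below are set. The key values
// are illustrative assumptions, not real credentials.
package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	os.Setenv("AWS_ACCESS_KEY_ID", "AKID")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET")

	creds := credentials.NewEnvCredentials()
	v, err := creds.Get()
	if err != nil {
		fmt.Println("retrieve failed:", err)
		return
	}
	fmt.Println(v.ProviderName) // "EnvProvider"
}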
@@ -26,6 +29,7 @@ var ( // Environment variables used: // // * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY +// // * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY type EnvProvider struct { retrieved bool @@ -52,11 +56,11 @@ func (e *EnvProvider) Retrieve() (Value, error) { } if id == "" { - return Value{}, ErrAccessKeyIDNotFound + return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound } if secret == "" { - return Value{}, ErrSecretAccessKeyNotFound + return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound } e.retrieved = true @@ -64,6 +68,7 @@ func (e *EnvProvider) Retrieve() (Value, error) { AccessKeyID: id, SecretAccessKey: secret, SessionToken: os.Getenv("AWS_SESSION_TOKEN"), + ProviderName: EnvProviderName, }, nil } diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini similarity index 69% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini rename to vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini index aa2dc50..7fc91d9 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/example.ini @@ -6,3 +6,7 @@ aws_session_token = token [no_token] aws_access_key_id = accessKey aws_secret_access_key = secret + +[with_colon] +aws_access_key_id: accessKey +aws_secret_access_key: secret diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go similarity index 66% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go rename to vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go index b457e63..7fb7cbf 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go @@ -5,11 +5,14 @@ import ( "os" "path/filepath" - "github.com/vaughan0/go-ini" + "github.com/go-ini/ini" "github.com/aws/aws-sdk-go/aws/awserr" ) +// SharedCredsProviderName provides a name of SharedCreds provider +const SharedCredsProviderName = "SharedCredentialsProvider" + var ( // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. // @@ -22,8 +25,12 @@ var ( // // Profile ini file example: $HOME/.aws/credentials type SharedCredentialsProvider struct { - // Path to the shared credentials file. If empty will default to current user's - // home directory. + // Path to the shared credentials file. + // + // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the + // env value is empty will default to current user's home directory. + // Linux/OSX: "$HOME/.aws/credentials" + // Windows: "%USERPROFILE%\.aws\credentials" Filename string // AWS Profile to extract credentials from the shared credentials file. 
If empty @@ -51,12 +58,12 @@ func (p *SharedCredentialsProvider) Retrieve() (Value, error) { filename, err := p.filename() if err != nil { - return Value{}, err + return Value{ProviderName: SharedCredsProviderName}, err } creds, err := loadProfile(filename, p.profile()) if err != nil { - return Value{}, err + return Value{ProviderName: SharedCredsProviderName}, err } p.retrieved = true @@ -72,32 +79,37 @@ func (p *SharedCredentialsProvider) IsExpired() bool { // The credentials retrieved from the profile will be returned or error. Error will be // returned if it fails to read from the file, or the data is invalid. func loadProfile(filename, profile string) (Value, error) { - config, err := ini.LoadFile(filename) + config, err := ini.Load(filename) if err != nil { - return Value{}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) + } + iniProfile, err := config.GetSection(profile) + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) } - iniProfile := config.Section(profile) - id, ok := iniProfile["aws_access_key_id"] - if !ok { - return Value{}, awserr.New("SharedCredsAccessKey", + id, err := iniProfile.GetKey("aws_access_key_id") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), - nil) + err) } - secret, ok := iniProfile["aws_secret_access_key"] - if !ok { - return Value{}, awserr.New("SharedCredsSecret", + secret, err := iniProfile.GetKey("aws_secret_access_key") + if err != nil { + return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), nil) } - token := iniProfile["aws_session_token"] + // Default to empty string if not found + token := iniProfile.Key("aws_session_token") return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, + AccessKeyID: id.String(), + SecretAccessKey: secret.String(), + SessionToken: token.String(), + ProviderName: SharedCredsProviderName, }, nil } @@ -106,6 +118,10 @@ func loadProfile(filename, profile string) (Value, error) { // Will return an error if the user's home directory path cannot be found. 
func (p *SharedCredentialsProvider) filename() (string, error) { if p.Filename == "" { + if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { + return p.Filename, nil + } + homeDir := os.Getenv("HOME") // *nix if homeDir == "" { // Windows homeDir = os.Getenv("USERPROFILE") diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go similarity index 60% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go rename to vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go index 530a9ac..4f5dab3 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go @@ -4,6 +4,9 @@ import ( "github.com/aws/aws-sdk-go/aws/awserr" ) +// StaticProviderName provides a name of Static provider +const StaticProviderName = "StaticProvider" + var ( // ErrStaticCredentialsEmpty is emitted when static credentials are empty. // @@ -11,7 +14,7 @@ var ( ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) ) -// A StaticProvider is a set of credentials which are set pragmatically, +// A StaticProvider is a set of credentials which are set programmatically, // and will never expire. type StaticProvider struct { Value @@ -27,12 +30,22 @@ func NewStaticCredentials(id, secret, token string) *Credentials { }}) } +// NewStaticCredentialsFromCreds returns a pointer to a new Credentials object +// wrapping the static credentials value provide. Same as NewStaticCredentials +// but takes the creds Value instead of individual fields +func NewStaticCredentialsFromCreds(creds Value) *Credentials { + return NewCredentials(&StaticProvider{Value: creds}) +} + // Retrieve returns the credentials or error if the credentials are invalid. func (s *StaticProvider) Retrieve() (Value, error) { if s.AccessKeyID == "" || s.SecretAccessKey == "" { - return Value{}, ErrStaticCredentialsEmpty + return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty } + if len(s.Value.ProviderName) == 0 { + s.Value.ProviderName = StaticProviderName + } return s.Value, nil } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go new file mode 100644 index 0000000..4108e43 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/stscreds/assume_role_provider.go @@ -0,0 +1,298 @@ +/* +Package stscreds are credential Providers to retrieve STS AWS credentials. + +STS provides multiple ways to retrieve credentials which can be used when making +future AWS service API operation calls. + +The SDK will ensure that per instance of credentials.Credentials all requests +to refresh the credentials will be synchronized. But, the SDK is unable to +ensure synchronous usage of the AssumeRoleProvider if the value is shared +between multiple Credentials, Sessions or service clients. + +Assume Role + +To assume an IAM role using STS with the SDK you can create a new Credentials +with the SDKs's stscreds package. + + // Initial credentials loaded from SDK's default credential chain. Such as + // the environment, shared credentials (~/.aws/credentials), or EC2 Instance + // Role. These credentials will be used to to make the STS Assume Role API. 
+ sess := session.Must(session.NewSession()) + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. + creds := stscreds.NewCredentials(sess, "myRoleArn") + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with static MFA Token + +To assume an IAM role with an MFA token you can either specify an MFA token code +directly or provide a function to prompt the user each time the credentials +need to refresh the role's credentials. Specifying the TokenCode should be used +for short lived operations that will not need to be refreshed, and when you do +not want to have direct control over how the user provides their MFA token. + +With TokenCode the AssumeRoleProvider will not be able to refresh the role's +credentials. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN using the MFA token code provided. + creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenCode = aws.String("00000000") + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +Assume Role with MFA Token Provider + +To assume an IAM role with MFA for longer running tasks where the credentials +may need to be refreshed, setting the TokenProvider field of AssumeRoleProvider +will allow the credential provider to prompt for a new MFA token code when the +role's credentials need to be refreshed. + +The StdinTokenProvider function is available to prompt on stdin to retrieve +the MFA token code from the user. You can also implement custom prompts by +satisfying the TokenProvider function signature. + +Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +have undesirable results as the StdinTokenProvider will not be synchronized. A +single Credentials with an AssumeRoleProvider can be shared safely. + + // Create the credentials from AssumeRoleProvider to assume the role + // referenced by the "myRoleARN" ARN. Prompting for MFA token from stdin. + creds := stscreds.NewCredentials(sess, "myRoleArn", func(p *stscreds.AssumeRoleProvider) { + p.SerialNumber = aws.String("myTokenSerialNumber") + p.TokenProvider = stscreds.StdinTokenProvider + }) + + // Create service client value configured for credentials + // from assumed role. + svc := s3.New(sess, &aws.Config{Credentials: creds}) + +*/ +package stscreds + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/service/sts" +) + +// StdinTokenProvider will prompt on stdout and read from stdin for a string value. +// An error is returned if reading from stdin fails. +// +// Use this function to read MFA tokens from stdin. The function makes no attempt +// to make atomic prompts from stdin across multiple goroutines. +// +// Using StdinTokenProvider with multiple AssumeRoleProviders, or Credentials will +// have undesirable results as the StdinTokenProvider will not be synchronized. A +// single Credentials with an AssumeRoleProvider can be shared safely. +// +// Will wait forever until something is provided on stdin.
+func StdinTokenProvider() (string, error) { + var v string + fmt.Printf("Assume Role MFA token code: ") + _, err := fmt.Scanln(&v) + + return v, err +} + +// ProviderName provides a name of AssumeRole provider +const ProviderName = "AssumeRoleProvider" + +// AssumeRoler represents the minimal subset of the STS client API used by this provider. +type AssumeRoler interface { + AssumeRole(input *sts.AssumeRoleInput) (*sts.AssumeRoleOutput, error) +} + +// DefaultDuration is the default amount of time in minutes that the credentials +// will be valid for. +var DefaultDuration = time.Duration(15) * time.Minute + +// AssumeRoleProvider retrieves temporary credentials from the STS service, and +// keeps track of their expiration time. +// +// This credential provider will be used by the SDKs default credential change +// when shared configuration is enabled, and the shared config or shared credentials +// file configure assume role. See Session docs for how to do this. +// +// AssumeRoleProvider does not provide any synchronization and it is not safe +// to share this value across multiple Credentials, Sessions, or service clients +// without also sharing the same Credentials instance. +type AssumeRoleProvider struct { + credentials.Expiry + + // STS client to make assume role request with. + Client AssumeRoler + + // Role to be assumed. + RoleARN string + + // Session name, if you wish to reuse the credentials elsewhere. + RoleSessionName string + + // Expiry duration of the STS credentials. Defaults to 15 minutes if not set. + Duration time.Duration + + // Optional ExternalID to pass along, defaults to nil if not set. + ExternalID *string + + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + SerialNumber *string + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // If SerialNumber is set and neither TokenCode nor TokenProvider are also + // set an error will be returned. + TokenCode *string + + // Async method of providing MFA token code for assuming an IAM role with MFA. + // The value returned by the function will be used as the TokenCode in the Retrieve + // call. See StdinTokenProvider for a provider that prompts and reads from stdin. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed when SerialNumber is also set and + // TokenCode is not set. + // + // If both TokenCode and TokenProvider is set, TokenProvider will be used and + // TokenCode is ignored. 
+ TokenProvider func() (string, error) + + // ExpiryWindow will allow the credentials to trigger refreshing prior to + // the credentials actually expiring. This is beneficial so race conditions + // with expiring credentials do not cause request to fail unexpectedly + // due to ExpiredTokenException exceptions. + // + // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true + // 10 seconds before the credentials are actually expired. + // + // If ExpiryWindow is 0 or less it will be ignored. + ExpiryWindow time.Duration +} + +// NewCredentials returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes a Config provider to create the STS client. The ConfigProvider is +// satisfied by the session.Session type. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentials(c client.ConfigProvider, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: sts.New(c), + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping the +// AssumeRoleProvider. The credentials will expire every 15 minutes and the +// role will be named after a nanosecond timestamp of this operation. +// +// Takes an AssumeRoler which can be satisfied by the STS client. +// +// It is safe to share the returned Credentials with multiple Sessions and +// service clients. All access to the credentials and refreshing them +// will be synchronized. +func NewCredentialsWithClient(svc AssumeRoler, roleARN string, options ...func(*AssumeRoleProvider)) *credentials.Credentials { + p := &AssumeRoleProvider{ + Client: svc, + RoleARN: roleARN, + Duration: DefaultDuration, + } + + for _, option := range options { + option(p) + } + + return credentials.NewCredentials(p) +} + +// Retrieve generates a new set of temporary credentials using STS. +func (p *AssumeRoleProvider) Retrieve() (credentials.Value, error) { + + // Apply defaults where parameters are not set. + if p.RoleSessionName == "" { + // Try to work out a role name that will hopefully end up unique. + p.RoleSessionName = fmt.Sprintf("%d", time.Now().UTC().UnixNano()) + } + if p.Duration == 0 { + // Expire as often as AWS permits. 
+ p.Duration = DefaultDuration + } + input := &sts.AssumeRoleInput{ + DurationSeconds: aws.Int64(int64(p.Duration / time.Second)), + RoleArn: aws.String(p.RoleARN), + RoleSessionName: aws.String(p.RoleSessionName), + ExternalId: p.ExternalID, + } + if p.Policy != nil { + input.Policy = p.Policy + } + if p.SerialNumber != nil { + if p.TokenCode != nil { + input.SerialNumber = p.SerialNumber + input.TokenCode = p.TokenCode + } else if p.TokenProvider != nil { + input.SerialNumber = p.SerialNumber + code, err := p.TokenProvider() + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + input.TokenCode = aws.String(code) + } else { + return credentials.Value{ProviderName: ProviderName}, + awserr.New("AssumeRoleTokenNotAvailable", + "assume role with MFA enabled, but neither TokenCode nor TokenProvider are set", nil) + } + } + + roleOutput, err := p.Client.AssumeRole(input) + if err != nil { + return credentials.Value{ProviderName: ProviderName}, err + } + + // We will proactively generate new credentials before they expire. + p.SetExpiration(*roleOutput.Credentials.Expiration, p.ExpiryWindow) + + return credentials.Value{ + AccessKeyID: *roleOutput.Credentials.AccessKeyId, + SecretAccessKey: *roleOutput.Credentials.SecretAccessKey, + SessionToken: *roleOutput.Credentials.SessionToken, + ProviderName: ProviderName, + }, nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go new file mode 100644 index 0000000..07afe3b --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -0,0 +1,163 @@ +// Package defaults is a collection of helpers to retrieve the SDK's default +// configuration and handlers. +// +// Generally this package shouldn't be used directly, but session.Session +// instead. This package is useful when you need to reset the defaults +// of a session or service client to the SDK defaults before setting +// additional parameters. +package defaults + +import ( + "fmt" + "net/http" + "net/url" + "os" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" + "github.com/aws/aws-sdk-go/aws/credentials/endpointcreds" + "github.com/aws/aws-sdk-go/aws/ec2metadata" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Defaults provides a collection of default values for SDK clients. +type Defaults struct { + Config *aws.Config + Handlers request.Handlers +} + +// Get returns the SDK's default values with Config and handlers pre-configured. +func Get() Defaults { + cfg := Config() + handlers := Handlers() + cfg.Credentials = CredChain(cfg, handlers) + + return Defaults{ + Config: cfg, + Handlers: handlers, + } +} + +// Config returns the default configuration without credentials. +// To retrieve a config with credentials also included use +// `defaults.Get().Config` instead. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the configuration of an +// existing service client or session. +func Config() *aws.Config { + return aws.NewConfig(). + WithCredentials(credentials.AnonymousCredentials). + WithRegion(os.Getenv("AWS_REGION")). + WithHTTPClient(http.DefaultClient). + WithMaxRetries(aws.UseServiceDefaultRetries). + WithLogger(aws.NewDefaultLogger()). + WithLogLevel(aws.LogOff). 
+ WithEndpointResolver(endpoints.DefaultResolver()) +} + +// Handlers returns the default request handlers. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the request handlers of an +// existing service client or session. +func Handlers() request.Handlers { + var handlers request.Handlers + + handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) + handlers.Validate.AfterEachFn = request.HandlerListStopOnError + handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) + handlers.Build.AfterEachFn = request.HandlerListStopOnError + handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) + handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler) + handlers.Send.PushBackNamed(corehandlers.SendHandler) + handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) + handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) + + return handlers +} + +// CredChain returns the default credential chain. +// +// Generally you shouldn't need to use this method directly, but +// is available if you need to reset the credentials of an +// existing service client or session's Config. +func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { + return credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credentials.EnvProvider{}, + &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, + RemoteCredProvider(*cfg, handlers), + }, + }) +} + +const ( + httpProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_FULL_URI" + ecsCredsProviderEnvVar = "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI" +) + +// RemoteCredProvider returns a credentials provider for the default remote +// endpoints such as EC2 or ECS Roles. 
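+//
+// An illustrative sketch, not part of the upstream file; the localhost URL is
+// a placeholder and assumes the container credentials endpoint is exported:
+//
+//    os.Setenv("AWS_CONTAINER_CREDENTIALS_FULL_URI", "http://localhost:8080/creds")
+//    provider := defaults.RemoteCredProvider(*defaults.Config(), defaults.Handlers())
+//    creds := credentials.NewCredentials(provider)
+//    cfg := aws.NewConfig().WithCredentials(creds)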
+func RemoteCredProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+	if u := os.Getenv(httpProviderEnvVar); len(u) > 0 {
+		return localHTTPCredProvider(cfg, handlers, u)
+	}
+
+	if uri := os.Getenv(ecsCredsProviderEnvVar); len(uri) > 0 {
+		u := fmt.Sprintf("http://169.254.170.2%s", uri)
+		return httpCredProvider(cfg, handlers, u)
+	}
+
+	return ec2RoleProvider(cfg, handlers)
+}
+
+func localHTTPCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+	var errMsg string
+
+	parsed, err := url.Parse(u)
+	if err != nil {
+		errMsg = fmt.Sprintf("invalid URL, %v", err)
+	} else if host := aws.URLHostname(parsed); !(host == "localhost" || host == "127.0.0.1") {
+		errMsg = fmt.Sprintf("invalid host address, %q, only localhost and 127.0.0.1 are valid.", host)
+	}
+
+	if len(errMsg) > 0 {
+		if cfg.Logger != nil {
+			cfg.Logger.Log("Ignoring, HTTP credential provider", errMsg, err)
+		}
+		return credentials.ErrorProvider{
+			Err:          awserr.New("CredentialsEndpointError", errMsg, err),
+			ProviderName: endpointcreds.ProviderName,
+		}
+	}
+
+	return httpCredProvider(cfg, handlers, u)
+}
+
+func httpCredProvider(cfg aws.Config, handlers request.Handlers, u string) credentials.Provider {
+	return endpointcreds.NewProviderClient(cfg, handlers, u,
+		func(p *endpointcreds.Provider) {
+			p.ExpiryWindow = 5 * time.Minute
+		},
+	)
+}
+
+func ec2RoleProvider(cfg aws.Config, handlers request.Handlers) credentials.Provider {
+	resolver := cfg.EndpointResolver
+	if resolver == nil {
+		resolver = endpoints.DefaultResolver()
+	}
+
+	e, _ := resolver.EndpointFor(endpoints.Ec2metadataServiceID, "")
+	return &ec2rolecreds.EC2RoleProvider{
+		Client:       ec2metadata.NewClient(cfg, handlers, e.URL, e.SigningRegion),
+		ExpiryWindow: 5 * time.Minute,
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
new file mode 100644
index 0000000..4fcb616
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/doc.go
@@ -0,0 +1,56 @@
+// Package aws provides the core SDK's utilities and shared types. Use this package's
+// utilities to simplify setting and reading API operation parameters.
+//
+// Value and Pointer Conversion Utilities
+//
+// This package includes a helper conversion utility for each scalar type the SDK's
+// APIs use. These utilities make getting a pointer to a scalar, and dereferencing
+// a pointer, easier.
+//
+// Each conversion utility comes in two forms. Value to Pointer and Pointer to Value.
+// The Pointer to Value form will safely dereference the pointer and return its value.
+// If the pointer was nil, the scalar's zero value will be returned.
+//
+// The value to pointer functions are named after the scalar type. So to get a
+// *string from a string value, use the "String" function. This makes it easy to
+// get a pointer to a literal string value, because getting the address of a
+// literal requires assigning the value to a variable first.
+//
+//    var strPtr *string
+//
+//    // Without the SDK's conversion functions
+//    str := "my string"
+//    strPtr = &str
+//
+//    // With the SDK's conversion functions
+//    strPtr = aws.String("my string")
+//
+//    // Convert *string to string value
+//    str = aws.StringValue(strPtr)
+//
+// In addition to scalars the aws package also includes conversion utilities for
+// map and slice types commonly used in API parameters. The map and slice
+// conversion functions use a similar naming pattern to the scalar conversion
+// functions.
+//
+//    var strPtrs []*string
+//    var strs []string = []string{"Go", "Gophers", "Go"}
+//
+//    // Convert []string to []*string
+//    strPtrs = aws.StringSlice(strs)
+//
+//    // Convert []*string to []string
+//    strs = aws.StringValueSlice(strPtrs)
+//
+// SDK Default HTTP Client
+//
+// The SDK will use the http.DefaultClient if an HTTP client is not provided to
+// the SDK's Session, or service client constructor. This means that if the
+// http.DefaultClient is modified by other components of your application the
+// modifications will be picked up by the SDK as well.
+//
+// In some cases this might be intended, but it is a better practice to create
+// a custom HTTP Client to share explicitly through your application. You can
+// configure the SDK to use the custom HTTP Client by setting the HTTPClient
+// value of the SDK's Config type when creating a Session or service client.
+package aws
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
new file mode 100644
index 0000000..984407a
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
@@ -0,0 +1,162 @@
+package ec2metadata
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+	"path"
+	"strings"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// GetMetadata uses the path provided to request information from the EC2
+// instance metadata service. The content will be returned as a string, or an
+// error if the request failed.
+func (c *EC2Metadata) GetMetadata(p string) (string, error) {
+	op := &request.Operation{
+		Name:       "GetMetadata",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "meta-data", p),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+
+	return output.Content, req.Send()
+}
+
+// GetUserData returns the userdata that was configured for the EC2 instance.
+// If there is no user-data set up for the instance, a "NotFoundError" error
+// code will be returned.
+func (c *EC2Metadata) GetUserData() (string, error) {
+	op := &request.Operation{
+		Name:       "GetUserData",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "user-data"),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+	req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
+		if r.HTTPResponse.StatusCode == http.StatusNotFound {
+			r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
+		}
+	})
+
+	return output.Content, req.Send()
+}
+
+// GetDynamicData uses the path provided to request information from the EC2
+// instance metadata service for dynamic data. The content will be returned
+// as a string, or an error if the request failed.
+func (c *EC2Metadata) GetDynamicData(p string) (string, error) {
+	op := &request.Operation{
+		Name:       "GetDynamicData",
+		HTTPMethod: "GET",
+		HTTPPath:   path.Join("/", "dynamic", p),
+	}
+
+	output := &metadataOutput{}
+	req := c.NewRequest(op, nil, output)
+
+	return output.Content, req.Send()
+}
+
+// GetInstanceIdentityDocument retrieves an identity document describing an
+// instance. An error is returned if the request fails or the response cannot
+// be parsed.
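+//
+// A minimal usage sketch, not part of the upstream file; "sess" is a
+// placeholder for an existing session:
+//
+//    svc := ec2metadata.New(sess)
+//    doc, err := svc.GetInstanceIdentityDocument()
+//    if err == nil {
+//        fmt.Println(doc.Region, doc.InstanceID)
+//    }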
+func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { + resp, err := c.GetDynamicData("instance-identity/document") + if err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 instance identity document", err) + } + + doc := EC2InstanceIdentityDocument{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { + return EC2InstanceIdentityDocument{}, + awserr.New("SerializationError", + "failed to decode EC2 instance identity document", err) + } + + return doc, nil +} + +// IAMInfo retrieves IAM info from the metadata API +func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { + resp, err := c.GetMetadata("iam/info") + if err != nil { + return EC2IAMInfo{}, + awserr.New("EC2MetadataRequestError", + "failed to get EC2 IAM info", err) + } + + info := EC2IAMInfo{} + if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { + return EC2IAMInfo{}, + awserr.New("SerializationError", + "failed to decode EC2 IAM info", err) + } + + if info.Code != "Success" { + errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) + return EC2IAMInfo{}, + awserr.New("EC2MetadataError", errMsg, nil) + } + + return info, nil +} + +// Region returns the region the instance is running in. +func (c *EC2Metadata) Region() (string, error) { + resp, err := c.GetMetadata("placement/availability-zone") + if err != nil { + return "", err + } + + // returns region without the suffix. Eg: us-west-2a becomes us-west-2 + return resp[:len(resp)-1], nil +} + +// Available returns if the application has access to the EC2 Metadata service. +// Can be used to determine if application is running within an EC2 Instance and +// the metadata service is available. +func (c *EC2Metadata) Available() bool { + if _, err := c.GetMetadata("instance-id"); err != nil { + return false + } + + return true +} + +// An EC2IAMInfo provides the shape for unmarshaling +// an IAM info from the metadata API +type EC2IAMInfo struct { + Code string + LastUpdated time.Time + InstanceProfileArn string + InstanceProfileID string +} + +// An EC2InstanceIdentityDocument provides the shape for unmarshaling +// an instance identity document +type EC2InstanceIdentityDocument struct { + DevpayProductCodes []string `json:"devpayProductCodes"` + AvailabilityZone string `json:"availabilityZone"` + PrivateIP string `json:"privateIp"` + Version string `json:"version"` + Region string `json:"region"` + InstanceID string `json:"instanceId"` + BillingProducts []string `json:"billingProducts"` + InstanceType string `json:"instanceType"` + AccountID string `json:"accountId"` + PendingTime time.Time `json:"pendingTime"` + ImageID string `json:"imageId"` + KernelID string `json:"kernelId"` + RamdiskID string `json:"ramdiskId"` + Architecture string `json:"architecture"` +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go new file mode 100644 index 0000000..5b4379d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -0,0 +1,124 @@ +// Package ec2metadata provides the client for making API calls to the +// EC2 Metadata service. 
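+//
+// A short usage sketch, not part of the upstream file; "sess" is a placeholder
+// for an existing session:
+//
+//    meta := ec2metadata.New(sess)
+//    if meta.Available() {
+//        region, err := meta.Region()
+//        if err == nil {
+//            fmt.Println("running in", region)
+//        }
+//    }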
+package ec2metadata
+
+import (
+	"bytes"
+	"errors"
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/awserr"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+)
+
+// ServiceName is the name of the service.
+const ServiceName = "ec2metadata"
+
+// An EC2Metadata is an EC2 Metadata service client.
+type EC2Metadata struct {
+	*client.Client
+}
+
+// New creates a new instance of the EC2Metadata client with a session.
+// This client is safe to use across multiple goroutines.
+//
+//
+// Example:
+//    // Create an EC2Metadata client from just a session.
+//    svc := ec2metadata.New(mySession)
+//
+//    // Create an EC2Metadata client with additional configuration
+//    svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
+	c := p.ClientConfig(ServiceName, cfgs...)
+	return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
+}
+
+// NewClient returns a new EC2Metadata client. Should be used to create
+// a client when not using a session. Generally using just New with a session
+// is preferred.
+//
+// If the HTTP client provided is the unmodified stdlib default, or no client is
+// provided, the EC2Metadata client's HTTP timeout will be shortened.
+// To disable this override set Config.EC2MetadataDisableTimeoutOverride to true; it is enabled by default.
+func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata {
+	if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) {
+		// If the http client is unmodified and this feature is not disabled
+		// set custom timeouts for EC2Metadata requests.
+		cfg.HTTPClient = &http.Client{
+			// use a shorter timeout than default because the metadata
+			// service is local if it is running, and to fail faster
+			// if not running on an ec2 instance.
+			Timeout: 5 * time.Second,
+		}
+	}
+
+	svc := &EC2Metadata{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName: ServiceName,
+				Endpoint:    endpoint,
+				APIVersion:  "latest",
+			},
+			handlers,
+		),
+	}
+
+	svc.Handlers.Unmarshal.PushBack(unmarshalHandler)
+	svc.Handlers.UnmarshalError.PushBack(unmarshalError)
+	svc.Handlers.Validate.Clear()
+	svc.Handlers.Validate.PushBack(validateEndpointHandler)
+
+	// Add additional options to the service config
+	for _, option := range opts {
+		option(svc.Client)
+	}
+
+	return svc
+}
+
+func httpClientZero(c *http.Client) bool {
+	return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0)
+}
+
+type metadataOutput struct {
+	Content string
+}
+
+func unmarshalHandler(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata response", err)
+		return
+	}
+
+	if data, ok := r.Data.(*metadataOutput); ok {
+		data.Content = b.String()
+	}
+}
+
+func unmarshalError(r *request.Request) {
+	defer r.HTTPResponse.Body.Close()
+	b := &bytes.Buffer{}
+	if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil {
+		r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error response", err)
+		return
+	}
+
+	// Response body format is not consistent between metadata endpoints.
+	// Grab the error message as a string and include that as the source error.
+	r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String()))
+}
+
+func validateEndpointHandler(r *request.Request) {
+	if r.ClientInfo.Endpoint == "" {
+		r.Error = aws.ErrMissingEndpoint
+	}
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
new file mode 100644
index 0000000..74f72de
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/decode.go
@@ -0,0 +1,133 @@
+package endpoints
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+
+	"github.com/aws/aws-sdk-go/aws/awserr"
+)
+
+type modelDefinition map[string]json.RawMessage
+
+// DecodeModelOptions are the options for how the endpoints model definition
+// is decoded.
+type DecodeModelOptions struct {
+	SkipCustomizations bool
+}
+
+// Set combines all of the option functions together.
+func (d *DecodeModelOptions) Set(optFns ...func(*DecodeModelOptions)) {
+	for _, fn := range optFns {
+		fn(d)
+	}
+}
+
+// DecodeModel unmarshals a Regions and Endpoint model definition file into
+// an endpoint Resolver. If the file format is not supported, or an error occurs
+// when unmarshaling the model, an error will be returned.
+//
+// Casting the return value of this func to an EnumPartitions will
+// allow you to get a list of the partitions in the order the endpoints
+// will be resolved in.
+//
+//    resolver, err := endpoints.DecodeModel(reader)
+//
+//    partitions := resolver.(endpoints.EnumPartitions).Partitions()
+//    for _, p := range partitions {
+//        // ... inspect partitions
+//    }
+func DecodeModel(r io.Reader, optFns ...func(*DecodeModelOptions)) (Resolver, error) {
+	var opts DecodeModelOptions
+	opts.Set(optFns...)
+
+	// Get the version of the partition file to determine what
+	// unmarshaling model to use.
+ modelDef := modelDefinition{} + if err := json.NewDecoder(r).Decode(&modelDef); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + var version string + if b, ok := modelDef["version"]; ok { + version = string(b) + } else { + return nil, newDecodeModelError("endpoints version not found in model", nil) + } + + if version == "3" { + return decodeV3Endpoints(modelDef, opts) + } + + return nil, newDecodeModelError( + fmt.Sprintf("endpoints version %s, not supported", version), nil) +} + +func decodeV3Endpoints(modelDef modelDefinition, opts DecodeModelOptions) (Resolver, error) { + b, ok := modelDef["partitions"] + if !ok { + return nil, newDecodeModelError("endpoints model missing partitions", nil) + } + + ps := partitions{} + if err := json.Unmarshal(b, &ps); err != nil { + return nil, newDecodeModelError("failed to decode endpoints model", err) + } + + if opts.SkipCustomizations { + return ps, nil + } + + // Customization + for i := 0; i < len(ps); i++ { + p := &ps[i] + custAddEC2Metadata(p) + custAddS3DualStack(p) + custRmIotDataService(p) + } + + return ps, nil +} + +func custAddS3DualStack(p *partition) { + if p.ID != "aws" { + return + } + + s, ok := p.Services["s3"] + if !ok { + return + } + + s.Defaults.HasDualStack = boxedTrue + s.Defaults.DualStackHostname = "{service}.dualstack.{region}.{dnsSuffix}" + + p.Services["s3"] = s +} + +func custAddEC2Metadata(p *partition) { + p.Services["ec2metadata"] = service{ + IsRegionalized: boxedFalse, + PartitionEndpoint: "aws-global", + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + } +} + +func custRmIotDataService(p *partition) { + delete(p.Services, "data.iot") +} + +type decodeModelError struct { + awsError +} + +func newDecodeModelError(msg string, err error) decodeModelError { + return decodeModelError{ + awsError: awserr.New("DecodeEndpointsModelError", msg, err), + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go new file mode 100644 index 0000000..b1ae679 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -0,0 +1,2221 @@ +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + +// Partition identifiers +const ( + AwsPartitionID = "aws" // AWS Standard partition. + AwsCnPartitionID = "aws-cn" // AWS China partition. + AwsUsGovPartitionID = "aws-us-gov" // AWS GovCloud (US) partition. +) + +// AWS Standard partition's regions. +const ( + ApNortheast1RegionID = "ap-northeast-1" // Asia Pacific (Tokyo). + ApNortheast2RegionID = "ap-northeast-2" // Asia Pacific (Seoul). + ApSouth1RegionID = "ap-south-1" // Asia Pacific (Mumbai). + ApSoutheast1RegionID = "ap-southeast-1" // Asia Pacific (Singapore). + ApSoutheast2RegionID = "ap-southeast-2" // Asia Pacific (Sydney). + CaCentral1RegionID = "ca-central-1" // Canada (Central). + EuCentral1RegionID = "eu-central-1" // EU (Frankfurt). + EuWest1RegionID = "eu-west-1" // EU (Ireland). + EuWest2RegionID = "eu-west-2" // EU (London). + SaEast1RegionID = "sa-east-1" // South America (Sao Paulo). + UsEast1RegionID = "us-east-1" // US East (N. Virginia). + UsEast2RegionID = "us-east-2" // US East (Ohio). + UsWest1RegionID = "us-west-1" // US West (N. California). + UsWest2RegionID = "us-west-2" // US West (Oregon). +) + +// AWS China partition's regions. 
+const ( + CnNorth1RegionID = "cn-north-1" // China (Beijing). +) + +// AWS GovCloud (US) partition's regions. +const ( + UsGovWest1RegionID = "us-gov-west-1" // AWS GovCloud (US). +) + +// Service identifiers +const ( + AcmServiceID = "acm" // Acm. + ApigatewayServiceID = "apigateway" // Apigateway. + ApplicationAutoscalingServiceID = "application-autoscaling" // ApplicationAutoscaling. + Appstream2ServiceID = "appstream2" // Appstream2. + AutoscalingServiceID = "autoscaling" // Autoscaling. + BatchServiceID = "batch" // Batch. + BudgetsServiceID = "budgets" // Budgets. + ClouddirectoryServiceID = "clouddirectory" // Clouddirectory. + CloudformationServiceID = "cloudformation" // Cloudformation. + CloudfrontServiceID = "cloudfront" // Cloudfront. + CloudhsmServiceID = "cloudhsm" // Cloudhsm. + CloudsearchServiceID = "cloudsearch" // Cloudsearch. + CloudtrailServiceID = "cloudtrail" // Cloudtrail. + CodebuildServiceID = "codebuild" // Codebuild. + CodecommitServiceID = "codecommit" // Codecommit. + CodedeployServiceID = "codedeploy" // Codedeploy. + CodepipelineServiceID = "codepipeline" // Codepipeline. + CodestarServiceID = "codestar" // Codestar. + CognitoIdentityServiceID = "cognito-identity" // CognitoIdentity. + CognitoIdpServiceID = "cognito-idp" // CognitoIdp. + CognitoSyncServiceID = "cognito-sync" // CognitoSync. + ConfigServiceID = "config" // Config. + CurServiceID = "cur" // Cur. + DatapipelineServiceID = "datapipeline" // Datapipeline. + DevicefarmServiceID = "devicefarm" // Devicefarm. + DirectconnectServiceID = "directconnect" // Directconnect. + DiscoveryServiceID = "discovery" // Discovery. + DmsServiceID = "dms" // Dms. + DsServiceID = "ds" // Ds. + DynamodbServiceID = "dynamodb" // Dynamodb. + Ec2ServiceID = "ec2" // Ec2. + Ec2metadataServiceID = "ec2metadata" // Ec2metadata. + EcrServiceID = "ecr" // Ecr. + EcsServiceID = "ecs" // Ecs. + ElasticacheServiceID = "elasticache" // Elasticache. + ElasticbeanstalkServiceID = "elasticbeanstalk" // Elasticbeanstalk. + ElasticfilesystemServiceID = "elasticfilesystem" // Elasticfilesystem. + ElasticloadbalancingServiceID = "elasticloadbalancing" // Elasticloadbalancing. + ElasticmapreduceServiceID = "elasticmapreduce" // Elasticmapreduce. + ElastictranscoderServiceID = "elastictranscoder" // Elastictranscoder. + EmailServiceID = "email" // Email. + EntitlementMarketplaceServiceID = "entitlement.marketplace" // EntitlementMarketplace. + EsServiceID = "es" // Es. + EventsServiceID = "events" // Events. + FirehoseServiceID = "firehose" // Firehose. + GameliftServiceID = "gamelift" // Gamelift. + GlacierServiceID = "glacier" // Glacier. + HealthServiceID = "health" // Health. + IamServiceID = "iam" // Iam. + ImportexportServiceID = "importexport" // Importexport. + InspectorServiceID = "inspector" // Inspector. + IotServiceID = "iot" // Iot. + KinesisServiceID = "kinesis" // Kinesis. + KinesisanalyticsServiceID = "kinesisanalytics" // Kinesisanalytics. + KmsServiceID = "kms" // Kms. + LambdaServiceID = "lambda" // Lambda. + LightsailServiceID = "lightsail" // Lightsail. + LogsServiceID = "logs" // Logs. + MachinelearningServiceID = "machinelearning" // Machinelearning. + MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. + MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. + MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. + ModelsLexServiceID = "models.lex" // ModelsLex. + MonitoringServiceID = "monitoring" // Monitoring. 
+ MturkRequesterServiceID = "mturk-requester" // MturkRequester. + OpsworksServiceID = "opsworks" // Opsworks. + OpsworksCmServiceID = "opsworks-cm" // OpsworksCm. + OrganizationsServiceID = "organizations" // Organizations. + PinpointServiceID = "pinpoint" // Pinpoint. + PollyServiceID = "polly" // Polly. + RdsServiceID = "rds" // Rds. + RedshiftServiceID = "redshift" // Redshift. + RekognitionServiceID = "rekognition" // Rekognition. + Route53ServiceID = "route53" // Route53. + Route53domainsServiceID = "route53domains" // Route53domains. + RuntimeLexServiceID = "runtime.lex" // RuntimeLex. + S3ServiceID = "s3" // S3. + SdbServiceID = "sdb" // Sdb. + ServicecatalogServiceID = "servicecatalog" // Servicecatalog. + ShieldServiceID = "shield" // Shield. + SmsServiceID = "sms" // Sms. + SnowballServiceID = "snowball" // Snowball. + SnsServiceID = "sns" // Sns. + SqsServiceID = "sqs" // Sqs. + SsmServiceID = "ssm" // Ssm. + StatesServiceID = "states" // States. + StoragegatewayServiceID = "storagegateway" // Storagegateway. + StreamsDynamodbServiceID = "streams.dynamodb" // StreamsDynamodb. + StsServiceID = "sts" // Sts. + SupportServiceID = "support" // Support. + SwfServiceID = "swf" // Swf. + TaggingServiceID = "tagging" // Tagging. + WafServiceID = "waf" // Waf. + WafRegionalServiceID = "waf-regional" // WafRegional. + WorkdocsServiceID = "workdocs" // Workdocs. + WorkspacesServiceID = "workspaces" // Workspaces. + XrayServiceID = "xray" // Xray. +) + +// DefaultResolver returns an Endpoint resolver that will be able +// to resolve endpoints for: AWS Standard, AWS China, and AWS GovCloud (US). +// +// Use DefaultPartitions() to get the list of the default partitions. +func DefaultResolver() Resolver { + return defaultPartitions +} + +// DefaultPartitions returns a list of the partitions the SDK is bundled +// with. The available partitions are: AWS Standard, AWS China, and AWS GovCloud (US). +// +// partitions := endpoints.DefaultPartitions +// for _, p := range partitions { +// // ... inspect partitions +// } +func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() +} + +var defaultPartitions = partitions{ + awsPartition, + awscnPartition, + awsusgovPartition, +} + +// AwsPartition returns the Resolver for AWS Standard. +func AwsPartition() Partition { + return awsPartition.Partition() +} + +var awsPartition = partition{ + ID: "aws", + Name: "AWS Standard", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^(us|eu|ap|sa|ca)\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "ap-northeast-1": region{ + Description: "Asia Pacific (Tokyo)", + }, + "ap-northeast-2": region{ + Description: "Asia Pacific (Seoul)", + }, + "ap-south-1": region{ + Description: "Asia Pacific (Mumbai)", + }, + "ap-southeast-1": region{ + Description: "Asia Pacific (Singapore)", + }, + "ap-southeast-2": region{ + Description: "Asia Pacific (Sydney)", + }, + "ca-central-1": region{ + Description: "Canada (Central)", + }, + "eu-central-1": region{ + Description: "EU (Frankfurt)", + }, + "eu-west-1": region{ + Description: "EU (Ireland)", + }, + "eu-west-2": region{ + Description: "EU (London)", + }, + "sa-east-1": region{ + Description: "South America (Sao Paulo)", + }, + "us-east-1": region{ + Description: "US East (N. 
Virginia)", + }, + "us-east-2": region{ + Description: "US East (Ohio)", + }, + "us-west-1": region{ + Description: "US West (N. California)", + }, + "us-west-2": region{ + Description: "US West (Oregon)", + }, + }, + Services: services{ + "acm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "apigateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "application-autoscaling": service{ + Defaults: endpoint{ + Hostname: "autoscaling.{region}.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "application-autoscaling", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "appstream2": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + CredentialScope: credentialScope{ + Service: "appstream", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "batch": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "budgets": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "budgets.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "clouddirectory": service{ + + Endpoints: endpoints{ + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": 
endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudfront": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "cloudfront.amazonaws.com", + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudsearch": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codebuild": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codecommit": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codepipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "codestar": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-identity": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-idp": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, 
+ "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cognito-sync": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "cur": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "datapipeline": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "devicefarm": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "discovery": service{ + + Endpoints: endpoints{ + "us-west-2": endpoint{}, + }, + }, + "dms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + 
"ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "ecr": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ecs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticfilesystem": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.{service}.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "eu-west-1": endpoint{}, + 
"eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{region}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "elastictranscoder": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "email": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "entitlement.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "es": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "firehose": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "gamelift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "health": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "iam.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "importexport": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "importexport.amazonaws.com", + SignatureVersions: []string{"v2", "v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + Service: "IngestionService", + }, + }, + }, + }, + "inspector": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + 
"eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "iot": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "execute-api", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kinesisanalytics": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "lightsail": service{ + + Endpoints: endpoints{ + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "machinelearning": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "marketplacecommerceanalytics": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "metering.marketplace": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "aws-marketplace", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mobileanalytics": service{ + + Endpoints: endpoints{ + 
"us-east-1": endpoint{}, + }, + }, + "models.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "mturk-requester": service{ + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "sandbox": endpoint{ + Hostname: "mturk-requester-sandbox.us-east-1.amazonaws.com", + }, + "us-east-1": endpoint{}, + }, + }, + "opsworks": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "opsworks-cm": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "organizations": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "organizations.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "pinpoint": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "mobiletargeting", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "polly": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "{service}.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "rekognition": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "route53": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "route53.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "route53domains": service{ + + 
Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "runtime.lex": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "lex", + }, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "s3": service{ + PartitionEndpoint: "us-east-1", + IsRegionalized: boxedTrue, + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + + HasDualStack: boxedTrue, + DualStackHostname: "{service}.dualstack.{region}.{dnsSuffix}", + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{ + Hostname: "s3-ap-northeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{ + Hostname: "s3-ap-southeast-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ap-southeast-2": endpoint{ + Hostname: "s3-ap-southeast-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{ + Hostname: "s3-eu-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "eu-west-2": endpoint{}, + "s3-external-1": endpoint{ + Hostname: "s3-external-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{ + Hostname: "s3-sa-east-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-1": endpoint{ + Hostname: "s3.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{ + Hostname: "s3-us-west-1.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + "us-west-2": endpoint{ + Hostname: "s3-us-west-2.amazonaws.com", + SignatureVersions: []string{"s3", "s3v4"}, + }, + }, + }, + "sdb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"v2"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + Hostname: "sdb.amazonaws.com", + }, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "servicecatalog": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "shield": service{ + IsRegionalized: boxedFalse, + Defaults: endpoint{ + SSLCommonName: "Shield.us-east-1.amazonaws.com", + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "sms": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "ap-south-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + 
"ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sqs": service{ + Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{ + SSLCommonName: "queue.{dnsSuffix}", + }, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "ssm": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "states": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "local": endpoint{ + Hostname: "localhost:8000", + Protocols: []string{"http"}, + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "sts": service{ + PartitionEndpoint: "aws-global", + Defaults: endpoint{ + Hostname: "sts.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{ + Hostname: "sts.ap-northeast-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-northeast-2", + }, + }, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "aws-global": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-1-fips": endpoint{ + Hostname: "sts-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + 
"us-east-2": endpoint{}, + "us-east-2-fips": endpoint{ + Hostname: "sts-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "us-west-1": endpoint{}, + "us-west-1-fips": endpoint{ + Hostname: "sts-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "us-west-2": endpoint{}, + "us-west-2-fips": endpoint{ + Hostname: "sts-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + }, + }, + "support": service{ + + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "waf": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "waf.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, + "waf-regional": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workdocs": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "workspaces": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + "xray": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "ap-south-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, + }, +} + +// AwsCnPartition returns the Resolver for AWS China. 
+func AwsCnPartition() Partition { + return awscnPartition.Partition() +} + +var awscnPartition = partition{ + ID: "aws-cn", + Name: "AWS China", + DNSSuffix: "amazonaws.com.cn", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^cn\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "cn-north-1": region{ + Description: "China (Beijing)", + }, + }, + Services: services{ + "autoscaling": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "codedeploy": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ec2": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "elasticbeanstalk": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "elasticmapreduce": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "glacier": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-cn-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-cn-global": endpoint{ + Hostname: "iam.cn-north-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-north-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "logs": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "monitoring": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + SignatureVersions: []string{"s3v4"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sns": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sqs": service{ + 
Defaults: endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "storagegateway": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + Protocols: []string{"http", "https"}, + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + "tagging": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + }, + }, + }, +} + +// AwsUsGovPartition returns the Resolver for AWS GovCloud (US). +func AwsUsGovPartition() Partition { + return awsusgovPartition.Partition() +} + +var awsusgovPartition = partition{ + ID: "aws-us-gov", + Name: "AWS GovCloud (US)", + DNSSuffix: "amazonaws.com", + RegionRegex: regionRegex{ + Regexp: func() *regexp.Regexp { + reg, _ := regexp.Compile("^us\\-gov\\-\\w+\\-\\d+$") + return reg + }(), + }, + Defaults: endpoint{ + Hostname: "{service}.{region}.{dnsSuffix}", + Protocols: []string{"https"}, + SignatureVersions: []string{"v4"}, + }, + Regions: regions{ + "us-gov-west-1": region{ + Description: "AWS GovCloud (US)", + }, + }, + Services: services{ + "autoscaling": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "cloudformation": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudhsm": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "cloudtrail": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "config": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "directconnect": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "dynamodb": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ec2": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "ec2metadata": service{ + PartitionEndpoint: "aws-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-global": endpoint{ + Hostname: "169.254.169.254/latest", + Protocols: []string{"http"}, + }, + }, + }, + "elasticache": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "elasticloadbalancing": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "elasticmapreduce": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "events": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "glacier": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "iam": service{ + PartitionEndpoint: "aws-us-gov-global", + IsRegionalized: boxedFalse, + + Endpoints: endpoints{ + "aws-us-gov-global": endpoint{ + Hostname: "iam.us-gov.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + }, + }, + "kinesis": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "kms": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "lambda": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "logs": 
service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "monitoring": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "rds": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "redshift": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "s3": service{ + Defaults: endpoint{ + SignatureVersions: []string{"s3", "s3v4"}, + }, + Endpoints: endpoints{ + "fips-us-gov-west-1": endpoint{ + Hostname: "s3-fips-us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-west-1": endpoint{ + Hostname: "s3-us-gov-west-1.amazonaws.com", + Protocols: []string{"http", "https"}, + }, + }, + }, + "snowball": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "sns": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + Protocols: []string{"http", "https"}, + }, + }, + }, + "sqs": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{ + SSLCommonName: "{region}.queue.{dnsSuffix}", + Protocols: []string{"http", "https"}, + }, + }, + }, + "streams.dynamodb": service{ + Defaults: endpoint{ + CredentialScope: credentialScope{ + Service: "dynamodb", + }, + }, + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "sts": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + "swf": service{ + + Endpoints: endpoints{ + "us-gov-west-1": endpoint{}, + }, + }, + }, +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go new file mode 100644 index 0000000..a0e9bc4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/doc.go @@ -0,0 +1,66 @@ +// Package endpoints provides the types and functionality for defining regions +// and endpoints, as well as querying those definitions. +// +// The SDK's Regions and Endpoints metadata is code generated into the endpoints +// package, and is accessible via the DefaultResolver function. This function +// returns a endpoint Resolver will search the metadata and build an associated +// endpoint if one is found. The default resolver will search all partitions +// known by the SDK. e.g AWS Standard (aws), AWS China (aws-cn), and +// AWS GovCloud (US) (aws-us-gov). +// . +// +// Enumerating Regions and Endpoint Metadata +// +// Casting the Resolver returned by DefaultResolver to a EnumPartitions interface +// will allow you to get access to the list of underlying Partitions with the +// Partitions method. This is helpful if you want to limit the SDK's endpoint +// resolving to a single partition, or enumerate regions, services, and endpoints +// in the partition. +// +// resolver := endpoints.DefaultResolver() +// partitions := resolver.(endpoints.EnumPartitions).Partitions() +// +// for _, p := range partitions { +// fmt.Println("Regions for", p.Name) +// for id, _ := range p.Regions() { +// fmt.Println("*", id) +// } +// +// fmt.Println("Services for", p.Name) +// for id, _ := range p.Services() { +// fmt.Println("*", id) +// } +// } +// +// Using Custom Endpoints +// +// The endpoints package also gives you the ability to use your own logic how +// endpoints are resolved. This is a great way to define a custom endpoint +// for select services, without passing that logic down through your code. +// +// If a type implements the Resolver interface it can be used to resolve +// endpoints. 
To use this with the SDK's Session and Config set the value +// of the type to the EndpointsResolver field of aws.Config when initializing +// the session, or service client. +// +// In addition the ResolverFunc is a wrapper for a func matching the signature +// of Resolver.EndpointFor, converting it to a type that satisfies the +// Resolver interface. +// +// +// myCustomResolver := func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) { +// if service == endpoints.S3ServiceID { +// return endpoints.ResolvedEndpoint{ +// URL: "s3.custom.endpoint.com", +// SigningRegion: "custom-signing-region", +// }, nil +// } +// +// return endpoints.DefaultResolver().EndpointFor(service, region, optFns...) +// } +// +// sess := session.Must(session.NewSession(&aws.Config{ +// Region: aws.String("us-west-2"), +// EndpointResolver: endpoints.ResolverFunc(myCustomResolver), +// })) +package endpoints diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go new file mode 100644 index 0000000..9c3eedb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/endpoints.go @@ -0,0 +1,439 @@ +package endpoints + +import ( + "fmt" + "regexp" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Options provide the configuration needed to direct how the +// endpoints will be resolved. +type Options struct { + // DisableSSL forces the endpoint to be resolved as HTTP. + // instead of HTTPS if the service supports it. + DisableSSL bool + + // Sets the resolver to resolve the endpoint as a dualstack endpoint + // for the service. If dualstack support for a service is not known and + // StrictMatching is not enabled a dualstack endpoint for the service will + // be returned. This endpoint may not be valid. If StrictMatching is + // enabled only services that are known to support dualstack will return + // dualstack endpoints. + UseDualStack bool + + // Enables strict matching of services and regions resolved endpoints. + // If the partition doesn't enumerate the exact service and region an + // error will be returned. This option will prevent returning endpoints + // that look valid, but may not resolve to any real endpoint. + StrictMatching bool + + // Enables resolving a service endpoint based on the region provided if the + // service does not exist. The service endpoint ID will be used as the service + // domain name prefix. By default the endpoint resolver requires the service + // to be known when resolving endpoints. + // + // If resolving an endpoint on the partition list the provided region will + // be used to determine which partition's domain name pattern to the service + // endpoint ID with. If both the service and region are unkonwn and resolving + // the endpoint on partition list an UnknownEndpointError error will be returned. + // + // If resolving and endpoint on a partition specific resolver that partition's + // domain name pattern will be used with the service endpoint ID. If both + // region and service do not exist when resolving an endpoint on a specific + // partition the partition's domain pattern will be used to combine the + // endpoint and region together. + // + // This option is ignored if StrictMatching is enabled. + ResolveUnknownService bool +} + +// Set combines all of the option functions together. +func (o *Options) Set(optFns ...func(*Options)) { + for _, fn := range optFns { + fn(o) + } +} + +// DisableSSLOption sets the DisableSSL options. 
Can be used as a functional +// option when resolving endpoints. +func DisableSSLOption(o *Options) { + o.DisableSSL = true +} + +// UseDualStackOption sets the UseDualStack option. Can be used as a functional +// option when resolving endpoints. +func UseDualStackOption(o *Options) { + o.UseDualStack = true +} + +// StrictMatchingOption sets the StrictMatching option. Can be used as a functional +// option when resolving endpoints. +func StrictMatchingOption(o *Options) { + o.StrictMatching = true +} + +// ResolveUnknownServiceOption sets the ResolveUnknownService option. Can be used +// as a functional option when resolving endpoints. +func ResolveUnknownServiceOption(o *Options) { + o.ResolveUnknownService = true +} + +// A Resolver provides the interface for functionality to resolve endpoints. +// The build in Partition and DefaultResolver return value satisfy this interface. +type Resolver interface { + EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) +} + +// ResolverFunc is a helper utility that wraps a function so it satisfies the +// Resolver interface. This is useful when you want to add additional endpoint +// resolving logic, or stub out specific endpoints with custom values. +type ResolverFunc func(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) + +// EndpointFor wraps the ResolverFunc function to satisfy the Resolver interface. +func (fn ResolverFunc) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return fn(service, region, opts...) +} + +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will set HTTP instead of the default HTTPS. +// +// If disableSSL is set, it will only set the URL's scheme if the URL does not +// contain a scheme. +func AddScheme(endpoint string, disableSSL bool) string { + if !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} + +// EnumPartitions a provides a way to retrieve the underlying partitions that +// make up the SDK's default Resolver, or any resolver decoded from a model +// file. +// +// Use this interface with DefaultResolver and DecodeModels to get the list of +// Partitions. +type EnumPartitions interface { + Partitions() []Partition +} + +// RegionsForService returns a map of regions for the partition and service. +// If either the partition or service does not exist false will be returned +// as the second parameter. +// +// This example shows how to get the regions for DynamoDB in the AWS partition. +// rs, exists := endpoints.RegionsForService(endpoints.DefaultPartitions(), endpoints.AwsPartitionID, endpoints.DynamodbServiceID) +// +// This is equivalent to using the partition directly. +// rs := endpoints.AwsPartition().Services()[endpoints.DynamodbServiceID].Regions() +func RegionsForService(ps []Partition, partitionID, serviceID string) (map[string]Region, bool) { + for _, p := range ps { + if p.ID() != partitionID { + continue + } + if _, ok := p.p.Services[serviceID]; !ok { + break + } + + s := Service{ + id: serviceID, + p: p.p, + } + return s.Regions(), true + } + + return map[string]Region{}, false +} + +// PartitionForRegion returns the first partition which includes the region +// passed in. 
This includes both known regions and regions which match +// a pattern supported by the partition which may include regions that are +// not explicitly known by the partition. Use the Regions method of the +// returned Partition if explicit support is needed. +func PartitionForRegion(ps []Partition, regionID string) (Partition, bool) { + for _, p := range ps { + if _, ok := p.p.Regions[regionID]; ok || p.p.RegionRegex.MatchString(regionID) { + return p, true + } + } + + return Partition{}, false +} + +// A Partition provides the ability to enumerate the partition's regions +// and services. +type Partition struct { + id string + p *partition +} + +// ID returns the identifier of the partition. +func (p Partition) ID() string { return p.id } + +// EndpointFor attempts to resolve the endpoint based on service and region. +// See Options for information on configuring how the endpoint is resolved. +// +// If the service cannot be found in the metadata the UnknownServiceError +// error will be returned. This validation will occur regardless if +// StrictMatching is enabled. To enable resolving unknown services set the +// "ResolveUnknownService" option to true. When StrictMatching is disabled +// this option allows the partition resolver to resolve a endpoint based on +// the service endpoint ID provided. +// +// When resolving endpoints you can choose to enable StrictMatching. This will +// require the provided service and region to be known by the partition. +// If the endpoint cannot be strictly resolved an error will be returned. This +// mode is useful to ensure the endpoint resolved is valid. Without +// StrictMatching enabled the endpoint returned my look valid but may not work. +// StrictMatching requires the SDK to be updated if you want to take advantage +// of new regions and services expansions. +// +// Errors that can be returned. +// * UnknownServiceError +// * UnknownEndpointError +func (p Partition) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return p.p.EndpointFor(service, region, opts...) +} + +// Regions returns a map of Regions indexed by their ID. This is useful for +// enumerating over the regions in a partition. +func (p Partition) Regions() map[string]Region { + rs := map[string]Region{} + for id := range p.p.Regions { + rs[id] = Region{ + id: id, + p: p.p, + } + } + + return rs +} + +// Services returns a map of Service indexed by their ID. This is useful for +// enumerating over the services in a partition. +func (p Partition) Services() map[string]Service { + ss := map[string]Service{} + for id := range p.p.Services { + ss[id] = Service{ + id: id, + p: p.p, + } + } + + return ss +} + +// A Region provides information about a region, and ability to resolve an +// endpoint from the context of a region, given a service. +type Region struct { + id, desc string + p *partition +} + +// ID returns the region's identifier. +func (r Region) ID() string { return r.id } + +// ResolveEndpoint resolves an endpoint from the context of the region given +// a service. See Partition.EndpointFor for usage and errors that can be returned. +func (r Region) ResolveEndpoint(service string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return r.p.EndpointFor(service, r.id, opts...) +} + +// Services returns a list of all services that are known to be in this region. 
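A minimal sketch (again, not part of the diff) of how the Partition and Region types vendored here are typically consumed. It assumes the generated AwsPartition() accessor and the S3 endpoint data shown earlier in this file.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	p := endpoints.AwsPartition()

	// Enumerate every region the partition models.
	for id := range p.Regions() {
		fmt.Println("region:", id)
	}

	// Resolve S3 from the context of a single region, once normally and once
	// as a dualstack endpoint via the functional option defined above.
	if r, ok := p.Regions()["us-west-2"]; ok {
		re, _ := r.ResolveEndpoint("s3")
		fmt.Println(re.URL) // https://s3-us-west-2.amazonaws.com

		dual, _ := r.ResolveEndpoint("s3", endpoints.UseDualStackOption)
		fmt.Println(dual.URL) // https://s3.dualstack.us-west-2.amazonaws.com
	}
}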
+func (r Region) Services() map[string]Service { + ss := map[string]Service{} + for id, s := range r.p.Services { + if _, ok := s.Endpoints[r.id]; ok { + ss[id] = Service{ + id: id, + p: r.p, + } + } + } + + return ss +} + +// A Service provides information about a service, and ability to resolve an +// endpoint from the context of a service, given a region. +type Service struct { + id string + p *partition +} + +// ID returns the identifier for the service. +func (s Service) ID() string { return s.id } + +// ResolveEndpoint resolves an endpoint from the context of a service given +// a region. See Partition.EndpointFor for usage and errors that can be returned. +func (s Service) ResolveEndpoint(region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + return s.p.EndpointFor(s.id, region, opts...) +} + +// Regions returns a map of Regions that the service is present in. +// +// A region is the AWS region the service exists in. Whereas a Endpoint is +// an URL that can be resolved to a instance of a service. +func (s Service) Regions() map[string]Region { + rs := map[string]Region{} + for id := range s.p.Services[s.id].Endpoints { + if _, ok := s.p.Regions[id]; ok { + rs[id] = Region{ + id: id, + p: s.p, + } + } + } + + return rs +} + +// Endpoints returns a map of Endpoints indexed by their ID for all known +// endpoints for a service. +// +// A region is the AWS region the service exists in. Whereas a Endpoint is +// an URL that can be resolved to a instance of a service. +func (s Service) Endpoints() map[string]Endpoint { + es := map[string]Endpoint{} + for id := range s.p.Services[s.id].Endpoints { + es[id] = Endpoint{ + id: id, + serviceID: s.id, + p: s.p, + } + } + + return es +} + +// A Endpoint provides information about endpoints, and provides the ability +// to resolve that endpoint for the service, and the region the endpoint +// represents. +type Endpoint struct { + id string + serviceID string + p *partition +} + +// ID returns the identifier for an endpoint. +func (e Endpoint) ID() string { return e.id } + +// ServiceID returns the identifier the endpoint belongs to. +func (e Endpoint) ServiceID() string { return e.serviceID } + +// ResolveEndpoint resolves an endpoint from the context of a service and +// region the endpoint represents. See Partition.EndpointFor for usage and +// errors that can be returned. +func (e Endpoint) ResolveEndpoint(opts ...func(*Options)) (ResolvedEndpoint, error) { + return e.p.EndpointFor(e.serviceID, e.id, opts...) +} + +// A ResolvedEndpoint is an endpoint that has been resolved based on a partition +// service, and region. +type ResolvedEndpoint struct { + // The endpoint URL + URL string + + // The region that should be used for signing requests. + SigningRegion string + + // The service name that should be used for signing requests. + SigningName string + + // The signing method that should be used for signing requests. + SigningMethod string +} + +// So that the Error interface type can be included as an anonymous field +// in the requestError struct and not conflict with the error.Error() method. +type awsError awserr.Error + +// A EndpointNotFoundError is returned when in StrictMatching mode, and the +// endpoint for the service and region cannot be found in any of the partitions. +type EndpointNotFoundError struct { + awsError + Partition string + Service string + Region string +} + +// A UnknownServiceError is returned when the service does not resolve to an +// endpoint. 
Includes a list of all known services for the partition. Returned +// when a partition does not support the service. +type UnknownServiceError struct { + awsError + Partition string + Service string + Known []string +} + +// NewUnknownServiceError builds and returns UnknownServiceError. +func NewUnknownServiceError(p, s string, known []string) UnknownServiceError { + return UnknownServiceError{ + awsError: awserr.New("UnknownServiceError", + "could not resolve endpoint for unknown service", nil), + Partition: p, + Service: s, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownServiceError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q", + e.Partition, e.Service) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownServiceError) String() string { + return e.Error() +} + +// A UnknownEndpointError is returned when in StrictMatching mode and the +// service is valid, but the region does not resolve to an endpoint. Includes +// a list of all known endpoints for the service. +type UnknownEndpointError struct { + awsError + Partition string + Service string + Region string + Known []string +} + +// NewUnknownEndpointError builds and returns UnknownEndpointError. +func NewUnknownEndpointError(p, s, r string, known []string) UnknownEndpointError { + return UnknownEndpointError{ + awsError: awserr.New("UnknownEndpointError", + "could not resolve endpoint", nil), + Partition: p, + Service: s, + Region: r, + Known: known, + } +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) Error() string { + extra := fmt.Sprintf("partition: %q, service: %q, region: %q", + e.Partition, e.Service, e.Region) + if len(e.Known) > 0 { + extra += fmt.Sprintf(", known: %v", e.Known) + } + return awserr.SprintError(e.Code(), e.Message(), extra, e.OrigErr()) +} + +// String returns the string representation of the error. +func (e UnknownEndpointError) String() string { + return e.Error() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go new file mode 100644 index 0000000..13d968a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model.go @@ -0,0 +1,303 @@ +package endpoints + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +type partitions []partition + +func (ps partitions) EndpointFor(service, region string, opts ...func(*Options)) (ResolvedEndpoint, error) { + var opt Options + opt.Set(opts...) + + for i := 0; i < len(ps); i++ { + if !ps[i].canResolveEndpoint(service, region, opt.StrictMatching) { + continue + } + + return ps[i].EndpointFor(service, region, opts...) + } + + // If loose matching fallback to first partition format to use + // when resolving the endpoint. + if !opt.StrictMatching && len(ps) > 0 { + return ps[0].EndpointFor(service, region, opts...) + } + + return ResolvedEndpoint{}, NewUnknownEndpointError("all partitions", service, region, []string{}) +} + +// Partitions satisfies the EnumPartitions interface and returns a list +// of Partitions representing each partition represented in the SDK's +// endpoints model. 
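A hedged sketch of how a caller might distinguish the error types defined above, and how the ResolveUnknownService option changes the outcome. It assumes the aws partition uses the same "{service}.{region}.{dnsSuffix}" default hostname pattern shown for the other partitions in this file.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	// A service the model does not know yields UnknownServiceError...
	_, err := endpoints.AwsPartition().EndpointFor("no-such-service", "us-west-2")
	if e, ok := err.(endpoints.UnknownServiceError); ok {
		fmt.Println("unknown service; the partition knows", len(e.Known), "services")
	}

	// ...unless the caller opts in to resolving unknown services, in which
	// case the service ID is used as the hostname prefix.
	re, err := endpoints.AwsPartition().EndpointFor("no-such-service", "us-west-2",
		endpoints.ResolveUnknownServiceOption)
	if err == nil {
		fmt.Println(re.URL) // https://no-such-service.us-west-2.amazonaws.com
	}
}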
+func (ps partitions) Partitions() []Partition { + parts := make([]Partition, 0, len(ps)) + for i := 0; i < len(ps); i++ { + parts = append(parts, ps[i].Partition()) + } + + return parts +} + +type partition struct { + ID string `json:"partition"` + Name string `json:"partitionName"` + DNSSuffix string `json:"dnsSuffix"` + RegionRegex regionRegex `json:"regionRegex"` + Defaults endpoint `json:"defaults"` + Regions regions `json:"regions"` + Services services `json:"services"` +} + +func (p partition) Partition() Partition { + return Partition{ + id: p.ID, + p: &p, + } +} + +func (p partition) canResolveEndpoint(service, region string, strictMatch bool) bool { + s, hasService := p.Services[service] + _, hasEndpoint := s.Endpoints[region] + + if hasEndpoint && hasService { + return true + } + + if strictMatch { + return false + } + + return p.RegionRegex.MatchString(region) +} + +func (p partition) EndpointFor(service, region string, opts ...func(*Options)) (resolved ResolvedEndpoint, err error) { + var opt Options + opt.Set(opts...) + + s, hasService := p.Services[service] + if !(hasService || opt.ResolveUnknownService) { + // Only return error if the resolver will not fallback to creating + // endpoint based on service endpoint ID passed in. + return resolved, NewUnknownServiceError(p.ID, service, serviceList(p.Services)) + } + + e, hasEndpoint := s.endpointForRegion(region) + if !hasEndpoint && opt.StrictMatching { + return resolved, NewUnknownEndpointError(p.ID, service, region, endpointList(s.Endpoints)) + } + + defs := []endpoint{p.Defaults, s.Defaults} + return e.resolve(service, region, p.DNSSuffix, defs, opt), nil +} + +func serviceList(ss services) []string { + list := make([]string, 0, len(ss)) + for k := range ss { + list = append(list, k) + } + return list +} +func endpointList(es endpoints) []string { + list := make([]string, 0, len(es)) + for k := range es { + list = append(list, k) + } + return list +} + +type regionRegex struct { + *regexp.Regexp +} + +func (rr *regionRegex) UnmarshalJSON(b []byte) (err error) { + // Strip leading and trailing quotes + regex, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("unable to strip quotes from regex, %v", err) + } + + rr.Regexp, err = regexp.Compile(regex) + if err != nil { + return fmt.Errorf("unable to unmarshal region regex, %v", err) + } + return nil +} + +type regions map[string]region + +type region struct { + Description string `json:"description"` +} + +type services map[string]service + +type service struct { + PartitionEndpoint string `json:"partitionEndpoint"` + IsRegionalized boxedBool `json:"isRegionalized,omitempty"` + Defaults endpoint `json:"defaults"` + Endpoints endpoints `json:"endpoints"` +} + +func (s *service) endpointForRegion(region string) (endpoint, bool) { + if s.IsRegionalized == boxedFalse { + return s.Endpoints[s.PartitionEndpoint], region == s.PartitionEndpoint + } + + if e, ok := s.Endpoints[region]; ok { + return e, true + } + + // Unable to find any matching endpoint, return + // blank that will be used for generic endpoint creation. 
+ return endpoint{}, false +} + +type endpoints map[string]endpoint + +type endpoint struct { + Hostname string `json:"hostname"` + Protocols []string `json:"protocols"` + CredentialScope credentialScope `json:"credentialScope"` + + // Custom fields not modeled + HasDualStack boxedBool `json:"-"` + DualStackHostname string `json:"-"` + + // Signature Version not used + SignatureVersions []string `json:"signatureVersions"` + + // SSLCommonName not used. + SSLCommonName string `json:"sslCommonName"` +} + +const ( + defaultProtocol = "https" + defaultSigner = "v4" +) + +var ( + protocolPriority = []string{"https", "http"} + signerPriority = []string{"v4", "v2"} +) + +func getByPriority(s []string, p []string, def string) string { + if len(s) == 0 { + return def + } + + for i := 0; i < len(p); i++ { + for j := 0; j < len(s); j++ { + if s[j] == p[i] { + return s[j] + } + } + } + + return s[0] +} + +func (e endpoint) resolve(service, region, dnsSuffix string, defs []endpoint, opts Options) ResolvedEndpoint { + var merged endpoint + for _, def := range defs { + merged.mergeIn(def) + } + merged.mergeIn(e) + e = merged + + hostname := e.Hostname + + // Offset the hostname for dualstack if enabled + if opts.UseDualStack && e.HasDualStack == boxedTrue { + hostname = e.DualStackHostname + } + + u := strings.Replace(hostname, "{service}", service, 1) + u = strings.Replace(u, "{region}", region, 1) + u = strings.Replace(u, "{dnsSuffix}", dnsSuffix, 1) + + scheme := getEndpointScheme(e.Protocols, opts.DisableSSL) + u = fmt.Sprintf("%s://%s", scheme, u) + + signingRegion := e.CredentialScope.Region + if len(signingRegion) == 0 { + signingRegion = region + } + signingName := e.CredentialScope.Service + if len(signingName) == 0 { + signingName = service + } + + return ResolvedEndpoint{ + URL: u, + SigningRegion: signingRegion, + SigningName: signingName, + SigningMethod: getByPriority(e.SignatureVersions, signerPriority, defaultSigner), + } +} + +func getEndpointScheme(protocols []string, disableSSL bool) string { + if disableSSL { + return "http" + } + + return getByPriority(protocols, protocolPriority, defaultProtocol) +} + +func (e *endpoint) mergeIn(other endpoint) { + if len(other.Hostname) > 0 { + e.Hostname = other.Hostname + } + if len(other.Protocols) > 0 { + e.Protocols = other.Protocols + } + if len(other.SignatureVersions) > 0 { + e.SignatureVersions = other.SignatureVersions + } + if len(other.CredentialScope.Region) > 0 { + e.CredentialScope.Region = other.CredentialScope.Region + } + if len(other.CredentialScope.Service) > 0 { + e.CredentialScope.Service = other.CredentialScope.Service + } + if len(other.SSLCommonName) > 0 { + e.SSLCommonName = other.SSLCommonName + } + if other.HasDualStack != boxedBoolUnset { + e.HasDualStack = other.HasDualStack + } + if len(other.DualStackHostname) > 0 { + e.DualStackHostname = other.DualStackHostname + } +} + +type credentialScope struct { + Region string `json:"region"` + Service string `json:"service"` +} + +type boxedBool int + +func (b *boxedBool) UnmarshalJSON(buf []byte) error { + v, err := strconv.ParseBool(string(buf)) + if err != nil { + return err + } + + if v { + *b = boxedTrue + } else { + *b = boxedFalse + } + + return nil +} + +const ( + boxedBoolUnset boxedBool = iota + boxedFalse + boxedTrue +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go new file mode 100644 index 0000000..05e92df --- /dev/null +++ 
b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/v3model_codegen.go @@ -0,0 +1,337 @@ +// +build codegen + +package endpoints + +import ( + "fmt" + "io" + "reflect" + "strings" + "text/template" + "unicode" +) + +// A CodeGenOptions are the options for code generating the endpoints into +// Go code from the endpoints model definition. +type CodeGenOptions struct { + // Options for how the model will be decoded. + DecodeModelOptions DecodeModelOptions +} + +// Set combines all of the option functions together +func (d *CodeGenOptions) Set(optFns ...func(*CodeGenOptions)) { + for _, fn := range optFns { + fn(d) + } +} + +// CodeGenModel given a endpoints model file will decode it and attempt to +// generate Go code from the model definition. Error will be returned if +// the code is unable to be generated, or decoded. +func CodeGenModel(modelFile io.Reader, outFile io.Writer, optFns ...func(*CodeGenOptions)) error { + var opts CodeGenOptions + opts.Set(optFns...) + + resolver, err := DecodeModel(modelFile, func(d *DecodeModelOptions) { + *d = opts.DecodeModelOptions + }) + if err != nil { + return err + } + + tmpl := template.Must(template.New("tmpl").Funcs(funcMap).Parse(v3Tmpl)) + if err := tmpl.ExecuteTemplate(outFile, "defaults", resolver); err != nil { + return fmt.Errorf("failed to execute template, %v", err) + } + + return nil +} + +func toSymbol(v string) string { + out := []rune{} + for _, c := range strings.Title(v) { + if !(unicode.IsNumber(c) || unicode.IsLetter(c)) { + continue + } + + out = append(out, c) + } + + return string(out) +} + +func quoteString(v string) string { + return fmt.Sprintf("%q", v) +} + +func regionConstName(p, r string) string { + return toSymbol(p) + toSymbol(r) +} + +func partitionGetter(id string) string { + return fmt.Sprintf("%sPartition", toSymbol(id)) +} + +func partitionVarName(id string) string { + return fmt.Sprintf("%sPartition", strings.ToLower(toSymbol(id))) +} + +func listPartitionNames(ps partitions) string { + names := []string{} + switch len(ps) { + case 1: + return ps[0].Name + case 2: + return fmt.Sprintf("%s and %s", ps[0].Name, ps[1].Name) + default: + for i, p := range ps { + if i == len(ps)-1 { + names = append(names, "and "+p.Name) + } else { + names = append(names, p.Name) + } + } + return strings.Join(names, ", ") + } +} + +func boxedBoolIfSet(msg string, v boxedBool) string { + switch v { + case boxedTrue: + return fmt.Sprintf(msg, "boxedTrue") + case boxedFalse: + return fmt.Sprintf(msg, "boxedFalse") + default: + return "" + } +} + +func stringIfSet(msg, v string) string { + if len(v) == 0 { + return "" + } + + return fmt.Sprintf(msg, v) +} + +func stringSliceIfSet(msg string, vs []string) string { + if len(vs) == 0 { + return "" + } + + names := []string{} + for _, v := range vs { + names = append(names, `"`+v+`"`) + } + + return fmt.Sprintf(msg, strings.Join(names, ",")) +} + +func endpointIsSet(v endpoint) bool { + return !reflect.DeepEqual(v, endpoint{}) +} + +func serviceSet(ps partitions) map[string]struct{} { + set := map[string]struct{}{} + for _, p := range ps { + for id := range p.Services { + set[id] = struct{}{} + } + } + + return set +} + +var funcMap = template.FuncMap{ + "ToSymbol": toSymbol, + "QuoteString": quoteString, + "RegionConst": regionConstName, + "PartitionGetter": partitionGetter, + "PartitionVarName": partitionVarName, + "ListPartitionNames": listPartitionNames, + "BoxedBoolIfSet": boxedBoolIfSet, + "StringIfSet": stringIfSet, + "StringSliceIfSet": stringSliceIfSet, + "EndpointIsSet": endpointIsSet, 
+ "ServicesSet": serviceSet, +} + +const v3Tmpl = ` +{{ define "defaults" -}} +// Code generated by aws/endpoints/v3model_codegen.go. DO NOT EDIT. + +package endpoints + +import ( + "regexp" +) + + {{ template "partition consts" . }} + + {{ range $_, $partition := . }} + {{ template "partition region consts" $partition }} + {{ end }} + + {{ template "service consts" . }} + + {{ template "endpoint resolvers" . }} +{{- end }} + +{{ define "partition consts" }} + // Partition identifiers + const ( + {{ range $_, $p := . -}} + {{ ToSymbol $p.ID }}PartitionID = {{ QuoteString $p.ID }} // {{ $p.Name }} partition. + {{ end -}} + ) +{{- end }} + +{{ define "partition region consts" }} + // {{ .Name }} partition's regions. + const ( + {{ range $id, $region := .Regions -}} + {{ ToSymbol $id }}RegionID = {{ QuoteString $id }} // {{ $region.Description }}. + {{ end -}} + ) +{{- end }} + +{{ define "service consts" }} + // Service identifiers + const ( + {{ $serviceSet := ServicesSet . -}} + {{ range $id, $_ := $serviceSet -}} + {{ ToSymbol $id }}ServiceID = {{ QuoteString $id }} // {{ ToSymbol $id }}. + {{ end -}} + ) +{{- end }} + +{{ define "endpoint resolvers" }} + // DefaultResolver returns an Endpoint resolver that will be able + // to resolve endpoints for: {{ ListPartitionNames . }}. + // + // Use DefaultPartitions() to get the list of the default partitions. + func DefaultResolver() Resolver { + return defaultPartitions + } + + // DefaultPartitions returns a list of the partitions the SDK is bundled + // with. The available partitions are: {{ ListPartitionNames . }}. + // + // partitions := endpoints.DefaultPartitions + // for _, p := range partitions { + // // ... inspect partitions + // } + func DefaultPartitions() []Partition { + return defaultPartitions.Partitions() + } + + var defaultPartitions = partitions{ + {{ range $_, $partition := . -}} + {{ PartitionVarName $partition.ID }}, + {{ end }} + } + + {{ range $_, $partition := . -}} + {{ $name := PartitionGetter $partition.ID -}} + // {{ $name }} returns the Resolver for {{ $partition.Name }}. + func {{ $name }}() Partition { + return {{ PartitionVarName $partition.ID }}.Partition() + } + var {{ PartitionVarName $partition.ID }} = {{ template "gocode Partition" $partition }} + {{ end }} +{{ end }} + +{{ define "default partitions" }} + func DefaultPartitions() []Partition { + return []partition{ + {{ range $_, $partition := . -}} + // {{ ToSymbol $partition.ID}}Partition(), + {{ end }} + } + } +{{ end }} + +{{ define "gocode Partition" -}} +partition{ + {{ StringIfSet "ID: %q,\n" .ID -}} + {{ StringIfSet "Name: %q,\n" .Name -}} + {{ StringIfSet "DNSSuffix: %q,\n" .DNSSuffix -}} + RegionRegex: {{ template "gocode RegionRegex" .RegionRegex }}, + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults }}, + {{- end }} + Regions: {{ template "gocode Regions" .Regions }}, + Services: {{ template "gocode Services" .Services }}, +} +{{- end }} + +{{ define "gocode RegionRegex" -}} +regionRegex{ + Regexp: func() *regexp.Regexp{ + reg, _ := regexp.Compile({{ QuoteString .Regexp.String }}) + return reg + }(), +} +{{- end }} + +{{ define "gocode Regions" -}} +regions{ + {{ range $id, $region := . -}} + "{{ $id }}": {{ template "gocode Region" $region }}, + {{ end -}} +} +{{- end }} + +{{ define "gocode Region" -}} +region{ + {{ StringIfSet "Description: %q,\n" .Description -}} +} +{{- end }} + +{{ define "gocode Services" -}} +services{ + {{ range $id, $service := . 
-}} + "{{ $id }}": {{ template "gocode Service" $service }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Service" -}} +service{ + {{ StringIfSet "PartitionEndpoint: %q,\n" .PartitionEndpoint -}} + {{ BoxedBoolIfSet "IsRegionalized: %s,\n" .IsRegionalized -}} + {{ if EndpointIsSet .Defaults -}} + Defaults: {{ template "gocode Endpoint" .Defaults -}}, + {{- end }} + {{ if .Endpoints -}} + Endpoints: {{ template "gocode Endpoints" .Endpoints }}, + {{- end }} +} +{{- end }} + +{{ define "gocode Endpoints" -}} +endpoints{ + {{ range $id, $endpoint := . -}} + "{{ $id }}": {{ template "gocode Endpoint" $endpoint }}, + {{ end }} +} +{{- end }} + +{{ define "gocode Endpoint" -}} +endpoint{ + {{ StringIfSet "Hostname: %q,\n" .Hostname -}} + {{ StringIfSet "SSLCommonName: %q,\n" .SSLCommonName -}} + {{ StringSliceIfSet "Protocols: []string{%s},\n" .Protocols -}} + {{ StringSliceIfSet "SignatureVersions: []string{%s},\n" .SignatureVersions -}} + {{ if or .CredentialScope.Region .CredentialScope.Service -}} + CredentialScope: credentialScope{ + {{ StringIfSet "Region: %q,\n" .CredentialScope.Region -}} + {{ StringIfSet "Service: %q,\n" .CredentialScope.Service -}} + }, + {{- end }} + {{ BoxedBoolIfSet "HasDualStack: %s,\n" .HasDualStack -}} + {{ StringIfSet "DualStackHostname: %q,\n" .DualStackHostname -}} + +} +{{- end }} +` diff --git a/vendor/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/github.com/aws/aws-sdk-go/aws/errors.go new file mode 100644 index 0000000..5766361 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/errors.go @@ -0,0 +1,17 @@ +package aws + +import "github.com/aws/aws-sdk-go/aws/awserr" + +var ( + // ErrMissingRegion is an error that is returned if region configuration is + // not found. + // + // @readonly + ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) + + // ErrMissingEndpoint is an error that is returned if an endpoint cannot be + // resolved for a service. + // + // @readonly + ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) +) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go new file mode 100644 index 0000000..91a6f27 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/jsonvalue.go @@ -0,0 +1,12 @@ +package aws + +// JSONValue is a representation of a grab bag type that will be marshaled +// into a json string. This type can be used just like any other map. +// +// Example: +// +// values := aws.JSONValue{ +// "Foo": "Bar", +// } +// values["Baz"] = "Qux" +type JSONValue map[string]interface{} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/github.com/aws/aws-sdk-go/aws/logger.go similarity index 76% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go rename to vendor/github.com/aws/aws-sdk-go/aws/logger.go index 935661c..db87188 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/logger.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/logger.go @@ -62,6 +62,15 @@ const ( // see the body content of requests and responses made while using the SDK // Will also enable LogDebug. LogDebugWithHTTPBody + + // LogDebugWithRequestRetries states the SDK should log when service requests will + // be retried. This should be used to log when you want to log when service + // requests are being retried. Will also enable LogDebug. 
+ LogDebugWithRequestRetries + + // LogDebugWithRequestErrors states the SDK should log when service requests fail + // to build, send, validate, or unmarshal. + LogDebugWithRequestErrors ) // A Logger is a minimalistic interface for the SDK to log messages to. Should @@ -70,6 +79,20 @@ type Logger interface { Log(...interface{}) } +// A LoggerFunc is a convenience type to convert a function taking a variadic +// list of arguments and wrap it so the Logger interface can be used. +// +// Example: +// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { +// fmt.Fprintln(os.Stdout, args...) +// })}) +type LoggerFunc func(...interface{}) + +// Log calls the wrapped function with the arguments provided +func (f LoggerFunc) Log(args ...interface{}) { + f(args...) +} + // NewDefaultLogger returns a Logger which will write log messages to stdout, and // use same formatting runes as the stdlib log.Logger func NewDefaultLogger() Logger { diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go new file mode 100644 index 0000000..271da43 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error.go @@ -0,0 +1,19 @@ +// +build !appengine,!plan9 + +package request + +import ( + "net" + "os" + "syscall" +) + +func isErrConnectionReset(err error) bool { + if opErr, ok := err.(*net.OpError); ok { + if sysErr, ok := opErr.Err.(*os.SyscallError); ok { + return sysErr.Err == syscall.ECONNRESET + } + } + + return false +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go new file mode 100644 index 0000000..daf9eca --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/connection_reset_error_other.go @@ -0,0 +1,11 @@ +// +build appengine plan9 + +package request + +import ( + "strings" +) + +func isErrConnectionReset(err error) bool { + return strings.Contains(err.Error(), "connection reset") +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go new file mode 100644 index 0000000..802ac88 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/handlers.go @@ -0,0 +1,256 @@ +package request + +import ( + "fmt" + "strings" +) + +// A Handlers provides a collection of request handlers for various +// stages of handling requests. +type Handlers struct { + Validate HandlerList + Build HandlerList + Sign HandlerList + Send HandlerList + ValidateResponse HandlerList + Unmarshal HandlerList + UnmarshalMeta HandlerList + UnmarshalError HandlerList + Retry HandlerList + AfterRetry HandlerList + Complete HandlerList +} + +// Copy returns of this handler's lists. 
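The new log levels and LoggerFunc above are typically wired into a config. A small sketch, assuming the session package this patch also vendors and the aws.LogLevel pointer helper from the unchanged part of logger.go.

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("eu-west-1"),
		// Log retried and failed requests using the levels added above.
		LogLevel: aws.LogLevel(
			aws.LogDebugWithRequestRetries | aws.LogDebugWithRequestErrors),
		Logger: aws.LoggerFunc(func(args ...interface{}) {
			fmt.Fprintln(os.Stderr, args...)
		}),
	}))
	_ = sess // hand sess to any service client, e.g. s3.New(sess)
}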
+func (h *Handlers) Copy() Handlers { + return Handlers{ + Validate: h.Validate.copy(), + Build: h.Build.copy(), + Sign: h.Sign.copy(), + Send: h.Send.copy(), + ValidateResponse: h.ValidateResponse.copy(), + Unmarshal: h.Unmarshal.copy(), + UnmarshalError: h.UnmarshalError.copy(), + UnmarshalMeta: h.UnmarshalMeta.copy(), + Retry: h.Retry.copy(), + AfterRetry: h.AfterRetry.copy(), + Complete: h.Complete.copy(), + } +} + +// Clear removes callback functions for all handlers +func (h *Handlers) Clear() { + h.Validate.Clear() + h.Build.Clear() + h.Send.Clear() + h.Sign.Clear() + h.Unmarshal.Clear() + h.UnmarshalMeta.Clear() + h.UnmarshalError.Clear() + h.ValidateResponse.Clear() + h.Retry.Clear() + h.AfterRetry.Clear() + h.Complete.Clear() +} + +// A HandlerListRunItem represents an entry in the HandlerList which +// is being run. +type HandlerListRunItem struct { + Index int + Handler NamedHandler + Request *Request +} + +// A HandlerList manages zero or more handlers in a list. +type HandlerList struct { + list []NamedHandler + + // Called after each request handler in the list is called. If set + // and the func returns true the HandlerList will continue to iterate + // over the request handlers. If false is returned the HandlerList + // will stop iterating. + // + // Should be used if extra logic to be performed between each handler + // in the list. This can be used to terminate a list's iteration + // based on a condition such as error like, HandlerListStopOnError. + // Or for logging like HandlerListLogItem. + AfterEachFn func(item HandlerListRunItem) bool +} + +// A NamedHandler is a struct that contains a name and function callback. +type NamedHandler struct { + Name string + Fn func(*Request) +} + +// copy creates a copy of the handler list. +func (l *HandlerList) copy() HandlerList { + n := HandlerList{ + AfterEachFn: l.AfterEachFn, + } + if len(l.list) == 0 { + return n + } + + n.list = append(make([]NamedHandler, 0, len(l.list)), l.list...) + return n +} + +// Clear clears the handler list. +func (l *HandlerList) Clear() { + l.list = l.list[0:0] +} + +// Len returns the number of handlers in the list. +func (l *HandlerList) Len() int { + return len(l.list) +} + +// PushBack pushes handler f to the back of the handler list. +func (l *HandlerList) PushBack(f func(*Request)) { + l.PushBackNamed(NamedHandler{"__anonymous", f}) +} + +// PushBackNamed pushes named handler f to the back of the handler list. +func (l *HandlerList) PushBackNamed(n NamedHandler) { + if cap(l.list) == 0 { + l.list = make([]NamedHandler, 0, 5) + } + l.list = append(l.list, n) +} + +// PushFront pushes handler f to the front of the handler list. +func (l *HandlerList) PushFront(f func(*Request)) { + l.PushFrontNamed(NamedHandler{"__anonymous", f}) +} + +// PushFrontNamed pushes named handler f to the front of the handler list. +func (l *HandlerList) PushFrontNamed(n NamedHandler) { + if cap(l.list) == len(l.list) { + // Allocating new list required + l.list = append([]NamedHandler{n}, l.list...) + } else { + // Enough room to prepend into list. + l.list = append(l.list, NamedHandler{}) + copy(l.list[1:], l.list) + l.list[0] = n + } +} + +// Remove removes a NamedHandler n +func (l *HandlerList) Remove(n NamedHandler) { + l.RemoveByName(n.Name) +} + +// RemoveByName removes a NamedHandler by name. 
+func (l *HandlerList) RemoveByName(name string) { + for i := 0; i < len(l.list); i++ { + m := l.list[i] + if m.Name == name { + // Shift array preventing creating new arrays + copy(l.list[i:], l.list[i+1:]) + l.list[len(l.list)-1] = NamedHandler{} + l.list = l.list[:len(l.list)-1] + + // decrement list so next check to length is correct + i-- + } + } +} + +// SwapNamed will swap out any existing handlers with the same name as the +// passed in NamedHandler returning true if handlers were swapped. False is +// returned otherwise. +func (l *HandlerList) SwapNamed(n NamedHandler) (swapped bool) { + for i := 0; i < len(l.list); i++ { + if l.list[i].Name == n.Name { + l.list[i].Fn = n.Fn + swapped = true + } + } + + return swapped +} + +// SetBackNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the end of the list. +func (l *HandlerList) SetBackNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushBackNamed(n) + } +} + +// SetFrontNamed will replace the named handler if it exists in the handler list. +// If the handler does not exist the handler will be added to the beginning of +// the list. +func (l *HandlerList) SetFrontNamed(n NamedHandler) { + if !l.SwapNamed(n) { + l.PushFrontNamed(n) + } +} + +// Run executes all handlers in the list with a given request object. +func (l *HandlerList) Run(r *Request) { + for i, h := range l.list { + h.Fn(r) + item := HandlerListRunItem{ + Index: i, Handler: h, Request: r, + } + if l.AfterEachFn != nil && !l.AfterEachFn(item) { + return + } + } +} + +// HandlerListLogItem logs the request handler and the state of the +// request's Error value. Always returns true to continue iterating +// request handlers in a HandlerList. +func HandlerListLogItem(item HandlerListRunItem) bool { + if item.Request.Config.Logger == nil { + return true + } + item.Request.Config.Logger.Log("DEBUG: RequestHandler", + item.Index, item.Handler.Name, item.Request.Error) + + return true +} + +// HandlerListStopOnError returns false to stop the HandlerList iterating +// over request handlers if Request.Error is not nil. True otherwise +// to continue iterating. +func HandlerListStopOnError(item HandlerListRunItem) bool { + return item.Request.Error == nil +} + +// WithAppendUserAgent will add a string to the user agent prefixed with a +// single white space. +func WithAppendUserAgent(s string) Option { + return func(r *Request) { + r.Handlers.Build.PushBack(func(r2 *Request) { + AddToUserAgent(r, s) + }) + } +} + +// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request +// header. If the extra parameters are provided they will be added as metadata to the +// name/version pair resulting in the following format. +// "name/version (extra0; extra1; ...)" +// The user agent part will be concatenated with this current request's user agent string. +func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { + ua := fmt.Sprintf("%s/%s", name, version) + if len(extra) > 0 { + ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) + } + return func(r *Request) { + AddToUserAgent(r, ua) + } +} + +// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. +// The input string will be concatenated with the current request's user agent string. 
+func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { + return func(r *Request) { + AddToUserAgent(r, s) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go new file mode 100644 index 0000000..79f7960 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/http_request.go @@ -0,0 +1,24 @@ +package request + +import ( + "io" + "net/http" + "net/url" +) + +func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { + req := new(http.Request) + *req = *r + req.URL = &url.URL{} + *req.URL = *r.URL + req.Body = body + + req.Header = http.Header{} + for k, v := range r.Header { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + + return req +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go new file mode 100644 index 0000000..02f07f4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/offset_reader.go @@ -0,0 +1,58 @@ +package request + +import ( + "io" + "sync" +) + +// offsetReader is a thread-safe io.ReadCloser to prevent racing +// with retrying requests +type offsetReader struct { + buf io.ReadSeeker + lock sync.Mutex + closed bool +} + +func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { + reader := &offsetReader{} + buf.Seek(offset, 0) + + reader.buf = buf + return reader +} + +// Close will close the instance of the offset reader's access to +// the underlying io.ReadSeeker. +func (o *offsetReader) Close() error { + o.lock.Lock() + defer o.lock.Unlock() + o.closed = true + return nil +} + +// Read is a thread-safe read of the underlying io.ReadSeeker +func (o *offsetReader) Read(p []byte) (int, error) { + o.lock.Lock() + defer o.lock.Unlock() + + if o.closed { + return 0, io.EOF + } + + return o.buf.Read(p) +} + +// Seek is a thread-safe seeking operation. +func (o *offsetReader) Seek(offset int64, whence int) (int64, error) { + o.lock.Lock() + defer o.lock.Unlock() + + return o.buf.Seek(offset, whence) +} + +// CloseAndCopy will return a new offsetReader with a copy of the old buffer +// and close the old buffer. +func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { + o.Close() + return newOffsetReader(o.buf, offset) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go new file mode 100644 index 0000000..299dc37 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request.go @@ -0,0 +1,575 @@ +package request + +import ( + "bytes" + "fmt" + "io" + "net" + "net/http" + "net/url" + "reflect" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client/metadata" +) + +const ( + // ErrCodeSerialization is the serialization error code that is received + // during protocol unmarshaling. + ErrCodeSerialization = "SerializationError" + + // ErrCodeRead is an error that is returned during HTTP reads. + ErrCodeRead = "ReadError" + + // ErrCodeResponseTimeout is the connection timeout error that is recieved + // during body reads. + ErrCodeResponseTimeout = "ResponseTimeout" + + // CanceledErrorCode is the error code that will be returned by an + // API request that was canceled. Requests given a aws.Context may + // return this error when canceled. + CanceledErrorCode = "RequestCanceled" +) + +// A Request is the service request to be made. 
+type Request struct { + Config aws.Config + ClientInfo metadata.ClientInfo + Handlers Handlers + + Retryer + Time time.Time + ExpireTime time.Duration + Operation *Operation + HTTPRequest *http.Request + HTTPResponse *http.Response + Body io.ReadSeeker + BodyStart int64 // offset from beginning of Body that the request body starts + Params interface{} + Error error + Data interface{} + RequestID string + RetryCount int + Retryable *bool + RetryDelay time.Duration + NotHoist bool + SignedHeaderVals http.Header + LastSignedAt time.Time + DisableFollowRedirects bool + + context aws.Context + + built bool + + // Need to persist an intermediate body between the input Body and HTTP + // request body because the HTTP Client's transport can maintain a reference + // to the HTTP request's body after the client has returned. This value is + // safe to use concurrently and wrap the input Body for each HTTP request. + safeBody *offsetReader +} + +// An Operation is the service API operation to be made. +type Operation struct { + Name string + HTTPMethod string + HTTPPath string + *Paginator + + BeforePresignFn func(r *Request) error +} + +// New returns a new Request pointer for the service API +// operation and parameters. +// +// Params is any value of input parameters to be the request payload. +// Data is pointer value to an object which the request's response +// payload will be deserialized to. +func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, + retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { + + method := operation.HTTPMethod + if method == "" { + method = "POST" + } + + httpReq, _ := http.NewRequest(method, "", nil) + + var err error + httpReq.URL, err = url.Parse(clientInfo.Endpoint + operation.HTTPPath) + if err != nil { + httpReq.URL = &url.URL{} + err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) + } + + r := &Request{ + Config: cfg, + ClientInfo: clientInfo, + Handlers: handlers.Copy(), + + Retryer: retryer, + Time: time.Now(), + ExpireTime: 0, + Operation: operation, + HTTPRequest: httpReq, + Body: nil, + Params: params, + Error: err, + Data: data, + } + r.SetBufferBody([]byte{}) + + return r +} + +// A Option is a functional option that can augment or modify a request when +// using a WithContext API operation method. +type Option func(*Request) + +// WithGetResponseHeader builds a request Option which will retrieve a single +// header value from the HTTP Response. If there are multiple values for the +// header key use WithGetResponseHeaders instead to access the http.Header +// map directly. The passed in val pointer must be non-nil. +// +// This Option can be used multiple times with a single API operation. +// +// var id2, versionID string +// svc.PutObjectWithContext(ctx, params, +// request.WithGetResponseHeader("x-amz-id-2", &id2), +// request.WithGetResponseHeader("x-amz-version-id", &versionID), +// ) +func WithGetResponseHeader(key string, val *string) Option { + return func(r *Request) { + r.Handlers.Complete.PushBack(func(req *Request) { + *val = req.HTTPResponse.Header.Get(key) + }) + } +} + +// WithGetResponseHeaders builds a request Option which will retrieve the +// headers from the HTTP response and assign them to the passed in headers +// variable. The passed in headers pointer must be non-nil. 
+//
+//     var headers http.Header
+//     svc.PutObjectWithContext(ctx, params, request.WithGetResponseHeaders(&headers))
+func WithGetResponseHeaders(headers *http.Header) Option {
+	return func(r *Request) {
+		r.Handlers.Complete.PushBack(func(req *Request) {
+			*headers = req.HTTPResponse.Header
+		})
+	}
+}
+
+// WithLogLevel is a request option that will set the request to use a specific
+// log level when the request is made.
+//
+//     svc.PutObjectWithContext(ctx, params, request.WithLogLevel(aws.LogDebugWithHTTPBody))
+func WithLogLevel(l aws.LogLevelType) Option {
+	return func(r *Request) {
+		r.Config.LogLevel = aws.LogLevel(l)
+	}
+}
+
+// ApplyOptions will apply each option to the request, calling them in the order
+// they were provided.
+func (r *Request) ApplyOptions(opts ...Option) {
+	for _, opt := range opts {
+		opt(r)
+	}
+}
+
+// Context will always return a non-nil context. If the Request does not have a
+// context, aws.BackgroundContext will be returned.
+func (r *Request) Context() aws.Context {
+	if r.context != nil {
+		return r.context
+	}
+	return aws.BackgroundContext()
+}
+
+// SetContext adds a Context to the current request that can be used to cancel
+// an in-flight request. The Context value must not be nil, or this method will
+// panic.
+//
+// Unlike http.Request.WithContext, SetContext does not return a copy of the
+// Request. It is not safe to use a single Request value for multiple
+// requests. A new Request should be created for each API operation request.
+//
+// Go 1.6 and below:
+// The http.Request's Cancel field will be set to the Done() value of
+// the context. This will overwrite the Cancel field's value.
+//
+// Go 1.7 and above:
+// The http.Request.WithContext will be used to set the context on the underlying
+// http.Request. This will create a shallow copy of the http.Request. The SDK
+// may create sub contexts in the future for nested requests such as retries.
+func (r *Request) SetContext(ctx aws.Context) {
+	if ctx == nil {
+		panic("context cannot be nil")
+	}
+	setRequestContext(r, ctx)
+}
+
+// WillRetry returns if the request can be retried.
+func (r *Request) WillRetry() bool {
+	return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries()
+}
+
+// ParamsFilled returns if the request's parameters have been populated
+// and the parameters are valid. False is returned if no parameters are
+// provided or invalid.
+func (r *Request) ParamsFilled() bool {
+	return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid()
+}
+
+// DataFilled returns true if the request's data for response deserialization
+// target has been set and is valid. False is returned if data is not
+// set, or is invalid.
+func (r *Request) DataFilled() bool {
+	return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid()
+}
+
+// SetBufferBody will set the request's body bytes that will be sent to
+// the service API.
+func (r *Request) SetBufferBody(buf []byte) {
+	r.SetReaderBody(bytes.NewReader(buf))
+}
+
+// SetStringBody sets the body of the request to be backed by a string.
+func (r *Request) SetStringBody(s string) {
+	r.SetReaderBody(strings.NewReader(s))
+}
+
+// SetReaderBody will set the request's body reader.
+func (r *Request) SetReaderBody(reader io.ReadSeeker) {
+	r.Body = reader
+	r.ResetBody()
+}
+
+// Presign returns the request's signed URL. Error will be returned
+// if the signing fails.
+func (r *Request) Presign(expireTime time.Duration) (string, error) { + r.ExpireTime = expireTime + r.NotHoist = false + + if r.Operation.BeforePresignFn != nil { + r = r.copy() + err := r.Operation.BeforePresignFn(r) + if err != nil { + return "", err + } + } + + r.Sign() + if r.Error != nil { + return "", r.Error + } + return r.HTTPRequest.URL.String(), nil +} + +// PresignRequest behaves just like presign, but hoists all headers and signs them. +// Also returns the signed hash back to the user +func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { + r.ExpireTime = expireTime + r.NotHoist = true + r.Sign() + if r.Error != nil { + return "", nil, r.Error + } + return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil +} + +func debugLogReqError(r *Request, stage string, retrying bool, err error) { + if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { + return + } + + retryStr := "not retrying" + if retrying { + retryStr = "will retry" + } + + r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", + stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) +} + +// Build will build the request's object so it can be signed and sent +// to the service. Build will also validate all the request's parameters. +// Anny additional build Handlers set on this request will be run +// in the order they were set. +// +// The request will only be built once. Multiple calls to build will have +// no effect. +// +// If any Validate or Build errors occur the build will stop and the error +// which occurred will be returned. +func (r *Request) Build() error { + if !r.built { + r.Handlers.Validate.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Request", false, r.Error) + return r.Error + } + r.Handlers.Build.Run(r) + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + r.built = true + } + + return r.Error +} + +// Sign will sign the request returning error if errors are encountered. +// +// Send will build the request prior to signing. All Sign Handlers will +// be executed in the order they were set. +func (r *Request) Sign() error { + r.Build() + if r.Error != nil { + debugLogReqError(r, "Build Request", false, r.Error) + return r.Error + } + + r.Handlers.Sign.Run(r) + return r.Error +} + +func (r *Request) getNextRequestBody() (io.ReadCloser, error) { + if r.safeBody != nil { + r.safeBody.Close() + } + + r.safeBody = newOffsetReader(r.Body, r.BodyStart) + + // Go 1.8 tightened and clarified the rules code needs to use when building + // requests with the http package. Go 1.8 removed the automatic detection + // of if the Request.Body was empty, or actually had bytes in it. The SDK + // always sets the Request.Body even if it is empty and should not actually + // be sent. This is incorrect. + // + // Go 1.8 did add a http.NoBody value that the SDK can use to tell the http + // client that the request really should be sent without a body. The + // Request.Body cannot be set to nil, which is preferable, because the + // field is exported and could introduce nil pointer dereferences for users + // of the SDK if they used that field. 
+ // + // Related golang/go#18257 + l, err := computeBodyLength(r.Body) + if err != nil { + return nil, awserr.New(ErrCodeSerialization, "failed to compute request body size", err) + } + + var body io.ReadCloser + if l == 0 { + body = NoBody + } else if l > 0 { + body = r.safeBody + } else { + // Hack to prevent sending bodies for methods where the body + // should be ignored by the server. Sending bodies on these + // methods without an associated ContentLength will cause the + // request to socket timeout because the server does not handle + // Transfer-Encoding: chunked bodies for these methods. + // + // This would only happen if a aws.ReaderSeekerCloser was used with + // a io.Reader that was not also an io.Seeker. + switch r.Operation.HTTPMethod { + case "GET", "HEAD", "DELETE": + body = NoBody + default: + body = r.safeBody + } + } + + return body, nil +} + +// Attempts to compute the length of the body of the reader using the +// io.Seeker interface. If the value is not seekable because of being +// a ReaderSeekerCloser without an unerlying Seeker -1 will be returned. +// If no error occurs the length of the body will be returned. +func computeBodyLength(r io.ReadSeeker) (int64, error) { + seekable := true + // Determine if the seeker is actually seekable. ReaderSeekerCloser + // hides the fact that a io.Readers might not actually be seekable. + switch v := r.(type) { + case aws.ReaderSeekerCloser: + seekable = v.IsSeeker() + case *aws.ReaderSeekerCloser: + seekable = v.IsSeeker() + } + if !seekable { + return -1, nil + } + + curOffset, err := r.Seek(0, 1) + if err != nil { + return 0, err + } + + endOffset, err := r.Seek(0, 2) + if err != nil { + return 0, err + } + + _, err = r.Seek(curOffset, 0) + if err != nil { + return 0, err + } + + return endOffset - curOffset, nil +} + +// GetBody will return an io.ReadSeeker of the Request's underlying +// input body with a concurrency safe wrapper. +func (r *Request) GetBody() io.ReadSeeker { + return r.safeBody +} + +// Send will send the request returning error if errors are encountered. +// +// Send will sign the request prior to sending. All Send Handlers will +// be executed in the order they were set. +// +// Canceling a request is non-deterministic. If a request has been canceled, +// then the transport will choose, randomly, one of the state channels during +// reads or getting the connection. +// +// readLoop() and getConn(req *Request, cm connectMethod) +// https://github.com/golang/go/blob/master/src/net/http/transport.go +// +// Send will not close the request.Request's body. +func (r *Request) Send() error { + defer func() { + // Regardless of success or failure of the request trigger the Complete + // request handlers. + r.Handlers.Complete.Run(r) + }() + + for { + if aws.BoolValue(r.Retryable) { + if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { + r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", + r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) + } + + // The previous http.Request will have a reference to the r.Body + // and the HTTP Client's Transport may still be reading from + // the request's body even though the Client's Do returned. + r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil) + r.ResetBody() + + // Closing response body to ensure that no response body is leaked + // between retry attempts. 
+ if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { + r.HTTPResponse.Body.Close() + } + } + + r.Sign() + if r.Error != nil { + return r.Error + } + + r.Retryable = nil + + r.Handlers.Send.Run(r) + if r.Error != nil { + if !shouldRetryCancel(r) { + return r.Error + } + + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Send Request", false, err) + return r.Error + } + debugLogReqError(r, "Send Request", true, err) + continue + } + r.Handlers.UnmarshalMeta.Run(r) + r.Handlers.ValidateResponse.Run(r) + if r.Error != nil { + r.Handlers.UnmarshalError.Run(r) + err := r.Error + + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Validate Response", false, err) + return r.Error + } + debugLogReqError(r, "Validate Response", true, err) + continue + } + + r.Handlers.Unmarshal.Run(r) + if r.Error != nil { + err := r.Error + r.Handlers.Retry.Run(r) + r.Handlers.AfterRetry.Run(r) + if r.Error != nil { + debugLogReqError(r, "Unmarshal Response", false, err) + return r.Error + } + debugLogReqError(r, "Unmarshal Response", true, err) + continue + } + + break + } + + return nil +} + +// copy will copy a request which will allow for local manipulation of the +// request. +func (r *Request) copy() *Request { + req := &Request{} + *req = *r + req.Handlers = r.Handlers.Copy() + op := *r.Operation + req.Operation = &op + return req +} + +// AddToUserAgent adds the string to the end of the request's current user agent. +func AddToUserAgent(r *Request, s string) { + curUA := r.HTTPRequest.Header.Get("User-Agent") + if len(curUA) > 0 { + s = curUA + " " + s + } + r.HTTPRequest.Header.Set("User-Agent", s) +} + +func shouldRetryCancel(r *Request) bool { + awsErr, ok := r.Error.(awserr.Error) + timeoutErr := false + errStr := r.Error.Error() + if ok { + if awsErr.Code() == CanceledErrorCode { + return false + } + err := awsErr.OrigErr() + netErr, netOK := err.(net.Error) + timeoutErr = netOK && netErr.Temporary() + if urlErr, ok := err.(*url.Error); !timeoutErr && ok { + errStr = urlErr.Err.Error() + } + } + + // There can be two types of canceled errors here. + // The first being a net.Error and the other being an error. + // If the request was timed out, we want to continue the retry + // process. Otherwise, return the canceled error. + return timeoutErr || + (errStr != "net/http: request canceled" && + errStr != "net/http: request canceled while waiting for connection") + +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go new file mode 100644 index 0000000..869b97a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_7.go @@ -0,0 +1,39 @@ +// +build !go1.8 + +package request + +import "io" + +// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// and Close always returns nil. It can be used in an outgoing client +// request to explicitly signal that a request has zero bytes. +// An alternative, however, is to simply set Request.Body to nil. +// +// Copy of Go 1.8 NoBody type from net/http/http.go +type noBody struct{} + +func (noBody) Read([]byte) (int, error) { return 0, io.EOF } +func (noBody) Close() error { return nil } +func (noBody) WriteTo(io.Writer) (int64, error) { return 0, nil } + +// NoBody is an empty reader that will trigger the Go HTTP client to not include +// and body in the HTTP request. 
+var NoBody = noBody{}
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+func (r *Request) ResetBody() {
+	body, err := r.getNextRequestBody()
+	if err != nil {
+		r.Error = err
+		return
+	}
+
+	r.HTTPRequest.Body = body
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
new file mode 100644
index 0000000..c32fc69
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_1_8.go
@@ -0,0 +1,33 @@
+// +build go1.8
+
+package request
+
+import (
+	"net/http"
+)
+
+// NoBody is the http.NoBody reader instructing the Go HTTP client to not
+// include a body in the HTTP request.
+var NoBody = http.NoBody
+
+// ResetBody rewinds the request body back to its starting position, and
+// sets the HTTP Request body reference. When the body is read prior
+// to being sent in the HTTP request it will need to be rewound.
+//
+// ResetBody will automatically be called by the SDK's build handler, but if
+// the request is being used directly ResetBody must be called before the request
+// is Sent. SetStringBody, SetBufferBody, and SetReaderBody will automatically
+// call ResetBody.
+//
+// Will also set Go 1.8's http.Request.GetBody member to allow retrying
+// PUT/POST redirects.
+func (r *Request) ResetBody() {
+	body, err := r.getNextRequestBody()
+	if err != nil {
+		r.Error = err
+		return
+	}
+
+	r.HTTPRequest.Body = body
+	r.HTTPRequest.GetBody = r.getNextRequestBody
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
new file mode 100644
index 0000000..a7365cd
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context.go
@@ -0,0 +1,14 @@
+// +build go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setRequestContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Creates shallow copy of the http.Request with the WithContext method.
+func setRequestContext(r *Request, ctx aws.Context) {
+	r.context = ctx
+	r.HTTPRequest = r.HTTPRequest.WithContext(ctx)
+}
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
new file mode 100644
index 0000000..307fa07
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_context_1_6.go
@@ -0,0 +1,14 @@
+// +build !go1.7
+
+package request
+
+import "github.com/aws/aws-sdk-go/aws"
+
+// setRequestContext updates the Request to use the passed in context for cancellation.
+// Context will also be used for request retry delay.
+//
+// Sets the http.Request's Cancel field to the context's Done channel.
+func setRequestContext(r *Request, ctx aws.Context) { + r.context = ctx + r.HTTPRequest.Cancel = ctx.Done() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go new file mode 100644 index 0000000..59de673 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/request_pagination.go @@ -0,0 +1,236 @@ +package request + +import ( + "reflect" + "sync/atomic" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// A Pagination provides paginating of SDK API operations which are paginatable. +// Generally you should not use this type directly, but use the "Pages" API +// operations method to automatically perform pagination for you. Such as, +// "S3.ListObjectsPages", and "S3.ListObjectsPagesWithContext" methods. +// +// Pagination differs from a Paginator type in that pagination is the type that +// does the pagination between API operations, and Paginator defines the +// configuration that will be used per page request. +// +// cont := true +// for p.Next() && cont { +// data := p.Page().(*s3.ListObjectsOutput) +// // process the page's data +// } +// return p.Err() +// +// See service client API operation Pages methods for examples how the SDK will +// use the Pagination type. +type Pagination struct { + // Function to return a Request value for each pagination request. + // Any configuration or handlers that need to be applied to the request + // prior to getting the next page should be done here before the request + // returned. + // + // NewRequest should always be built from the same API operations. It is + // undefined if different API operations are returned on subsequent calls. + NewRequest func() (*Request, error) + + started bool + nextTokens []interface{} + + err error + curPage interface{} +} + +// HasNextPage will return true if Pagination is able to determine that the API +// operation has additional pages. False will be returned if there are no more +// pages remaining. +// +// Will always return true if Next has not been called yet. +func (p *Pagination) HasNextPage() bool { + return !(p.started && len(p.nextTokens) == 0) +} + +// Err returns the error Pagination encountered when retrieving the next page. +func (p *Pagination) Err() error { + return p.err +} + +// Page returns the current page. Page should only be called after a successful +// call to Next. It is undefined what Page will return if Page is called after +// Next returns false. +func (p *Pagination) Page() interface{} { + return p.curPage +} + +// Next will attempt to retrieve the next page for the API operation. When a page +// is retrieved true will be returned. If the page cannot be retrieved, or there +// are no more pages false will be returned. +// +// Use the Page method to retrieve the current page data. The data will need +// to be cast to the API operation's output type. +// +// Use the Err method to determine if an error occurred if Page returns false. 
+func (p *Pagination) Next() bool { + if !p.HasNextPage() { + return false + } + + req, err := p.NewRequest() + if err != nil { + p.err = err + return false + } + + if p.started { + for i, intok := range req.Operation.InputTokens { + awsutil.SetValueAtPath(req.Params, intok, p.nextTokens[i]) + } + } + p.started = true + + err = req.Send() + if err != nil { + p.err = err + return false + } + + p.nextTokens = req.nextPageTokens() + p.curPage = req.Data + + return true +} + +// A Paginator is the configuration data that defines how an API operation +// should be paginated. This type is used by the API service models to define +// the generated pagination config for service APIs. +// +// The Pagination type is what provides iterating between pages of an API. It +// is only used to store the token metadata the SDK should use for performing +// pagination. +type Paginator struct { + InputTokens []string + OutputTokens []string + LimitToken string + TruncationToken string +} + +// nextPageTokens returns the tokens to use when asking for the next page of data. +func (r *Request) nextPageTokens() []interface{} { + if r.Operation.Paginator == nil { + return nil + } + if r.Operation.TruncationToken != "" { + tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) + if len(tr) == 0 { + return nil + } + + switch v := tr[0].(type) { + case *bool: + if !aws.BoolValue(v) { + return nil + } + case bool: + if v == false { + return nil + } + } + } + + tokens := []interface{}{} + tokenAdded := false + for _, outToken := range r.Operation.OutputTokens { + v, _ := awsutil.ValuesAtPath(r.Data, outToken) + if len(v) > 0 { + tokens = append(tokens, v[0]) + tokenAdded = true + } else { + tokens = append(tokens, nil) + } + } + if !tokenAdded { + return nil + } + + return tokens +} + +// Ensure a deprecated item is only logged once instead of each time its used. +func logDeprecatedf(logger aws.Logger, flag *int32, msg string) { + if logger == nil { + return + } + if atomic.CompareAndSwapInt32(flag, 0, 1) { + logger.Log(msg) + } +} + +var ( + logDeprecatedHasNextPage int32 + logDeprecatedNextPage int32 + logDeprecatedEachPage int32 +) + +// HasNextPage returns true if this request has more pages of data available. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) HasNextPage() bool { + logDeprecatedf(r.Config.Logger, &logDeprecatedHasNextPage, + "Request.HasNextPage deprecated. Use Pagination type for configurable pagination of API operations") + + return len(r.nextPageTokens()) > 0 +} + +// NextPage returns a new Request that can be executed to return the next +// page of result data. Call .Send() on this request to execute it. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) NextPage() *Request { + logDeprecatedf(r.Config.Logger, &logDeprecatedNextPage, + "Request.NextPage deprecated. Use Pagination type for configurable pagination of API operations") + + tokens := r.nextPageTokens() + if len(tokens) == 0 { + return nil + } + + data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() + nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) + for i, intok := range nr.Operation.InputTokens { + awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) + } + return nr +} + +// EachPage iterates over each page of a paginated request object. 
The fn +// parameter should be a function with the following sample signature: +// +// func(page *T, lastPage bool) bool { +// return true // return false to stop iterating +// } +// +// Where "T" is the structure type matching the output structure of the given +// operation. For example, a request object generated by +// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput +// as the structure "T". The lastPage value represents whether the page is +// the last page of data or not. The return value of this function should +// return true to keep iterating or false to stop. +// +// Deprecated Use Pagination type for configurable pagination of API operations +func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { + logDeprecatedf(r.Config.Logger, &logDeprecatedEachPage, + "Request.EachPage deprecated. Use Pagination type for configurable pagination of API operations") + + for page := r; page != nil; page = page.NextPage() { + if err := page.Send(); err != nil { + return err + } + if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { + return page.Error + } + } + + return nil +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go new file mode 100644 index 0000000..8d369c1 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/retryer.go @@ -0,0 +1,161 @@ +package request + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" +) + +// Retryer is an interface to control retry logic for a given service. +// The default implementation used by most services is the service.DefaultRetryer +// structure, which contains basic retry logic using exponential backoff. +type Retryer interface { + RetryRules(*Request) time.Duration + ShouldRetry(*Request) bool + MaxRetries() int +} + +// WithRetryer sets a config Retryer value to the given Config returning it +// for chaining. +func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { + cfg.Retryer = retryer + return cfg +} + +// retryableCodes is a collection of service response codes which are retry-able +// without any further action. +var retryableCodes = map[string]struct{}{ + "RequestError": {}, + "RequestTimeout": {}, + ErrCodeResponseTimeout: {}, + "RequestTimeoutException": {}, // Glacier's flavor of RequestTimeout +} + +var throttleCodes = map[string]struct{}{ + "ProvisionedThroughputExceededException": {}, + "Throttling": {}, + "ThrottlingException": {}, + "RequestLimitExceeded": {}, + "RequestThrottled": {}, + "TooManyRequestsException": {}, // Lambda functions + "PriorRequestNotComplete": {}, // Route53 +} + +// credsExpiredCodes is a collection of error codes which signify the credentials +// need to be refreshed. Expired tokens require refreshing of credentials, and +// resigning before the request can be retried. 
+var credsExpiredCodes = map[string]struct{}{ + "ExpiredToken": {}, + "ExpiredTokenException": {}, + "RequestExpired": {}, // EC2 Only +} + +func isCodeThrottle(code string) bool { + _, ok := throttleCodes[code] + return ok +} + +func isCodeRetryable(code string) bool { + if _, ok := retryableCodes[code]; ok { + return true + } + + return isCodeExpiredCreds(code) +} + +func isCodeExpiredCreds(code string) bool { + _, ok := credsExpiredCodes[code] + return ok +} + +var validParentCodes = map[string]struct{}{ + ErrCodeSerialization: struct{}{}, + ErrCodeRead: struct{}{}, +} + +type temporaryError interface { + Temporary() bool +} + +func isNestedErrorRetryable(parentErr awserr.Error) bool { + if parentErr == nil { + return false + } + + if _, ok := validParentCodes[parentErr.Code()]; !ok { + return false + } + + err := parentErr.OrigErr() + if err == nil { + return false + } + + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) + } + + if t, ok := err.(temporaryError); ok { + return t.Temporary() + } + + return isErrConnectionReset(err) +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if error is nil. +func IsErrorRetryable(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeRetryable(aerr.Code()) || isNestedErrorRetryable(aerr) + } + } + return false +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if error is nil. +func IsErrorThrottle(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeThrottle(aerr.Code()) + } + } + return false +} + +// IsErrorExpiredCreds returns whether the error code is a credential expiry error. +// Returns false if error is nil. +func IsErrorExpiredCreds(err error) bool { + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + return isCodeExpiredCreds(aerr.Code()) + } + } + return false +} + +// IsErrorRetryable returns whether the error is retryable, based on its Code. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorRetryable +func (r *Request) IsErrorRetryable() bool { + return IsErrorRetryable(r.Error) +} + +// IsErrorThrottle returns whether the error is to be throttled based on its code. +// Returns false if the request has no Error set +// +// Alias for the utility function IsErrorThrottle +func (r *Request) IsErrorThrottle() bool { + return IsErrorThrottle(r.Error) +} + +// IsErrorExpired returns whether the error code is a credential expiry error. +// Returns false if the request has no Error set. +// +// Alias for the utility function IsErrorExpiredCreds +func (r *Request) IsErrorExpired() bool { + return IsErrorExpiredCreds(r.Error) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go new file mode 100644 index 0000000..09a44eb --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/timeout_read_closer.go @@ -0,0 +1,94 @@ +package request + +import ( + "io" + "time" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +var timeoutErr = awserr.New( + ErrCodeResponseTimeout, + "read on body has reached the timeout limit", + nil, +) + +type readResult struct { + n int + err error +} + +// timeoutReadCloser will handle body reads that take too long. +// We will return a ErrReadTimeout error if a timeout occurs. 
+type timeoutReadCloser struct { + reader io.ReadCloser + duration time.Duration +} + +// Read will spin off a goroutine to call the reader's Read method. We will +// select on the timer's channel or the read's channel. Whoever completes first +// will be returned. +func (r *timeoutReadCloser) Read(b []byte) (int, error) { + timer := time.NewTimer(r.duration) + c := make(chan readResult, 1) + + go func() { + n, err := r.reader.Read(b) + timer.Stop() + c <- readResult{n: n, err: err} + }() + + select { + case data := <-c: + return data.n, data.err + case <-timer.C: + return 0, timeoutErr + } +} + +func (r *timeoutReadCloser) Close() error { + return r.reader.Close() +} + +const ( + // HandlerResponseTimeout is what we use to signify the name of the + // response timeout handler. + HandlerResponseTimeout = "ResponseTimeoutHandler" +) + +// adaptToResponseTimeoutError is a handler that will replace any top level error +// to a ErrCodeResponseTimeout, if its child is that. +func adaptToResponseTimeoutError(req *Request) { + if err, ok := req.Error.(awserr.Error); ok { + aerr, ok := err.OrigErr().(awserr.Error) + if ok && aerr.Code() == ErrCodeResponseTimeout { + req.Error = aerr + } + } +} + +// WithResponseReadTimeout is a request option that will wrap the body in a timeout read closer. +// This will allow for per read timeouts. If a timeout occurred, we will return the +// ErrCodeResponseTimeout. +// +// svc.PutObjectWithContext(ctx, params, request.WithTimeoutReadCloser(30 * time.Second) +func WithResponseReadTimeout(duration time.Duration) Option { + return func(r *Request) { + + var timeoutHandler = NamedHandler{ + HandlerResponseTimeout, + func(req *Request) { + req.HTTPResponse.Body = &timeoutReadCloser{ + reader: req.HTTPResponse.Body, + duration: duration, + } + }} + + // remove the handler so we are not stomping over any new durations. + r.Handlers.Send.RemoveByName(HandlerResponseTimeout) + r.Handlers.Send.PushBackNamed(timeoutHandler) + + r.Handlers.Unmarshal.PushBack(adaptToResponseTimeoutError) + r.Handlers.UnmarshalError.PushBack(adaptToResponseTimeoutError) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go new file mode 100644 index 0000000..2520286 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/validation.go @@ -0,0 +1,234 @@ +package request + +import ( + "bytes" + "fmt" + + "github.com/aws/aws-sdk-go/aws/awserr" +) + +const ( + // InvalidParameterErrCode is the error code for invalid parameters errors + InvalidParameterErrCode = "InvalidParameter" + // ParamRequiredErrCode is the error code for required parameter errors + ParamRequiredErrCode = "ParamRequiredError" + // ParamMinValueErrCode is the error code for fields with too low of a + // number value. + ParamMinValueErrCode = "ParamMinValueError" + // ParamMinLenErrCode is the error code for fields without enough elements. + ParamMinLenErrCode = "ParamMinLenError" +) + +// Validator provides a way for types to perform validation logic on their +// input values that external code can use to determine if a type's values +// are valid. +type Validator interface { + Validate() error +} + +// An ErrInvalidParams provides wrapping of invalid parameter errors found when +// validating API operation input parameters. +type ErrInvalidParams struct { + // Context is the base context of the invalid parameter group. 
+ Context string + errs []ErrInvalidParam +} + +// Add adds a new invalid parameter error to the collection of invalid +// parameters. The context of the invalid parameter will be updated to reflect +// this collection. +func (e *ErrInvalidParams) Add(err ErrInvalidParam) { + err.SetContext(e.Context) + e.errs = append(e.errs, err) +} + +// AddNested adds the invalid parameter errors from another ErrInvalidParams +// value into this collection. The nested errors will have their nested context +// updated and base context to reflect the merging. +// +// Use for nested validations errors. +func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) { + for _, err := range nested.errs { + err.SetContext(e.Context) + err.AddNestedContext(nestedCtx) + e.errs = append(e.errs, err) + } +} + +// Len returns the number of invalid parameter errors +func (e ErrInvalidParams) Len() int { + return len(e.errs) +} + +// Code returns the code of the error +func (e ErrInvalidParams) Code() string { + return InvalidParameterErrCode +} + +// Message returns the message of the error +func (e ErrInvalidParams) Message() string { + return fmt.Sprintf("%d validation error(s) found.", len(e.errs)) +} + +// Error returns the string formatted form of the invalid parameters. +func (e ErrInvalidParams) Error() string { + w := &bytes.Buffer{} + fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message()) + + for _, err := range e.errs { + fmt.Fprintf(w, "- %s\n", err.Message()) + } + + return w.String() +} + +// OrigErr returns the invalid parameters as a awserr.BatchedErrors value +func (e ErrInvalidParams) OrigErr() error { + return awserr.NewBatchError( + InvalidParameterErrCode, e.Message(), e.OrigErrs()) +} + +// OrigErrs returns a slice of the invalid parameters +func (e ErrInvalidParams) OrigErrs() []error { + errs := make([]error, len(e.errs)) + for i := 0; i < len(errs); i++ { + errs[i] = e.errs[i] + } + + return errs +} + +// An ErrInvalidParam represents an invalid parameter error type. +type ErrInvalidParam interface { + awserr.Error + + // Field name the error occurred on. + Field() string + + // SetContext updates the context of the error. + SetContext(string) + + // AddNestedContext updates the error's context to include a nested level. + AddNestedContext(string) +} + +type errInvalidParam struct { + context string + nestedContext string + field string + code string + msg string +} + +// Code returns the error code for the type of invalid parameter. +func (e *errInvalidParam) Code() string { + return e.code +} + +// Message returns the reason the parameter was invalid, and its context. +func (e *errInvalidParam) Message() string { + return fmt.Sprintf("%s, %s.", e.msg, e.Field()) +} + +// Error returns the string version of the invalid parameter error. +func (e *errInvalidParam) Error() string { + return fmt.Sprintf("%s: %s", e.code, e.Message()) +} + +// OrigErr returns nil, Implemented for awserr.Error interface. +func (e *errInvalidParam) OrigErr() error { + return nil +} + +// Field Returns the field and context the error occurred. +func (e *errInvalidParam) Field() string { + field := e.context + if len(field) > 0 { + field += "." + } + if len(e.nestedContext) > 0 { + field += fmt.Sprintf("%s.", e.nestedContext) + } + field += e.field + + return field +} + +// SetContext updates the base context of the error. +func (e *errInvalidParam) SetContext(ctx string) { + e.context = ctx +} + +// AddNestedContext prepends a context to the field's path. 
+func (e *errInvalidParam) AddNestedContext(ctx string) { + if len(e.nestedContext) == 0 { + e.nestedContext = ctx + } else { + e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext) + } + +} + +// An ErrParamRequired represents an required parameter error. +type ErrParamRequired struct { + errInvalidParam +} + +// NewErrParamRequired creates a new required parameter error. +func NewErrParamRequired(field string) *ErrParamRequired { + return &ErrParamRequired{ + errInvalidParam{ + code: ParamRequiredErrCode, + field: field, + msg: fmt.Sprintf("missing required field"), + }, + } +} + +// An ErrParamMinValue represents a minimum value parameter error. +type ErrParamMinValue struct { + errInvalidParam + min float64 +} + +// NewErrParamMinValue creates a new minimum value parameter error. +func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { + return &ErrParamMinValue{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field value of %v", min), + }, + min: min, + } +} + +// MinValue returns the field's require minimum value. +// +// float64 is returned for both int and float min values. +func (e *ErrParamMinValue) MinValue() float64 { + return e.min +} + +// An ErrParamMinLen represents a minimum length parameter error. +type ErrParamMinLen struct { + errInvalidParam + min int +} + +// NewErrParamMinLen creates a new minimum length parameter error. +func NewErrParamMinLen(field string, min int) *ErrParamMinLen { + return &ErrParamMinLen{ + errInvalidParam: errInvalidParam{ + code: ParamMinValueErrCode, + field: field, + msg: fmt.Sprintf("minimum field size of %v", min), + }, + min: min, + } +} + +// MinLen returns the field's required minimum length. +func (e *ErrParamMinLen) MinLen() int { + return e.min +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go new file mode 100644 index 0000000..22d2f80 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/request/waiter.go @@ -0,0 +1,287 @@ +package request + +import ( + "fmt" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" +) + +// WaiterResourceNotReadyErrorCode is the error code returned by a waiter when +// the waiter's max attempts have been exhausted. +const WaiterResourceNotReadyErrorCode = "ResourceNotReady" + +// A WaiterOption is a function that will update the Waiter value's fields to +// configure the waiter. +type WaiterOption func(*Waiter) + +// WithWaiterMaxAttempts returns the maximum number of times the waiter should +// attempt to check the resource for the target state. +func WithWaiterMaxAttempts(max int) WaiterOption { + return func(w *Waiter) { + w.MaxAttempts = max + } +} + +// WaiterDelay will return a delay the waiter should pause between attempts to +// check the resource state. The passed in attempt is the number of times the +// Waiter has checked the resource state. +// +// Attempt is the number of attempts the Waiter has made checking the resource +// state. +type WaiterDelay func(attempt int) time.Duration + +// ConstantWaiterDelay returns a WaiterDelay that will always return a constant +// delay the waiter should use between attempts. It ignores the number of +// attempts made. +func ConstantWaiterDelay(delay time.Duration) WaiterDelay { + return func(attempt int) time.Duration { + return delay + } +} + +// WithWaiterDelay will set the Waiter to use the WaiterDelay passed in. 
+func WithWaiterDelay(delayer WaiterDelay) WaiterOption { + return func(w *Waiter) { + w.Delay = delayer + } +} + +// WithWaiterLogger returns a waiter option to set the logger a waiter +// should use to log warnings and errors to. +func WithWaiterLogger(logger aws.Logger) WaiterOption { + return func(w *Waiter) { + w.Logger = logger + } +} + +// WithWaiterRequestOptions returns a waiter option setting the request +// options for each request the waiter makes. Appends to waiter's request +// options already set. +func WithWaiterRequestOptions(opts ...Option) WaiterOption { + return func(w *Waiter) { + w.RequestOptions = append(w.RequestOptions, opts...) + } +} + +// A Waiter provides the functionality to perform a blocking call which will +// wait for a resource state to be satisfied by a service. +// +// This type should not be used directly. The API operations provided in the +// service packages prefixed with "WaitUntil" should be used instead. +type Waiter struct { + Name string + Acceptors []WaiterAcceptor + Logger aws.Logger + + MaxAttempts int + Delay WaiterDelay + + RequestOptions []Option + NewRequest func([]Option) (*Request, error) +} + +// ApplyOptions updates the waiter with the list of waiter options provided. +func (w *Waiter) ApplyOptions(opts ...WaiterOption) { + for _, fn := range opts { + fn(w) + } +} + +// WaiterState are states the waiter uses based on WaiterAcceptor definitions +// to identify if the resource state the waiter is waiting on has occurred. +type WaiterState int + +// String returns the string representation of the waiter state. +func (s WaiterState) String() string { + switch s { + case SuccessWaiterState: + return "success" + case FailureWaiterState: + return "failure" + case RetryWaiterState: + return "retry" + default: + return "unknown waiter state" + } +} + +// States the waiter acceptors will use to identify target resource states. +const ( + SuccessWaiterState WaiterState = iota // waiter successful + FailureWaiterState // waiter failed + RetryWaiterState // waiter needs to be retried +) + +// WaiterMatchMode is the mode that the waiter will use to match the WaiterAcceptor +// definition's Expected attribute. +type WaiterMatchMode int + +// Modes the waiter will use when inspecting API response to identify target +// resource states. +const ( + PathAllWaiterMatch WaiterMatchMode = iota // match on all paths + PathWaiterMatch // match on specific path + PathAnyWaiterMatch // match on any path + PathListWaiterMatch // match on list of paths + StatusWaiterMatch // match on status code + ErrorWaiterMatch // match on error +) + +// String returns the string representation of the waiter match mode. +func (m WaiterMatchMode) String() string { + switch m { + case PathAllWaiterMatch: + return "pathAll" + case PathWaiterMatch: + return "path" + case PathAnyWaiterMatch: + return "pathAny" + case PathListWaiterMatch: + return "pathList" + case StatusWaiterMatch: + return "status" + case ErrorWaiterMatch: + return "error" + default: + return "unknown waiter match mode" + } +} + +// WaitWithContext will make requests for the API operation using NewRequest to +// build API requests. The request's response will be compared against the +// Waiter's Acceptors to determine the successful state of the resource the +// waiter is inspecting. +// +// The passed in context must not be nil. If it is nil a panic will occur. The +// Context will be used to cancel the waiter's pending requests and retry delays. +// Use aws.BackgroundContext if no context is available. 
+// +// The waiter will continue until the target state defined by the Acceptors, +// or the max attempts expires. +// +// Will return the WaiterResourceNotReadyErrorCode error code if the waiter's +// retryer ShouldRetry returns false. This normally will happen when the max +// wait attempts expires. +func (w Waiter) WaitWithContext(ctx aws.Context) error { + + for attempt := 1; ; attempt++ { + req, err := w.NewRequest(w.RequestOptions) + if err != nil { + waiterLogf(w.Logger, "unable to create request %v", err) + return err + } + req.Handlers.Build.PushBack(MakeAddToUserAgentFreeFormHandler("Waiter")) + err = req.Send() + + // See if any of the acceptors match the request's response, or error + for _, a := range w.Acceptors { + if matched, matchErr := a.match(w.Name, w.Logger, req, err); matched { + return matchErr + } + } + + // The Waiter should only check the resource state MaxAttempts times + // This is here instead of in the for loop above to prevent delaying + // unnecessary when the waiter will not retry. + if attempt == w.MaxAttempts { + break + } + + // Delay to wait before inspecting the resource again + delay := w.Delay(attempt) + if sleepFn := req.Config.SleepDelay; sleepFn != nil { + // Support SleepDelay for backwards compatibility and testing + sleepFn(delay) + } else if err := aws.SleepWithContext(ctx, delay); err != nil { + return awserr.New(CanceledErrorCode, "waiter context canceled", err) + } + } + + return awserr.New(WaiterResourceNotReadyErrorCode, "exceeded wait attempts", nil) +} + +// A WaiterAcceptor provides the information needed to wait for an API operation +// to complete. +type WaiterAcceptor struct { + State WaiterState + Matcher WaiterMatchMode + Argument string + Expected interface{} +} + +// match returns if the acceptor found a match with the passed in request +// or error. True is returned if the acceptor made a match, error is returned +// if there was an error attempting to perform the match. +func (a *WaiterAcceptor) match(name string, l aws.Logger, req *Request, err error) (bool, error) { + result := false + var vals []interface{} + + switch a.Matcher { + case PathAllWaiterMatch, PathWaiterMatch: + // Require all matches to be equal for result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } + result = true + for _, val := range vals { + if !awsutil.DeepEqual(val, a.Expected) { + result = false + break + } + } + case PathAnyWaiterMatch: + // Only a single match needs to equal for the result to match + vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + for _, val := range vals { + if awsutil.DeepEqual(val, a.Expected) { + result = true + break + } + } + case PathListWaiterMatch: + // ignored matcher + case StatusWaiterMatch: + s := a.Expected.(int) + result = s == req.HTTPResponse.StatusCode + case ErrorWaiterMatch: + if aerr, ok := err.(awserr.Error); ok { + result = aerr.Code() == a.Expected.(string) + } + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected matcher: %s", + name, a.Matcher) + } + + if !result { + // If there was no matching result found there is nothing more to do + // for this response, retry the request. 
+ return false, nil + } + + switch a.State { + case SuccessWaiterState: + // waiter completed + return true, nil + case FailureWaiterState: + // Waiter failure state triggered + return true, awserr.New(WaiterResourceNotReadyErrorCode, + "failed waiting for successful resource state", err) + case RetryWaiterState: + // clear the error and retry the operation + return false, nil + default: + waiterLogf(l, "WARNING: Waiter %s encountered unexpected state: %s", + name, a.State) + return false, nil + } +} + +func waiterLogf(logger aws.Logger, msg string, args ...interface{}) { + if logger != nil { + logger.Log(fmt.Sprintf(msg, args...)) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go new file mode 100644 index 0000000..ea7b886 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -0,0 +1,273 @@ +/* +Package session provides configuration for the SDK's service clients. + +Sessions can be shared across all service clients that share the same base +configuration. The Session is built from the SDK's default configuration and +request handlers. + +Sessions should be cached when possible, because creating a new Session will +load all configuration values from the environment, and config files each time +the Session is created. Sharing the Session value across all of your service +clients will ensure the configuration is loaded the fewest number of times possible. + +Concurrency + +Sessions are safe to use concurrently as long as the Session is not being +modified. The SDK will not modify the Session once the Session has been created. +Creating service clients concurrently from a shared Session is safe. + +Sessions from Shared Config + +Sessions can be created using the method above that will only load the +additional config if the AWS_SDK_LOAD_CONFIG environment variable is set. +Alternatively you can explicitly create a Session with shared config enabled. +To do this you can use NewSessionWithOptions to configure how the Session will +be created. Using the NewSessionWithOptions with SharedConfigState set to +SharedConfigEnable will create the session as if the AWS_SDK_LOAD_CONFIG +environment variable was set. + +Creating Sessions + +When creating Sessions optional aws.Config values can be passed in that will +override the default, or loaded config values the Session is being created +with. This allows you to provide additional, or case based, configuration +as needed. + +By default NewSession will only load credentials from the shared credentials +file (~/.aws/credentials). If the AWS_SDK_LOAD_CONFIG environment variable is +set to a truthy value the Session will be created from the configuration +values from the shared config (~/.aws/config) and shared credentials +(~/.aws/credentials) files. See the section Sessions from Shared Config for +more information. + +Create a Session with the default config and request handlers. With credentials +region, and profile loaded from the environment and shared config automatically. +Requires the AWS_PROFILE to be set, or "default" is used. 
+
+    // Create Session
+    sess := session.Must(session.NewSession())
+
+    // Create a Session with a custom region
+    sess := session.Must(session.NewSession(&aws.Config{
+        Region: aws.String("us-east-1"),
+    }))
+
+    // Create an S3 client instance from a session
+    sess := session.Must(session.NewSession())
+
+    svc := s3.New(sess)
+
+Create Session With Option Overrides
+
+In addition to NewSession, Sessions can be created using NewSessionWithOptions.
+This func allows you to control and override how the Session will be created
+through code instead of being driven by environment variables only.
+
+Use NewSessionWithOptions when you want to provide the config profile, or
+override the shared config state (AWS_SDK_LOAD_CONFIG).
+
+    // Equivalent to session.NewSession()
+    sess := session.Must(session.NewSessionWithOptions(session.Options{
+        // Options
+    }))
+
+    // Specify profile to load for the session's config
+    sess := session.Must(session.NewSessionWithOptions(session.Options{
+        Profile: "profile_name",
+    }))
+
+    // Specify profile for config and region for requests
+    sess := session.Must(session.NewSessionWithOptions(session.Options{
+        Config: aws.Config{Region: aws.String("us-east-1")},
+        Profile: "profile_name",
+    }))
+
+    // Force enable Shared Config support
+    sess := session.Must(session.NewSessionWithOptions(session.Options{
+        SharedConfigState: session.SharedConfigEnable,
+    }))
+
+Adding Handlers
+
+You can add handlers to a session for processing HTTP requests. All service
+clients that use the session inherit the handlers. For example, the following
+handler logs every request and its payload made by a service client:
+
+    // Create a session, and add additional handlers for all service
+    // clients created with the Session to inherit. Adds logging handler.
+    sess := session.Must(session.NewSession())
+
+    sess.Handlers.Send.PushFront(func(r *request.Request) {
+        // Log every request made and its payload
+        logger.Printf("Request: %s/%s, Payload: %s",
+            r.ClientInfo.ServiceName, r.Operation, r.Params)
+    })
+
+Deprecated "New" function
+
+The New session function has been deprecated because it does not provide a good
+way to return errors that occur when loading the configuration files and values.
+Because of this, NewSession was created so errors can be retrieved when
+creating a session fails.
+
+Shared Config Fields
+
+By default the SDK will only load the shared credentials file's (~/.aws/credentials)
+credentials values, and all other config is provided by the environment variables,
+SDK defaults, and user provided aws.Config values.
+
+If the AWS_SDK_LOAD_CONFIG environment variable is set, or the SharedConfigEnable
+option is used to create the Session, the full shared config values will be
+loaded. This includes credentials, region, and support for assume role. In
+addition the Session will load its configuration from both the shared config
+file (~/.aws/config) and shared credentials file (~/.aws/credentials). Both
+files have the same format.
+
+If both config files are present the configuration from both files will be
+read, with values from the shared credentials file (~/.aws/credentials) taking
+precedence over those in the shared config file (~/.aws/config).
+
+Credentials are the values the SDK should use for authenticating requests with
+AWS Services. Credentials read from a configuration file must include both
+aws_access_key_id and aws_secret_access_key, provided together in the
+same file, to be considered valid.
The values will be ignored if the group is
+not complete. aws_session_token is an optional field that can be provided if both of
+the other two fields are also provided.
+
+    aws_access_key_id = AKID
+    aws_secret_access_key = SECRET
+    aws_session_token = TOKEN
+
+Assume Role values allow you to configure the SDK to assume an IAM role using
+a set of credentials provided in a config file via the source_profile field.
+Both "role_arn" and "source_profile" are required. The SDK supports assuming
+a role with MFA token if the session option AssumeRoleTokenProvider
+is set.
+
+    role_arn = arn:aws:iam:::role/
+    source_profile = profile_with_creds
+    external_id = 1234
+    mfa_serial =
+    role_session_name = session_name
+
+Region is the region the SDK should use for looking up AWS service endpoints
+and signing requests.
+
+    region = us-east-1
+
+Assume Role with MFA token
+
+To create a session with support for assuming an IAM role with MFA set the
+session option AssumeRoleTokenProvider to a function that will prompt for the
+MFA token code when the SDK assumes the role and refreshes the role's credentials.
+This allows you to configure the SDK via the shared config to assume a role
+with MFA tokens.
+
+In order for the SDK to assume a role with MFA the SharedConfigState
+session option must be set to SharedConfigEnable, or the AWS_SDK_LOAD_CONFIG
+environment variable must be set.
+
+The shared configuration instructs the SDK to assume an IAM role with MFA
+when the mfa_serial configuration field is set in the shared config
+(~/.aws/config) or shared credentials (~/.aws/credentials) file.
+
+If mfa_serial is set in the configuration but the AssumeRoleTokenProvider
+session option is not set, an error will be returned when creating the session.
+
+    sess := session.Must(session.NewSessionWithOptions(session.Options{
+        AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
+    }))
+
+    // Create service client value configured for credentials
+    // from assumed role.
+    svc := s3.New(sess)
+
+To set up assume role outside of a session see the stscreds.AssumeRoleProvider
+documentation.
+
+Environment Variables
+
+When a Session is created several environment variables can be set to adjust
+how the SDK functions, and what configuration data it loads when creating
+Sessions. All environment values are optional, but some values like credentials
+require multiple of the values to be set or the partial values will be ignored.
+All environment variable values are strings unless otherwise noted.
+
+Environment configuration values. If set, both Access Key ID and Secret Access
+Key must be provided. Session Token can optionally also be provided, but is
+not required.
+
+    # Access Key ID
+    AWS_ACCESS_KEY_ID=AKID
+    AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set.
+
+    # Secret Access Key
+    AWS_SECRET_ACCESS_KEY=SECRET
+    AWS_SECRET_KEY=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set.
+
+    # Session Token
+    AWS_SESSION_TOKEN=TOKEN
+
+Region value will instruct the SDK where to make service API requests to. If it
+is not provided in the environment the region must be provided before a service
+client request is made.
+
+    AWS_REGION=us-east-1
+
+    # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set,
+    # and AWS_REGION is not also set.
+    AWS_DEFAULT_REGION=us-east-1
+
+Profile name the SDK should use when loading shared config from the
+configuration files. If not provided, "default" will be used as the profile name.
+ + AWS_PROFILE=my_profile + + # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + # and AWS_PROFILE is not also set. + AWS_DEFAULT_PROFILE=my_profile + +SDK load config instructs the SDK to load the shared config in addition to +shared credentials. This also expands the configuration loaded so the shared +credentials will have parity with the shared config file. This also enables +Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE +env values as well. + + AWS_SDK_LOAD_CONFIG=1 + +Shared credentials file path can be set to instruct the SDK to use an alternative +file for the shared credentials. If not set the file will be loaded from +$HOME/.aws/credentials on Linux/Unix based systems, and +%USERPROFILE%\.aws\credentials on Windows. + + AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + +Shared config file path can be set to instruct the SDK to use an alternative +file for the shared config. If not set the file will be loaded from +$HOME/.aws/config on Linux/Unix based systems, and +%USERPROFILE%\.aws\config on Windows. + + AWS_CONFIG_FILE=$HOME/my_shared_config + +Path to a custom Credentials Authority (CA) bundle PEM file that the SDK +will use instead of the default system's root CA bundle. Use this only +if you want to replace the CA bundle the SDK uses for TLS requests. + + AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + +Enabling this option will attempt to merge the Transport into the SDK's HTTP +client. If the client's Transport is not a http.Transport an error will be +returned. If the Transport's TLS config is set this option will cause the SDK +to overwrite the Transport's TLS config's RootCAs value. If the CA bundle file +contains multiple certificates all of them will be loaded. + +The Session option CustomCABundle is also available when creating sessions +to also enable this feature. CustomCABundle session option field has priority +over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + +Setting a custom HTTPClient in the aws.Config options will override this setting. +To use this option and custom HTTP client, the HTTP client needs to be provided +when creating the session. Not the service client. +*/ +package session diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go new file mode 100644 index 0000000..e6278a7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go @@ -0,0 +1,208 @@ +package session + +import ( + "os" + "path/filepath" + "strconv" + + "github.com/aws/aws-sdk-go/aws/credentials" +) + +// envConfig is a collection of environment values the SDK will read +// setup config from. All environment values are optional. But some values +// such as credentials require multiple values to be complete or the values +// will be ignored. +type envConfig struct { + // Environment configuration values. If set both Access Key ID and Secret Access + // Key must be provided. Session Token and optionally also be provided, but is + // not required. + // + // # Access Key ID + // AWS_ACCESS_KEY_ID=AKID + // AWS_ACCESS_KEY=AKID # only read if AWS_ACCESS_KEY_ID is not set. + // + // # Secret Access Key + // AWS_SECRET_ACCESS_KEY=SECRET + // AWS_SECRET_KEY=SECRET=SECRET # only read if AWS_SECRET_ACCESS_KEY is not set. + // + // # Session Token + // AWS_SESSION_TOKEN=TOKEN + Creds credentials.Value + + // Region value will instruct the SDK where to make service API requests to. 
If is + // not provided in the environment the region must be provided before a service + // client request is made. + // + // AWS_REGION=us-east-1 + // + // # AWS_DEFAULT_REGION is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_REGION is not also set. + // AWS_DEFAULT_REGION=us-east-1 + Region string + + // Profile name the SDK should load use when loading shared configuration from the + // shared configuration files. If not provided "default" will be used as the + // profile name. + // + // AWS_PROFILE=my_profile + // + // # AWS_DEFAULT_PROFILE is only read if AWS_SDK_LOAD_CONFIG is also set, + // # and AWS_PROFILE is not also set. + // AWS_DEFAULT_PROFILE=my_profile + Profile string + + // SDK load config instructs the SDK to load the shared config in addition to + // shared credentials. This also expands the configuration loaded from the shared + // credentials to have parity with the shared config file. This also enables + // Region and Profile support for the AWS_DEFAULT_REGION and AWS_DEFAULT_PROFILE + // env values as well. + // + // AWS_SDK_LOAD_CONFIG=1 + EnableSharedConfig bool + + // Shared credentials file path can be set to instruct the SDK to use an alternate + // file for the shared credentials. If not set the file will be loaded from + // $HOME/.aws/credentials on Linux/Unix based systems, and + // %USERPROFILE%\.aws\credentials on Windows. + // + // AWS_SHARED_CREDENTIALS_FILE=$HOME/my_shared_credentials + SharedCredentialsFile string + + // Shared config file path can be set to instruct the SDK to use an alternate + // file for the shared config. If not set the file will be loaded from + // $HOME/.aws/config on Linux/Unix based systems, and + // %USERPROFILE%\.aws\config on Windows. + // + // AWS_CONFIG_FILE=$HOME/my_shared_config + SharedConfigFile string + + // Sets the path to a custom Credentials Authroity (CA) Bundle PEM file + // that the SDK will use instead of the the system's root CA bundle. + // Only use this if you want to configure the SDK to use a custom set + // of CAs. + // + // Enabling this option will attempt to merge the Transport + // into the SDK's HTTP client. If the client's Transport is + // not a http.Transport an error will be returned. If the + // Transport's TLS config is set this option will cause the + // SDK to overwrite the Transport's TLS config's RootCAs value. + // + // Setting a custom HTTPClient in the aws.Config options will override this setting. + // To use this option and custom HTTP client, the HTTP client needs to be provided + // when creating the session. Not the service client. + // + // AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle + CustomCABundle string +} + +var ( + credAccessEnvKey = []string{ + "AWS_ACCESS_KEY_ID", + "AWS_ACCESS_KEY", + } + credSecretEnvKey = []string{ + "AWS_SECRET_ACCESS_KEY", + "AWS_SECRET_KEY", + } + credSessionEnvKey = []string{ + "AWS_SESSION_TOKEN", + } + + regionEnvKeys = []string{ + "AWS_REGION", + "AWS_DEFAULT_REGION", // Only read if AWS_SDK_LOAD_CONFIG is also set + } + profileEnvKeys = []string{ + "AWS_PROFILE", + "AWS_DEFAULT_PROFILE", // Only read if AWS_SDK_LOAD_CONFIG is also set + } +) + +// loadEnvConfig retrieves the SDK's environment configuration. +// See `envConfig` for the values that will be retrieved. +// +// If the environment variable `AWS_SDK_LOAD_CONFIG` is set to a truthy value +// the shared SDK config will be loaded in addition to the SDK's specific +// configuration values. 
+func loadEnvConfig() envConfig { + enableSharedConfig, _ := strconv.ParseBool(os.Getenv("AWS_SDK_LOAD_CONFIG")) + return envConfigLoad(enableSharedConfig) +} + +// loadEnvSharedConfig retrieves the SDK's environment configuration, and the +// SDK shared config. See `envConfig` for the values that will be retrieved. +// +// Loads the shared configuration in addition to the SDK's specific configuration. +// This will load the same values as `loadEnvConfig` if the `AWS_SDK_LOAD_CONFIG` +// environment variable is set. +func loadSharedEnvConfig() envConfig { + return envConfigLoad(true) +} + +func envConfigLoad(enableSharedConfig bool) envConfig { + cfg := envConfig{} + + cfg.EnableSharedConfig = enableSharedConfig + + setFromEnvVal(&cfg.Creds.AccessKeyID, credAccessEnvKey) + setFromEnvVal(&cfg.Creds.SecretAccessKey, credSecretEnvKey) + setFromEnvVal(&cfg.Creds.SessionToken, credSessionEnvKey) + + // Require logical grouping of credentials + if len(cfg.Creds.AccessKeyID) == 0 || len(cfg.Creds.SecretAccessKey) == 0 { + cfg.Creds = credentials.Value{} + } else { + cfg.Creds.ProviderName = "EnvConfigCredentials" + } + + regionKeys := regionEnvKeys + profileKeys := profileEnvKeys + if !cfg.EnableSharedConfig { + regionKeys = regionKeys[:1] + profileKeys = profileKeys[:1] + } + + setFromEnvVal(&cfg.Region, regionKeys) + setFromEnvVal(&cfg.Profile, profileKeys) + + cfg.SharedCredentialsFile = sharedCredentialsFilename() + cfg.SharedConfigFile = sharedConfigFilename() + + cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE") + + return cfg +} + +func setFromEnvVal(dst *string, keys []string) { + for _, k := range keys { + if v := os.Getenv(k); len(v) > 0 { + *dst = v + break + } + } +} + +func sharedCredentialsFilename() string { + if name := os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); len(name) > 0 { + return name + } + + return filepath.Join(userHomeDir(), ".aws", "credentials") +} + +func sharedConfigFilename() string { + if name := os.Getenv("AWS_CONFIG_FILE"); len(name) > 0 { + return name + } + + return filepath.Join(userHomeDir(), ".aws", "config") +} + +func userHomeDir() string { + homeDir := os.Getenv("HOME") // *nix + if len(homeDir) == 0 { // windows + homeDir = os.Getenv("USERPROFILE") + } + + return homeDir +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go new file mode 100644 index 0000000..34a2920 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -0,0 +1,599 @@ +package session + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "io/ioutil" + "net/http" + "os" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/corehandlers" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/credentials/stscreds" + "github.com/aws/aws-sdk-go/aws/defaults" + "github.com/aws/aws-sdk-go/aws/endpoints" + "github.com/aws/aws-sdk-go/aws/request" +) + +// A Session provides a central location to create service clients from and +// store configurations and request handlers for those services. +// +// Sessions are safe to create service clients concurrently, but it is not safe +// to mutate the Session concurrently. +// +// The Session satisfies the service client's client.ClientConfigProvider. 
+type Session struct { + Config *aws.Config + Handlers request.Handlers +} + +// New creates a new instance of the handlers merging in the provided configs +// on top of the SDK's default configurations. Once the Session is created it +// can be mutated to modify the Config or Handlers. The Session is safe to be +// read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment is set to a truthy value, the New +// method could now encounter an error when loading the configuration. When +// The environment variable is set, and an error occurs, New will return a +// session that will fail all requests reporting the error that occurred while +// loading the session. Use NewSession to get the error when creating the +// session. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded, in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. +// +// Deprecated: Use NewSession functions to create sessions instead. NewSession +// has the same functionality as New except an error can be returned when the +// func is called instead of waiting to receive an error until a request is made. +func New(cfgs ...*aws.Config) *Session { + // load initial config from environment + envCfg := loadEnvConfig() + + if envCfg.EnableSharedConfig { + s, err := newSession(Options{}, envCfg, cfgs...) + if err != nil { + // Old session.New expected all errors to be discovered when + // a request is made, and would report the errors then. This + // needs to be replicated if an error occurs while creating + // the session. + msg := "failed to create session with AWS_SDK_LOAD_CONFIG enabled. " + + "Use session.NewSession to handle errors occurring during session creation." + + // Session creation failed, need to report the error and prevent + // any requests from succeeding. + s = &Session{Config: defaults.Config()} + s.Config.MergeIn(cfgs...) + s.Config.Logger.Log("ERROR:", msg, "Error:", err) + s.Handlers.Validate.PushBack(func(r *request.Request) { + r.Error = err + }) + } + return s + } + + return deprecatedNewSession(cfgs...) +} + +// NewSession returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. Once the Session is created +// it can be mutated to modify the Config or Handlers. The Session is safe to +// be read concurrently, but it should not be written to concurrently. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// See the NewSessionWithOptions func for information on how to override or +// control through code how the Session will be created. Such as specifying the +// config profile, and controlling if shared config is enabled or not. +func NewSession(cfgs ...*aws.Config) (*Session, error) { + opts := Options{} + opts.Config.MergeIn(cfgs...) 
+ + return NewSessionWithOptions(opts) +} + +// SharedConfigState provides the ability to optionally override the state +// of the session's creation based on the shared config being enabled or +// disabled. +type SharedConfigState int + +const ( + // SharedConfigStateFromEnv does not override any state of the + // AWS_SDK_LOAD_CONFIG env var. It is the default value of the + // SharedConfigState type. + SharedConfigStateFromEnv SharedConfigState = iota + + // SharedConfigDisable overrides the AWS_SDK_LOAD_CONFIG env var value + // and disables the shared config functionality. + SharedConfigDisable + + // SharedConfigEnable overrides the AWS_SDK_LOAD_CONFIG env var value + // and enables the shared config functionality. + SharedConfigEnable +) + +// Options provides the means to control how a Session is created and what +// configuration values will be loaded. +// +type Options struct { + // Provides config values for the SDK to use when creating service clients + // and making API requests to services. Any value set in with this field + // will override the associated value provided by the SDK defaults, + // environment or config files where relevant. + // + // If not set, configuration values from from SDK defaults, environment, + // config will be used. + Config aws.Config + + // Overrides the config profile the Session should be created from. If not + // set the value of the environment variable will be loaded (AWS_PROFILE, + // or AWS_DEFAULT_PROFILE if the Shared Config is enabled). + // + // If not set and environment variables are not set the "default" + // (DefaultSharedConfigProfile) will be used as the profile to load the + // session config from. + Profile string + + // Instructs how the Session will be created based on the AWS_SDK_LOAD_CONFIG + // environment variable. By default a Session will be created using the + // value provided by the AWS_SDK_LOAD_CONFIG environment variable. + // + // Setting this value to SharedConfigEnable or SharedConfigDisable + // will allow you to override the AWS_SDK_LOAD_CONFIG environment variable + // and enable or disable the shared config functionality. + SharedConfigState SharedConfigState + + // Ordered list of files the session will load configuration from. + // It will override environment variable AWS_SHARED_CREDENTIALS_FILE, AWS_CONFIG_FILE. + SharedConfigFiles []string + + // When the SDK's shared config is configured to assume a role with MFA + // this option is required in order to provide the mechanism that will + // retrieve the MFA token. There is no default value for this field. If + // it is not set an error will be returned when creating the session. + // + // This token provider will be called when ever the assumed role's + // credentials need to be refreshed. Within the context of service clients + // all sharing the same session the SDK will ensure calls to the token + // provider are atomic. When sharing a token provider across multiple + // sessions additional synchronization logic is needed to ensure the + // token providers do not introduce race conditions. It is recommend to + // share the session where possible. + // + // stscreds.StdinTokenProvider is a basic implementation that will prompt + // from stdin for the MFA token code. + // + // This field is only used if the shared configuration is enabled, and + // the config enables assume role wit MFA via the mfa_serial field. 
+ AssumeRoleTokenProvider func() (string, error) + + // Reader for a custom Credentials Authority (CA) bundle in PEM format that + // the SDK will use instead of the default system's root CA bundle. Use this + // only if you want to replace the CA bundle the SDK uses for TLS requests. + // + // Enabling this option will attempt to merge the Transport into the SDK's HTTP + // client. If the client's Transport is not a http.Transport an error will be + // returned. If the Transport's TLS config is set this option will cause the SDK + // to overwrite the Transport's TLS config's RootCAs value. If the CA + // bundle reader contains multiple certificates all of them will be loaded. + // + // The Session option CustomCABundle is also available when creating sessions + // to also enable this feature. CustomCABundle session option field has priority + // over the AWS_CA_BUNDLE environment variable, and will be used if both are set. + CustomCABundle io.Reader +} + +// NewSessionWithOptions returns a new Session created from SDK defaults, config files, +// environment, and user provided config files. This func uses the Options +// values to configure how the Session is created. +// +// If the AWS_SDK_LOAD_CONFIG environment variable is set to a truthy value +// the shared config file (~/.aws/config) will also be loaded in addition to +// the shared credentials file (~/.aws/credentials). Values set in both the +// shared config, and shared credentials will be taken from the shared +// credentials file. Enabling the Shared Config will also allow the Session +// to be built with retrieving credentials with AssumeRole set in the config. +// +// // Equivalent to session.New +// sess := session.Must(session.NewSessionWithOptions(session.Options{})) +// +// // Specify profile to load for the session's config +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Profile: "profile_name", +// })) +// +// // Specify profile for config and region for requests +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// Config: aws.Config{Region: aws.String("us-east-1")}, +// Profile: "profile_name", +// })) +// +// // Force enable Shared Config support +// sess := session.Must(session.NewSessionWithOptions(session.Options{ +// SharedConfigState: session.SharedConfigEnable, +// })) +func NewSessionWithOptions(opts Options) (*Session, error) { + var envCfg envConfig + if opts.SharedConfigState == SharedConfigEnable { + envCfg = loadSharedEnvConfig() + } else { + envCfg = loadEnvConfig() + } + + if len(opts.Profile) > 0 { + envCfg.Profile = opts.Profile + } + + switch opts.SharedConfigState { + case SharedConfigDisable: + envCfg.EnableSharedConfig = false + case SharedConfigEnable: + envCfg.EnableSharedConfig = true + } + + // Only use AWS_CA_BUNDLE if session option is not provided. + if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil { + f, err := os.Open(envCfg.CustomCABundle) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to open custom CA bundle PEM file", err) + } + defer f.Close() + opts.CustomCABundle = f + } + + return newSession(opts, envCfg, &opts.Config) +} + +// Must is a helper function to ensure the Session is valid and there was no +// error when calling a NewSession function. +// +// This helper is intended to be used in variable initialization to load the +// Session and configuration at startup. 
Such as: +// +// var sess = session.Must(session.NewSession()) +func Must(sess *Session, err error) *Session { + if err != nil { + panic(err) + } + + return sess +} + +func deprecatedNewSession(cfgs ...*aws.Config) *Session { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + if cfg.EndpointResolver == nil { + // An endpoint resolver is required for a session to be able to provide + // endpoints for service client configurations. + cfg.EndpointResolver = endpoints.DefaultResolver() + } + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + return s +} + +func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session, error) { + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Get a merged version of the user provided config to determine if + // credentials were. + userCfg := &aws.Config{} + userCfg.MergeIn(cfgs...) + + // Ordered config files will be loaded in with later files overwriting + // previous config file values. + var cfgFiles []string + if opts.SharedConfigFiles != nil { + cfgFiles = opts.SharedConfigFiles + } else { + cfgFiles = []string{envCfg.SharedConfigFile, envCfg.SharedCredentialsFile} + if !envCfg.EnableSharedConfig { + // The shared config file (~/.aws/config) is only loaded if instructed + // to load via the envConfig.EnableSharedConfig (AWS_SDK_LOAD_CONFIG). + cfgFiles = cfgFiles[1:] + } + } + + // Load additional config from file(s) + sharedCfg, err := loadSharedConfig(envCfg.Profile, cfgFiles) + if err != nil { + return nil, err + } + + if err := mergeConfigSrcs(cfg, userCfg, envCfg, sharedCfg, handlers, opts); err != nil { + return nil, err + } + + s := &Session{ + Config: cfg, + Handlers: handlers, + } + + initHandlers(s) + + // Setup HTTP client with custom cert bundle if enabled + if opts.CustomCABundle != nil { + if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil { + return nil, err + } + } + + return s, nil +} + +func loadCustomCABundle(s *Session, bundle io.Reader) error { + var t *http.Transport + switch v := s.Config.HTTPClient.Transport.(type) { + case *http.Transport: + t = v + default: + if s.Config.HTTPClient.Transport != nil { + return awserr.New("LoadCustomCABundleError", + "unable to load custom CA bundle, HTTPClient's transport unsupported type", nil) + } + } + if t == nil { + t = &http.Transport{} + } + + p, err := loadCertPool(bundle) + if err != nil { + return err + } + if t.TLSClientConfig == nil { + t.TLSClientConfig = &tls.Config{} + } + t.TLSClientConfig.RootCAs = p + + s.Config.HTTPClient.Transport = t + + return nil +} + +func loadCertPool(r io.Reader) (*x509.CertPool, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, awserr.New("LoadCustomCABundleError", + "failed to read custom CA bundle PEM file", err) + } + + p := x509.NewCertPool() + if !p.AppendCertsFromPEM(b) { + return nil, awserr.New("LoadCustomCABundleError", + "failed to load custom CA bundle PEM file", err) + } + + return p, nil +} + +func mergeConfigSrcs(cfg, userCfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig, handlers request.Handlers, sessOpts Options) error { + // Merge in user provided configuration + cfg.MergeIn(userCfg) + + // Region if not already set by user + if 
len(aws.StringValue(cfg.Region)) == 0 { + if len(envCfg.Region) > 0 { + cfg.WithRegion(envCfg.Region) + } else if envCfg.EnableSharedConfig && len(sharedCfg.Region) > 0 { + cfg.WithRegion(sharedCfg.Region) + } + } + + // Configure credentials if not already set + if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil { + if len(envCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + envCfg.Creds, + ) + } else if envCfg.EnableSharedConfig && len(sharedCfg.AssumeRole.RoleARN) > 0 && sharedCfg.AssumeRoleSource != nil { + cfgCp := *cfg + cfgCp.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.AssumeRoleSource.Creds, + ) + if len(sharedCfg.AssumeRole.MFASerial) > 0 && sessOpts.AssumeRoleTokenProvider == nil { + // AssumeRole Token provider is required if doing Assume Role + // with MFA. + return AssumeRoleTokenProviderNotSetError{} + } + cfg.Credentials = stscreds.NewCredentials( + &Session{ + Config: &cfgCp, + Handlers: handlers.Copy(), + }, + sharedCfg.AssumeRole.RoleARN, + func(opt *stscreds.AssumeRoleProvider) { + opt.RoleSessionName = sharedCfg.AssumeRole.RoleSessionName + + // Assume role with external ID + if len(sharedCfg.AssumeRole.ExternalID) > 0 { + opt.ExternalID = aws.String(sharedCfg.AssumeRole.ExternalID) + } + + // Assume role with MFA + if len(sharedCfg.AssumeRole.MFASerial) > 0 { + opt.SerialNumber = aws.String(sharedCfg.AssumeRole.MFASerial) + opt.TokenProvider = sessOpts.AssumeRoleTokenProvider + } + }, + ) + } else if len(sharedCfg.Creds.AccessKeyID) > 0 { + cfg.Credentials = credentials.NewStaticCredentialsFromCreds( + sharedCfg.Creds, + ) + } else { + // Fallback to default credentials provider, include mock errors + // for the credential chain so user can identify why credentials + // failed to be retrieved. + cfg.Credentials = credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ + &credProviderError{Err: awserr.New("EnvAccessKeyNotFound", "failed to find credentials in the environment.", nil)}, + &credProviderError{Err: awserr.New("SharedCredsLoad", fmt.Sprintf("failed to load profile, %s.", envCfg.Profile), nil)}, + defaults.RemoteCredProvider(*cfg, handlers), + }, + }) + } + } + + return nil +} + +// AssumeRoleTokenProviderNotSetError is an error returned when creating a session when the +// MFAToken option is not set when shared config is configured load assume a +// role with an MFA token. +type AssumeRoleTokenProviderNotSetError struct{} + +// Code is the short id of the error. +func (e AssumeRoleTokenProviderNotSetError) Code() string { + return "AssumeRoleTokenProviderNotSetError" +} + +// Message is the description of the error +func (e AssumeRoleTokenProviderNotSetError) Message() string { + return fmt.Sprintf("assume role with MFA enabled, but AssumeRoleTokenProvider session option not set.") +} + +// OrigErr is the underlying error that caused the failure. +func (e AssumeRoleTokenProviderNotSetError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. 
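As the merge logic above shows, AssumeRoleTokenProviderNotSetError is returned when the shared config requests MFA but no token provider was supplied. A minimal sketch of how a caller might supply the provider and detect that error; the error-code string comes from Code below, everything else is illustrative.

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
        "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Load the shared config; if the selected profile sets mfa_serial,
        // a token provider must be supplied or session creation fails.
        sess, err := session.NewSessionWithOptions(session.Options{
            SharedConfigState:       session.SharedConfigEnable,
            AssumeRoleTokenProvider: stscreds.StdinTokenProvider,
        })
        if err != nil {
            if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "AssumeRoleTokenProviderNotSetError" {
                fmt.Println("shared config requires MFA but no token provider was set")
            }
            return
        }
        _ = sess
    }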
+func (e AssumeRoleTokenProviderNotSetError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} + +type credProviderError struct { + Err error +} + +var emptyCreds = credentials.Value{} + +func (c credProviderError) Retrieve() (credentials.Value, error) { + return credentials.Value{}, c.Err +} +func (c credProviderError) IsExpired() bool { + return true +} + +func initHandlers(s *Session) { + // Add the Validate parameter handler if it is not disabled. + s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler) + if !aws.BoolValue(s.Config.DisableParamValidation) { + s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler) + } +} + +// Copy creates and returns a copy of the current Session, coping the config +// and handlers. If any additional configs are provided they will be merged +// on top of the Session's copied config. +// +// // Create a copy of the current Session, configured for the us-west-2 region. +// sess.Copy(&aws.Config{Region: aws.String("us-west-2")}) +func (s *Session) Copy(cfgs ...*aws.Config) *Session { + newSession := &Session{ + Config: s.Config.Copy(cfgs...), + Handlers: s.Handlers.Copy(), + } + + initHandlers(newSession) + + return newSession +} + +// ClientConfig satisfies the client.ConfigProvider interface and is used to +// configure the service client instances. Passing the Session to the service +// client's constructor (New) will use this method to configure the client. +func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config { + // Backwards compatibility, the error will be eaten if user calls ClientConfig + // directly. All SDK services will use ClientconfigWithError. + cfg, _ := s.clientConfigWithErr(serviceName, cfgs...) + + return cfg +} + +func (s *Session) clientConfigWithErr(serviceName string, cfgs ...*aws.Config) (client.Config, error) { + s = s.Copy(cfgs...) + + var resolved endpoints.ResolvedEndpoint + var err error + + region := aws.StringValue(s.Config.Region) + + if endpoint := aws.StringValue(s.Config.Endpoint); len(endpoint) != 0 { + resolved.URL = endpoints.AddScheme(endpoint, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } else { + resolved, err = s.Config.EndpointResolver.EndpointFor( + serviceName, region, + func(opt *endpoints.Options) { + opt.DisableSSL = aws.BoolValue(s.Config.DisableSSL) + opt.UseDualStack = aws.BoolValue(s.Config.UseDualStack) + + // Support the condition where the service is modeled but its + // endpoint metadata is not available. + opt.ResolveUnknownService = true + }, + ) + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningName: resolved.SigningName, + }, err +} + +// ClientConfigNoResolveEndpoint is the same as ClientConfig with the exception +// that the EndpointResolver will not be used to resolve the endpoint. The only +// endpoint set must come from the aws.Config.Endpoint field. +func (s *Session) ClientConfigNoResolveEndpoint(cfgs ...*aws.Config) client.Config { + s = s.Copy(cfgs...) 
+ + var resolved endpoints.ResolvedEndpoint + + region := aws.StringValue(s.Config.Region) + + if ep := aws.StringValue(s.Config.Endpoint); len(ep) > 0 { + resolved.URL = endpoints.AddScheme(ep, aws.BoolValue(s.Config.DisableSSL)) + resolved.SigningRegion = region + } + + return client.Config{ + Config: s.Config, + Handlers: s.Handlers, + Endpoint: resolved.URL, + SigningRegion: resolved.SigningRegion, + SigningName: resolved.SigningName, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go new file mode 100644 index 0000000..b58076f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go @@ -0,0 +1,295 @@ +package session + +import ( + "fmt" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/go-ini/ini" +) + +const ( + // Static Credentials group + accessKeyIDKey = `aws_access_key_id` // group required + secretAccessKey = `aws_secret_access_key` // group required + sessionTokenKey = `aws_session_token` // optional + + // Assume Role Credentials group + roleArnKey = `role_arn` // group required + sourceProfileKey = `source_profile` // group required + externalIDKey = `external_id` // optional + mfaSerialKey = `mfa_serial` // optional + roleSessionNameKey = `role_session_name` // optional + + // Additional Config fields + regionKey = `region` + + // DefaultSharedConfigProfile is the default profile to be used when + // loading configuration from the config files if another profile name + // is not provided. + DefaultSharedConfigProfile = `default` +) + +type assumeRoleConfig struct { + RoleARN string + SourceProfile string + ExternalID string + MFASerial string + RoleSessionName string +} + +// sharedConfig represents the configuration fields of the SDK config files. +type sharedConfig struct { + // Credentials values from the config file. Both aws_access_key_id + // and aws_secret_access_key must be provided together in the same file + // to be considered valid. The values will be ignored if not a complete group. + // aws_session_token is an optional field that can be provided if both of the + // other two fields are also provided. + // + // aws_access_key_id + // aws_secret_access_key + // aws_session_token + Creds credentials.Value + + AssumeRole assumeRoleConfig + AssumeRoleSource *sharedConfig + + // Region is the region the SDK should use for looking up AWS service endpoints + // and signing requests. + // + // region + Region string +} + +type sharedConfigFile struct { + Filename string + IniData *ini.File +} + +// loadSharedConfig retrieves the configuration from the list of files +// using the profile provided. The order the files are listed will determine +// precedence. Values in subsequent files will overwrite values defined in +// earlier files. +// +// For example, given two files A and B. Both define credentials. If the order +// of the files are A then B, B's credential values will be used instead of A's. +// +// See sharedConfig.setFromFile for information how the config files +// will be loaded. 
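The file-ordering behavior documented above can also be driven from application code through the exported session.Options.SharedConfigFiles field described earlier. A minimal sketch, assuming the two hypothetical files exist and contain valid profiles; later entries overwrite values from earlier ones.

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // override.ini is loaded after base.ini, so any region it sets wins.
        sess := session.Must(session.NewSessionWithOptions(session.Options{
            SharedConfigState: session.SharedConfigEnable,
            SharedConfigFiles: []string{"testdata/base.ini", "testdata/override.ini"},
        }))

        _ = aws.StringValue(sess.Config.Region) // region from override.ini, if set
    }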
+func loadSharedConfig(profile string, filenames []string) (sharedConfig, error) { + if len(profile) == 0 { + profile = DefaultSharedConfigProfile + } + + files, err := loadSharedConfigIniFiles(filenames) + if err != nil { + return sharedConfig{}, err + } + + cfg := sharedConfig{} + if err = cfg.setFromIniFiles(profile, files); err != nil { + return sharedConfig{}, err + } + + if len(cfg.AssumeRole.SourceProfile) > 0 { + if err := cfg.setAssumeRoleSource(profile, files); err != nil { + return sharedConfig{}, err + } + } + + return cfg, nil +} + +func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) { + files := make([]sharedConfigFile, 0, len(filenames)) + + for _, filename := range filenames { + b, err := ioutil.ReadFile(filename) + if err != nil { + // Skip files which can't be opened and read for whatever reason + continue + } + + f, err := ini.Load(b) + if err != nil { + return nil, SharedConfigLoadError{Filename: filename} + } + + files = append(files, sharedConfigFile{ + Filename: filename, IniData: f, + }) + } + + return files, nil +} + +func (cfg *sharedConfig) setAssumeRoleSource(origProfile string, files []sharedConfigFile) error { + var assumeRoleSrc sharedConfig + + // Multiple level assume role chains are not support + if cfg.AssumeRole.SourceProfile == origProfile { + assumeRoleSrc = *cfg + assumeRoleSrc.AssumeRole = assumeRoleConfig{} + } else { + err := assumeRoleSrc.setFromIniFiles(cfg.AssumeRole.SourceProfile, files) + if err != nil { + return err + } + } + + if len(assumeRoleSrc.Creds.AccessKeyID) == 0 { + return SharedConfigAssumeRoleError{RoleARN: cfg.AssumeRole.RoleARN} + } + + cfg.AssumeRoleSource = &assumeRoleSrc + + return nil +} + +func (cfg *sharedConfig) setFromIniFiles(profile string, files []sharedConfigFile) error { + // Trim files from the list that don't exist. + for _, f := range files { + if err := cfg.setFromIniFile(profile, f); err != nil { + if _, ok := err.(SharedConfigProfileNotExistsError); ok { + // Ignore proviles missings + continue + } + return err + } + } + + return nil +} + +// setFromFile loads the configuration from the file using +// the profile provided. A sharedConfig pointer type value is used so that +// multiple config file loadings can be chained. +// +// Only loads complete logically grouped values, and will not set fields in cfg +// for incomplete grouped values in the config. Such as credentials. For example +// if a config file only includes aws_access_key_id but no aws_secret_access_key +// the aws_access_key_id will be ignored. 
+func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile) error { + section, err := file.IniData.GetSection(profile) + if err != nil { + // Fallback to to alternate profile name: profile + section, err = file.IniData.GetSection(fmt.Sprintf("profile %s", profile)) + if err != nil { + return SharedConfigProfileNotExistsError{Profile: profile, Err: err} + } + } + + // Shared Credentials + akid := section.Key(accessKeyIDKey).String() + secret := section.Key(secretAccessKey).String() + if len(akid) > 0 && len(secret) > 0 { + cfg.Creds = credentials.Value{ + AccessKeyID: akid, + SecretAccessKey: secret, + SessionToken: section.Key(sessionTokenKey).String(), + ProviderName: fmt.Sprintf("SharedConfigCredentials: %s", file.Filename), + } + } + + // Assume Role + roleArn := section.Key(roleArnKey).String() + srcProfile := section.Key(sourceProfileKey).String() + if len(roleArn) > 0 && len(srcProfile) > 0 { + cfg.AssumeRole = assumeRoleConfig{ + RoleARN: roleArn, + SourceProfile: srcProfile, + ExternalID: section.Key(externalIDKey).String(), + MFASerial: section.Key(mfaSerialKey).String(), + RoleSessionName: section.Key(roleSessionNameKey).String(), + } + } + + // Region + if v := section.Key(regionKey).String(); len(v) > 0 { + cfg.Region = v + } + + return nil +} + +// SharedConfigLoadError is an error for the shared config file failed to load. +type SharedConfigLoadError struct { + Filename string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigLoadError) Code() string { + return "SharedConfigLoadError" +} + +// Message is the description of the error +func (e SharedConfigLoadError) Message() string { + return fmt.Sprintf("failed to load config file, %s", e.Filename) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigLoadError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigLoadError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigProfileNotExistsError is an error for the shared config when +// the profile was not find in the config file. +type SharedConfigProfileNotExistsError struct { + Profile string + Err error +} + +// Code is the short id of the error. +func (e SharedConfigProfileNotExistsError) Code() string { + return "SharedConfigProfileNotExistsError" +} + +// Message is the description of the error +func (e SharedConfigProfileNotExistsError) Message() string { + return fmt.Sprintf("failed to get profile, %s", e.Profile) +} + +// OrigErr is the underlying error that caused the failure. +func (e SharedConfigProfileNotExistsError) OrigErr() error { + return e.Err +} + +// Error satisfies the error interface. +func (e SharedConfigProfileNotExistsError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", e.Err) +} + +// SharedConfigAssumeRoleError is an error for the shared config when the +// profile contains assume role information, but that information is invalid +// or not complete. +type SharedConfigAssumeRoleError struct { + RoleARN string +} + +// Code is the short id of the error. +func (e SharedConfigAssumeRoleError) Code() string { + return "SharedConfigAssumeRoleError" +} + +// Message is the description of the error +func (e SharedConfigAssumeRoleError) Message() string { + return fmt.Sprintf("failed to load assume role for %s, source profile has no shared credentials", + e.RoleARN) +} + +// OrigErr is the underlying error that caused the failure. 
+func (e SharedConfigAssumeRoleError) OrigErr() error { + return nil +} + +// Error satisfies the error interface. +func (e SharedConfigAssumeRoleError) Error() string { + return awserr.SprintError(e.Code(), e.Message(), "", nil) +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go new file mode 100644 index 0000000..244c86d --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/header_rules.go @@ -0,0 +1,82 @@ +package v4 + +import ( + "net/http" + "strings" +) + +// validator houses a set of rule needed for validation of a +// string value +type rules []rule + +// rule interface allows for more flexible rules and just simply +// checks whether or not a value adheres to that rule +type rule interface { + IsValid(value string) bool +} + +// IsValid will iterate through all rules and see if any rules +// apply to the value and supports nested rules +func (r rules) IsValid(value string) bool { + for _, rule := range r { + if rule.IsValid(value) { + return true + } + } + return false +} + +// mapRule generic rule for maps +type mapRule map[string]struct{} + +// IsValid for the map rule satisfies whether it exists in the map +func (m mapRule) IsValid(value string) bool { + _, ok := m[value] + return ok +} + +// whitelist is a generic rule for whitelisting +type whitelist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (w whitelist) IsValid(value string) bool { + return w.rule.IsValid(value) +} + +// blacklist is a generic rule for blacklisting +type blacklist struct { + rule +} + +// IsValid for whitelist checks if the value is within the whitelist +func (b blacklist) IsValid(value string) bool { + return !b.rule.IsValid(value) +} + +type patterns []string + +// IsValid for patterns checks each pattern and returns if a match has +// been found +func (p patterns) IsValid(value string) bool { + for _, pattern := range p { + if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) { + return true + } + } + return false +} + +// inclusiveRules rules allow for rules to depend on one another +type inclusiveRules []rule + +// IsValid will return true if all rules are true +func (r inclusiveRules) IsValid(value string) bool { + for _, rule := range r { + if !rule.IsValid(value) { + return false + } + } + return true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go new file mode 100644 index 0000000..6aa2ed2 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/options.go @@ -0,0 +1,7 @@ +package v4 + +// WithUnsignedPayload will enable and set the UnsignedPayload field to +// true of the signer. 
+func WithUnsignedPayload(v4 *Signer) { + v4.UnsignedPayload = true +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go new file mode 100644 index 0000000..bd082e9 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/uri_path.go @@ -0,0 +1,24 @@ +// +build go1.5 + +package v4 + +import ( + "net/url" + "strings" +) + +func getURIPath(u *url.URL) string { + var uri string + + if len(u.Opaque) > 0 { + uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/") + } else { + uri = u.EscapedPath() + } + + if len(uri) == 0 { + uri = "/" + } + + return uri +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go new file mode 100644 index 0000000..434ac87 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go @@ -0,0 +1,761 @@ +// Package v4 implements signing for AWS V4 signer +// +// Provides request signing for request that need to be signed with +// AWS V4 Signatures. +// +// Standalone Signer +// +// Generally using the signer outside of the SDK should not require any additional +// logic when using Go v1.5 or higher. The signer does this by taking advantage +// of the URL.EscapedPath method. If your request URI requires additional escaping +// you many need to use the URL.Opaque to define what the raw URI should be sent +// to the service as. +// +// The signer will first check the URL.Opaque field, and use its value if set. +// The signer does require the URL.Opaque field to be set in the form of: +// +// "///" +// +// // e.g. +// "//example.com/some/path" +// +// The leading "//" and hostname are required or the URL.Opaque escaping will +// not work correctly. +// +// If URL.Opaque is not set the signer will fallback to the URL.EscapedPath() +// method and using the returned value. If you're using Go v1.4 you must set +// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with +// Go v1.5 the signer will fallback to URL.Path. +// +// AWS v4 signature validation requires that the canonical string's URI path +// element must be the URI escaped form of the HTTP request's path. +// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html +// +// The Go HTTP client will perform escaping automatically on the request. Some +// of these escaping may cause signature validation errors because the HTTP +// request differs from the URI path or query that the signature was generated. +// https://golang.org/pkg/net/url/#URL.EscapedPath +// +// Because of this, it is recommended that when using the signer outside of the +// SDK that explicitly escaping the request prior to being signed is preferable, +// and will help prevent signature validation errors. This can be done by setting +// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then +// call URL.EscapedPath() if Opaque is not set. +// +// If signing a request intended for HTTP2 server, and you're using Go 1.6.2 +// through 1.7.4 you should use the URL.RawPath as the pre-escaped form of the +// request URL. https://github.com/golang/go/issues/16847 points to a bug in +// Go pre 1.8 that failes to make HTTP2 requests using absolute URL in the HTTP +// message. URL.Opaque generally will force Go to make requests with absolute URL. +// URL.RawPath does not do this, but RawPath must be a valid escaping of Path +// or url.EscapedPath will ignore the RawPath escaping. 
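As a concrete illustration of the escaping guidance above, a caller signing requests outside the SDK can pin the exact URI the signer should canonicalize by setting URL.Opaque in the "//hostname/path" form described earlier. A minimal sketch; the host and object name are placeholders.

    package main

    import (
        "net/http"
    )

    func main() {
        req, err := http.NewRequest("GET", "https://example.amazonaws.com/some/object+name", nil)
        if err != nil {
            panic(err)
        }

        // Provide the raw, pre-escaped URI the service should receive. The
        // leading "//" and hostname are required for URL.Opaque to be honored.
        req.URL.Opaque = "//example.amazonaws.com/some/object%2Bname"

        _ = req // hand the request to the signer's Sign or Presign methods
    }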
+// +// Test `TestStandaloneSign` provides a complete example of using the signer +// outside of the SDK and pre-escaping the URI path. +package v4 + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/credentials" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/rest" +) + +const ( + authHeaderPrefix = "AWS4-HMAC-SHA256" + timeFormat = "20060102T150405Z" + shortTimeFormat = "20060102" + + // emptyStringSHA256 is a SHA256 of an empty string + emptyStringSHA256 = `e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855` +) + +var ignoredHeaders = rules{ + blacklist{ + mapRule{ + "Authorization": struct{}{}, + "User-Agent": struct{}{}, + "X-Amzn-Trace-Id": struct{}{}, + }, + }, +} + +// requiredSignedHeaders is a whitelist for build canonical headers. +var requiredSignedHeaders = rules{ + whitelist{ + mapRule{ + "Cache-Control": struct{}{}, + "Content-Disposition": struct{}{}, + "Content-Encoding": struct{}{}, + "Content-Language": struct{}{}, + "Content-Md5": struct{}{}, + "Content-Type": struct{}{}, + "Expires": struct{}{}, + "If-Match": struct{}{}, + "If-Modified-Since": struct{}{}, + "If-None-Match": struct{}{}, + "If-Unmodified-Since": struct{}{}, + "Range": struct{}{}, + "X-Amz-Acl": struct{}{}, + "X-Amz-Copy-Source": struct{}{}, + "X-Amz-Copy-Source-If-Match": struct{}{}, + "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, + "X-Amz-Copy-Source-If-None-Match": struct{}{}, + "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, + "X-Amz-Copy-Source-Range": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Grant-Full-control": struct{}{}, + "X-Amz-Grant-Read": struct{}{}, + "X-Amz-Grant-Read-Acp": struct{}{}, + "X-Amz-Grant-Write": struct{}{}, + "X-Amz-Grant-Write-Acp": struct{}{}, + "X-Amz-Metadata-Directive": struct{}{}, + "X-Amz-Mfa": struct{}{}, + "X-Amz-Request-Payer": struct{}{}, + "X-Amz-Server-Side-Encryption": struct{}{}, + "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, + "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, + "X-Amz-Storage-Class": struct{}{}, + "X-Amz-Website-Redirect-Location": struct{}{}, + }, + }, + patterns{"X-Amz-Meta-"}, +} + +// allowedHoisting is a whitelist for build query headers. The boolean value +// represents whether or not it is a pattern. +var allowedQueryHoisting = inclusiveRules{ + blacklist{requiredSignedHeaders}, + patterns{"X-Amz-"}, +} + +// Signer applies AWS v4 signing to given request. Use this to sign requests +// that need to be signed with AWS V4 Signatures. +type Signer struct { + // The authentication credentials the request will be signed against. + // This value must be set to sign requests. + Credentials *credentials.Credentials + + // Sets the log level the signer should use when reporting information to + // the logger. If the logger is nil nothing will be logged. See + // aws.LogLevelType for more information on available logging levels + // + // By default nothing will be logged. + Debug aws.LogLevelType + + // The logger loging information will be written to. 
If there the logger + // is nil, nothing will be logged. + Logger aws.Logger + + // Disables the Signer's moving HTTP header key/value pairs from the HTTP + // request header to the request's query string. This is most commonly used + // with pre-signed requests preventing headers from being added to the + // request's query string. + DisableHeaderHoisting bool + + // Disables the automatic escaping of the URI path of the request for the + // siganture's canonical string's path. For services that do not need additional + // escaping then use this to disable the signer escaping the path. + // + // S3 is an example of a service that does not need additional escaping. + // + // http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html + DisableURIPathEscaping bool + + // Disales the automatical setting of the HTTP request's Body field with the + // io.ReadSeeker passed in to the signer. This is useful if you're using a + // custom wrapper around the body for the io.ReadSeeker and want to preserve + // the Body value on the Request.Body. + // + // This does run the risk of signing a request with a body that will not be + // sent in the request. Need to ensure that the underlying data of the Body + // values are the same. + DisableRequestBodyOverwrite bool + + // currentTimeFn returns the time value which represents the current time. + // This value should only be used for testing. If it is nil the default + // time.Now will be used. + currentTimeFn func() time.Time + + // UnsignedPayload will prevent signing of the payload. This will only + // work for services that have support for this. + UnsignedPayload bool +} + +// NewSigner returns a Signer pointer configured with the credentials and optional +// option values provided. If not options are provided the Signer will use its +// default configuration. +func NewSigner(credentials *credentials.Credentials, options ...func(*Signer)) *Signer { + v4 := &Signer{ + Credentials: credentials, + } + + for _, option := range options { + option(v4) + } + + return v4 +} + +type signingCtx struct { + ServiceName string + Region string + Request *http.Request + Body io.ReadSeeker + Query url.Values + Time time.Time + ExpireTime time.Duration + SignedHeaderVals http.Header + + DisableURIPathEscaping bool + + credValues credentials.Value + isPresign bool + formattedTime string + formattedShortTime string + unsignedPayload bool + + bodyDigest string + signedHeaders string + canonicalHeaders string + canonicalString string + credentialString string + stringToSign string + signature string + authorization string +} + +// Sign signs AWS v4 requests with the provided body, service name, region the +// request is made to, and time the request is signed at. The signTime allows +// you to specify that a request is signed for the future, and cannot be +// used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. Generally for signed requests this value +// is not needed as the full request context will be captured by the http.Request +// value. It is included for reference though. +// +// Sign will set the request's Body to be the `body` parameter passed in. If +// the body is not already an io.ReadCloser, it will be wrapped within one. If +// a `nil` body parameter passed to Sign, the request's Body field will be +// also set to nil. Its important to note that this functionality will not +// change the request's ContentLength of the request. 
+// +// Sign differs from Presign in that it will sign the request using HTTP +// header values. This type of signing is intended for http.Request values that +// will not be shared, or are shared in a way the header values on the request +// will not be lost. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +func (v4 Signer) Sign(r *http.Request, body io.ReadSeeker, service, region string, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, 0, signTime) +} + +// Presign signs AWS v4 requests with the provided body, service name, region +// the request is made to, and time the request is signed at. The signTime +// allows you to specify that a request is signed for the future, and cannot +// be used until then. +// +// Returns a list of HTTP headers that were included in the signature or an +// error if signing the request failed. For presigned requests these headers +// and their values must be included on the HTTP request when it is made. This +// is helpful to know what header values need to be shared with the party the +// presigned request will be distributed to. +// +// Presign differs from Sign in that it will sign the request using query string +// instead of header values. This allows you to share the Presigned Request's +// URL with third parties, or distribute it throughout your system with minimal +// dependencies. +// +// Presign also takes an exp value which is the duration the +// signed request will be valid after the signing time. This is allows you to +// set when the request will expire. +// +// The requests body is an io.ReadSeeker so the SHA256 of the body can be +// generated. To bypass the signer computing the hash you can set the +// "X-Amz-Content-Sha256" header with a precomputed value. The signer will +// only compute the hash if the request header value is empty. +// +// Presigning a S3 request will not compute the body's SHA256 hash by default. +// This is done due to the general use case for S3 presigned URLs is to share +// PUT/GET capabilities. If you would like to include the body's SHA256 in the +// presigned request's signature you can set the "X-Amz-Content-Sha256" +// HTTP header and that will be included in the request's signature. 
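To make the Sign/Presign distinction concrete, a hedged sketch of presigning an S3 GET; the bucket, object key, credentials, and expiry are placeholders:

package main

import (
    "fmt"
    "net/http"
    "time"

    "github.com/aws/aws-sdk-go/aws/credentials"
    "github.com/aws/aws-sdk-go/aws/signer/v4"
)

func main() {
    // Presign an S3 GET so the URL can be shared and used for the next 15 minutes.
    req, _ := http.NewRequest("GET", "https://examplebucket.s3.amazonaws.com/example-object", nil)

    signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
    // nil body: S3 presigned requests default to an UNSIGNED-PAYLOAD body digest.
    if _, err := signer.Presign(req, nil, "s3", "us-east-1", 15*time.Minute, time.Now()); err != nil {
        panic(err)
    }

    // The query string now carries X-Amz-Algorithm, X-Amz-Credential,
    // X-Amz-Signature, etc.; this is the shareable URL.
    fmt.Println(req.URL.String())
}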
+func (v4 Signer) Presign(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + return v4.signWithBody(r, body, service, region, exp, signTime) +} + +func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, region string, exp time.Duration, signTime time.Time) (http.Header, error) { + currentTimeFn := v4.currentTimeFn + if currentTimeFn == nil { + currentTimeFn = time.Now + } + + ctx := &signingCtx{ + Request: r, + Body: body, + Query: r.URL.Query(), + Time: signTime, + ExpireTime: exp, + isPresign: exp != 0, + ServiceName: service, + Region: region, + DisableURIPathEscaping: v4.DisableURIPathEscaping, + unsignedPayload: v4.UnsignedPayload, + } + + for key := range ctx.Query { + sort.Strings(ctx.Query[key]) + } + + if ctx.isRequestSigned() { + ctx.Time = currentTimeFn() + ctx.handlePresignRemoval() + } + + var err error + ctx.credValues, err = v4.Credentials.Get() + if err != nil { + return http.Header{}, err + } + + ctx.assignAmzQueryValues() + ctx.build(v4.DisableHeaderHoisting) + + // If the request is not presigned the body should be attached to it. This + // prevents the confusion of wanting to send a signed request without + // the body the request was signed for attached. + if !(v4.DisableRequestBodyOverwrite || ctx.isPresign) { + var reader io.ReadCloser + if body != nil { + var ok bool + if reader, ok = body.(io.ReadCloser); !ok { + reader = ioutil.NopCloser(body) + } + } + r.Body = reader + } + + if v4.Debug.Matches(aws.LogDebugWithSigning) { + v4.logSigningInfo(ctx) + } + + return ctx.SignedHeaderVals, nil +} + +func (ctx *signingCtx) handlePresignRemoval() { + if !ctx.isPresign { + return + } + + // The credentials have expired for this request. The current signing + // is invalid, and needs to be request because the request will fail. + ctx.removePresign() + + // Update the request's query string to ensure the values stays in + // sync in the case retrieving the new credentials fails. + ctx.Request.URL.RawQuery = ctx.Query.Encode() +} + +func (ctx *signingCtx) assignAmzQueryValues() { + if ctx.isPresign { + ctx.Query.Set("X-Amz-Algorithm", authHeaderPrefix) + if ctx.credValues.SessionToken != "" { + ctx.Query.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } else { + ctx.Query.Del("X-Amz-Security-Token") + } + + return + } + + if ctx.credValues.SessionToken != "" { + ctx.Request.Header.Set("X-Amz-Security-Token", ctx.credValues.SessionToken) + } +} + +// SignRequestHandler is a named request handler the SDK will use to sign +// service client request with using the V4 signature. +var SignRequestHandler = request.NamedHandler{ + Name: "v4.SignRequestHandler", Fn: SignSDKRequest, +} + +// SignSDKRequest signs an AWS request with the V4 signature. This +// request handler is bested used only with the SDK's built in service client's +// API operation requests. +// +// This function should not be used on its on its own, but in conjunction with +// an AWS service client's API operation call. To sign a standalone request +// not created by a service client's API operation method use the "Sign" or +// "Presign" functions of the "Signer" type. +// +// If the credentials of the request's config are set to +// credentials.AnonymousCredentials the request will not be signed. +func SignSDKRequest(req *request.Request) { + signSDKRequestWithCurrTime(req, time.Now) +} + +// BuildNamedHandler will build a generic handler for signing. 
+func BuildNamedHandler(name string, opts ...func(*Signer)) request.NamedHandler { + return request.NamedHandler{ + Name: name, + Fn: func(req *request.Request) { + signSDKRequestWithCurrTime(req, time.Now, opts...) + }, + } +} + +func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time, opts ...func(*Signer)) { + // If the request does not need to be signed ignore the signing of the + // request if the AnonymousCredentials object is used. + if req.Config.Credentials == credentials.AnonymousCredentials { + return + } + + region := req.ClientInfo.SigningRegion + if region == "" { + region = aws.StringValue(req.Config.Region) + } + + name := req.ClientInfo.SigningName + if name == "" { + name = req.ClientInfo.ServiceName + } + + v4 := NewSigner(req.Config.Credentials, func(v4 *Signer) { + v4.Debug = req.Config.LogLevel.Value() + v4.Logger = req.Config.Logger + v4.DisableHeaderHoisting = req.NotHoist + v4.currentTimeFn = curTimeFn + if name == "s3" { + // S3 service should not have any escaping applied + v4.DisableURIPathEscaping = true + } + // Prevents setting the HTTPRequest's Body. Since the Body could be + // wrapped in a custom io.Closer that we do not want to be stompped + // on top of by the signer. + v4.DisableRequestBodyOverwrite = true + }) + + for _, opt := range opts { + opt(v4) + } + + signingTime := req.Time + if !req.LastSignedAt.IsZero() { + signingTime = req.LastSignedAt + } + + signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(), + name, region, req.ExpireTime, signingTime, + ) + if err != nil { + req.Error = err + req.SignedHeaderVals = nil + return + } + + req.SignedHeaderVals = signedHeaders + req.LastSignedAt = curTimeFn() +} + +const logSignInfoMsg = `DEBUG: Request Signature: +---[ CANONICAL STRING ]----------------------------- +%s +---[ STRING TO SIGN ]-------------------------------- +%s%s +-----------------------------------------------------` +const logSignedURLMsg = ` +---[ SIGNED URL ]------------------------------------ +%s` + +func (v4 *Signer) logSigningInfo(ctx *signingCtx) { + signedURLMsg := "" + if ctx.isPresign { + signedURLMsg = fmt.Sprintf(logSignedURLMsg, ctx.Request.URL.String()) + } + msg := fmt.Sprintf(logSignInfoMsg, ctx.canonicalString, ctx.stringToSign, signedURLMsg) + v4.Logger.Log(msg) +} + +func (ctx *signingCtx) build(disableHeaderHoisting bool) { + ctx.buildTime() // no depends + ctx.buildCredentialString() // no depends + + unsignedHeaders := ctx.Request.Header + if ctx.isPresign { + if !disableHeaderHoisting { + urlValues := url.Values{} + urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends + for k := range urlValues { + ctx.Query[k] = urlValues[k] + } + } + } + + ctx.buildBodyDigest() + ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) + ctx.buildCanonicalString() // depends on canon headers / signed headers + ctx.buildStringToSign() // depends on canon string + ctx.buildSignature() // depends on string to sign + + if ctx.isPresign { + ctx.Request.URL.RawQuery += "&X-Amz-Signature=" + ctx.signature + } else { + parts := []string{ + authHeaderPrefix + " Credential=" + ctx.credValues.AccessKeyID + "/" + ctx.credentialString, + "SignedHeaders=" + ctx.signedHeaders, + "Signature=" + ctx.signature, + } + ctx.Request.Header.Set("Authorization", strings.Join(parts, ", ")) + } +} + +func (ctx *signingCtx) buildTime() { + ctx.formattedTime = ctx.Time.UTC().Format(timeFormat) + ctx.formattedShortTime = ctx.Time.UTC().Format(shortTimeFormat) + + if 
ctx.isPresign { + duration := int64(ctx.ExpireTime / time.Second) + ctx.Query.Set("X-Amz-Date", ctx.formattedTime) + ctx.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) + } else { + ctx.Request.Header.Set("X-Amz-Date", ctx.formattedTime) + } +} + +func (ctx *signingCtx) buildCredentialString() { + ctx.credentialString = strings.Join([]string{ + ctx.formattedShortTime, + ctx.Region, + ctx.ServiceName, + "aws4_request", + }, "/") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-Credential", ctx.credValues.AccessKeyID+"/"+ctx.credentialString) + } +} + +func buildQuery(r rule, header http.Header) (url.Values, http.Header) { + query := url.Values{} + unsignedHeaders := http.Header{} + for k, h := range header { + if r.IsValid(k) { + query[k] = h + } else { + unsignedHeaders[k] = h + } + } + + return query, unsignedHeaders +} +func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) { + var headers []string + headers = append(headers, "host") + for k, v := range header { + canonicalKey := http.CanonicalHeaderKey(k) + if !r.IsValid(canonicalKey) { + continue // ignored header + } + if ctx.SignedHeaderVals == nil { + ctx.SignedHeaderVals = make(http.Header) + } + + lowerCaseKey := strings.ToLower(k) + if _, ok := ctx.SignedHeaderVals[lowerCaseKey]; ok { + // include additional values + ctx.SignedHeaderVals[lowerCaseKey] = append(ctx.SignedHeaderVals[lowerCaseKey], v...) + continue + } + + headers = append(headers, lowerCaseKey) + ctx.SignedHeaderVals[lowerCaseKey] = v + } + sort.Strings(headers) + + ctx.signedHeaders = strings.Join(headers, ";") + + if ctx.isPresign { + ctx.Query.Set("X-Amz-SignedHeaders", ctx.signedHeaders) + } + + headerValues := make([]string, len(headers)) + for i, k := range headers { + if k == "host" { + headerValues[i] = "host:" + ctx.Request.URL.Host + } else { + headerValues[i] = k + ":" + + strings.Join(ctx.SignedHeaderVals[k], ",") + } + } + + ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") +} + +func (ctx *signingCtx) buildCanonicalString() { + ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1) + + uri := getURIPath(ctx.Request.URL) + + if !ctx.DisableURIPathEscaping { + uri = rest.EscapePath(uri, false) + } + + ctx.canonicalString = strings.Join([]string{ + ctx.Request.Method, + uri, + ctx.Request.URL.RawQuery, + ctx.canonicalHeaders + "\n", + ctx.signedHeaders, + ctx.bodyDigest, + }, "\n") +} + +func (ctx *signingCtx) buildStringToSign() { + ctx.stringToSign = strings.Join([]string{ + authHeaderPrefix, + ctx.formattedTime, + ctx.credentialString, + hex.EncodeToString(makeSha256([]byte(ctx.canonicalString))), + }, "\n") +} + +func (ctx *signingCtx) buildSignature() { + secret := ctx.credValues.SecretAccessKey + date := makeHmac([]byte("AWS4"+secret), []byte(ctx.formattedShortTime)) + region := makeHmac(date, []byte(ctx.Region)) + service := makeHmac(region, []byte(ctx.ServiceName)) + credentials := makeHmac(service, []byte("aws4_request")) + signature := makeHmac(credentials, []byte(ctx.stringToSign)) + ctx.signature = hex.EncodeToString(signature) +} + +func (ctx *signingCtx) buildBodyDigest() { + hash := ctx.Request.Header.Get("X-Amz-Content-Sha256") + if hash == "" { + if ctx.unsignedPayload || (ctx.isPresign && ctx.ServiceName == "s3") { + hash = "UNSIGNED-PAYLOAD" + } else if ctx.Body == nil { + hash = emptyStringSHA256 + } else { + hash = hex.EncodeToString(makeSha256Reader(ctx.Body)) + } + if ctx.unsignedPayload || ctx.ServiceName == "s3" || ctx.ServiceName == "glacier" { + 
ctx.Request.Header.Set("X-Amz-Content-Sha256", hash) + } + } + ctx.bodyDigest = hash +} + +// isRequestSigned returns if the request is currently signed or presigned +func (ctx *signingCtx) isRequestSigned() bool { + if ctx.isPresign && ctx.Query.Get("X-Amz-Signature") != "" { + return true + } + if ctx.Request.Header.Get("Authorization") != "" { + return true + } + + return false +} + +// unsign removes signing flags for both signed and presigned requests. +func (ctx *signingCtx) removePresign() { + ctx.Query.Del("X-Amz-Algorithm") + ctx.Query.Del("X-Amz-Signature") + ctx.Query.Del("X-Amz-Security-Token") + ctx.Query.Del("X-Amz-Date") + ctx.Query.Del("X-Amz-Expires") + ctx.Query.Del("X-Amz-Credential") + ctx.Query.Del("X-Amz-SignedHeaders") +} + +func makeHmac(key []byte, data []byte) []byte { + hash := hmac.New(sha256.New, key) + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256(data []byte) []byte { + hash := sha256.New() + hash.Write(data) + return hash.Sum(nil) +} + +func makeSha256Reader(reader io.ReadSeeker) []byte { + hash := sha256.New() + start, _ := reader.Seek(0, 1) + defer reader.Seek(start, 0) + + io.Copy(hash, reader) + return hash.Sum(nil) +} + +const doubleSpaces = " " + +var doubleSpaceBytes = []byte(doubleSpaces) + +func stripExcessSpaces(headerVals []string) []string { + vals := make([]string, len(headerVals)) + for i, str := range headerVals { + // Trim leading and trailing spaces + trimmed := strings.TrimSpace(str) + + idx := strings.Index(trimmed, doubleSpaces) + var buf []byte + for idx > -1 { + // Multiple adjacent spaces found + if buf == nil { + // first time create the buffer + buf = []byte(trimmed) + } + + stripToIdx := -1 + for j := idx + 1; j < len(buf); j++ { + if buf[j] != ' ' { + buf = append(buf[:idx+1], buf[j:]...) + stripToIdx = j + break + } + } + + if stripToIdx >= 0 { + idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes) + if idx >= 0 { + idx += stripToIdx + } + } else { + idx = -1 + } + } + + if buf != nil { + vals[i] = string(buf) + } else { + vals[i] = trimmed + } + } + return vals +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/types.go b/vendor/github.com/aws/aws-sdk-go/aws/types.go new file mode 100644 index 0000000..0e2d864 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/types.go @@ -0,0 +1,118 @@ +package aws + +import ( + "io" + "sync" +) + +// ReadSeekCloser wraps a io.Reader returning a ReaderSeekerCloser. Should +// only be used with an io.Reader that is also an io.Seeker. Doing so may +// cause request signature errors, or request body's not sent for GET, HEAD +// and DELETE HTTP methods. +// +// Deprecated: Should only be used with io.ReadSeeker. If using for +// S3 PutObject to stream content use s3manager.Uploader instead. +func ReadSeekCloser(r io.Reader) ReaderSeekerCloser { + return ReaderSeekerCloser{r} +} + +// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and +// io.Closer interfaces to the underlying object if they are available. +type ReaderSeekerCloser struct { + r io.Reader +} + +// Read reads from the reader up to size of p. The number of bytes read, and +// error if it occurred will be returned. +// +// If the reader is not an io.Reader zero bytes read, and nil error will be returned. 
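The key-derivation chain that buildSignature performs above can be reproduced with the standard library alone; a self-contained sketch with placeholder secret, date, region, service, and string to sign:

package main

import (
    "crypto/hmac"
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

// hmacSHA256 mirrors the package's makeHmac helper.
func hmacSHA256(key, data []byte) []byte {
    h := hmac.New(sha256.New, key)
    h.Write(data)
    return h.Sum(nil)
}

func main() {
    // Placeholder inputs; the real signer feeds in the credential scope parts
    // and the string to sign it just built from the canonical request.
    secret, date, region, service := "SECRET", "20170529", "us-east-1", "s3"
    stringToSign := "AWS4-HMAC-SHA256\n20170529T000000Z\n20170529/us-east-1/s3/aws4_request\n<hash of canonical request>"

    kDate := hmacSHA256([]byte("AWS4"+secret), []byte(date))
    kRegion := hmacSHA256(kDate, []byte(region))
    kService := hmacSHA256(kRegion, []byte(service))
    kSigning := hmacSHA256(kService, []byte("aws4_request"))

    fmt.Println(hex.EncodeToString(hmacSHA256(kSigning, []byte(stringToSign))))
}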
+// +// Performs the same functionality as io.Reader Read +func (r ReaderSeekerCloser) Read(p []byte) (int, error) { + switch t := r.r.(type) { + case io.Reader: + return t.Read(p) + } + return 0, nil +} + +// Seek sets the offset for the next Read to offset, interpreted according to +// whence: 0 means relative to the origin of the file, 1 means relative to the +// current offset, and 2 means relative to the end. Seek returns the new offset +// and an error, if any. +// +// If the ReaderSeekerCloser is not an io.Seeker nothing will be done. +func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) { + switch t := r.r.(type) { + case io.Seeker: + return t.Seek(offset, whence) + } + return int64(0), nil +} + +// IsSeeker returns if the underlying reader is also a seeker. +func (r ReaderSeekerCloser) IsSeeker() bool { + _, ok := r.r.(io.Seeker) + return ok +} + +// Close closes the ReaderSeekerCloser. +// +// If the ReaderSeekerCloser is not an io.Closer nothing will be done. +func (r ReaderSeekerCloser) Close() error { + switch t := r.r.(type) { + case io.Closer: + return t.Close() + } + return nil +} + +// A WriteAtBuffer provides a in memory buffer supporting the io.WriterAt interface +// Can be used with the s3manager.Downloader to download content to a buffer +// in memory. Safe to use concurrently. +type WriteAtBuffer struct { + buf []byte + m sync.Mutex + + // GrowthCoeff defines the growth rate of the internal buffer. By + // default, the growth rate is 1, where expanding the internal + // buffer will allocate only enough capacity to fit the new expected + // length. + GrowthCoeff float64 +} + +// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer +// provided by buf. +func NewWriteAtBuffer(buf []byte) *WriteAtBuffer { + return &WriteAtBuffer{buf: buf} +} + +// WriteAt writes a slice of bytes to a buffer starting at the position provided +// The number of bytes written will be returned, or error. Can overwrite previous +// written slices if the write ats overlap. +func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) { + pLen := len(p) + expLen := pos + int64(pLen) + b.m.Lock() + defer b.m.Unlock() + if int64(len(b.buf)) < expLen { + if int64(cap(b.buf)) < expLen { + if b.GrowthCoeff < 1 { + b.GrowthCoeff = 1 + } + newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen))) + copy(newBuf, b.buf) + b.buf = newBuf + } + b.buf = b.buf[:expLen] + } + copy(b.buf[pos:], p) + return pLen, nil +} + +// Bytes returns a slice of bytes written to the buffer. +func (b *WriteAtBuffer) Bytes() []byte { + b.m.Lock() + defer b.m.Unlock() + return b.buf +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url.go b/vendor/github.com/aws/aws-sdk-go/aws/url.go new file mode 100644 index 0000000..6192b24 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url.go @@ -0,0 +1,12 @@ +// +build go1.8 + +package aws + +import "net/url" + +// URLHostname will extract the Hostname without port from the URL value. +// +// Wrapper of net/url#URL.Hostname for backwards Go version compatibility. +func URLHostname(url *url.URL) string { + return url.Hostname() +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go new file mode 100644 index 0000000..0210d27 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/url_1_7.go @@ -0,0 +1,29 @@ +// +build !go1.8 + +package aws + +import ( + "net/url" + "strings" +) + +// URLHostname will extract the Hostname without port from the URL value. 
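A small sketch of the WriteAtBuffer use case mentioned above; the out-of-order writes stand in for the concurrent part downloads performed by s3manager.Downloader, and the commented Download call is illustrative only:

package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
)

func main() {
    // An in-memory io.WriterAt that grows as needed and is safe for the
    // concurrent writes a multipart download performs.
    buf := aws.NewWriteAtBuffer(nil)

    // Writes may arrive out of order; each lands at its own offset.
    buf.WriteAt([]byte("world"), 7)
    buf.WriteAt([]byte("hello, "), 0)

    fmt.Println(string(buf.Bytes())) // "hello, world"

    // With the SDK it would typically be handed to a downloader, e.g.:
    //   n, err := downloader.Download(buf, &s3.GetObjectInput{Bucket: ..., Key: ...})
}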
+// +// Copy of Go 1.8's net/url#URL.Hostname functionality. +func URLHostname(url *url.URL) string { + return stripPort(url.Host) + +} + +// stripPort is copy of Go 1.8 url#URL.Hostname functionality. +// https://golang.org/src/net/url/url.go +func stripPort(hostport string) string { + colon := strings.IndexByte(hostport, ':') + if colon == -1 { + return hostport + } + if i := strings.IndexByte(hostport, ']'); i != -1 { + return strings.TrimPrefix(hostport[:i], "[") + } + return hostport[:colon] +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go similarity index 87% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go rename to vendor/github.com/aws/aws-sdk-go/aws/version.go index 51c3358..24acbc5 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "0.7.1" +const SDKVersion = "1.8.30" diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go new file mode 100644 index 0000000..53831df --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/idempotency.go @@ -0,0 +1,75 @@ +package protocol + +import ( + "crypto/rand" + "fmt" + "reflect" +) + +// RandReader is the random reader the protocol package will use to read +// random bytes from. This is exported for testing, and should not be used. +var RandReader = rand.Reader + +const idempotencyTokenFillTag = `idempotencyToken` + +// CanSetIdempotencyToken returns true if the struct field should be +// automatically populated with a Idempotency token. +// +// Only *string and string type fields that are tagged with idempotencyToken +// which are not already set can be auto filled. +func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool { + switch u := v.Interface().(type) { + // To auto fill an Idempotency token the field must be a string, + // tagged for auto fill, and have a zero value. + case *string: + return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + case string: + return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0 + } + + return false +} + +// GetIdempotencyToken returns a randomly generated idempotency token. +func GetIdempotencyToken() string { + b := make([]byte, 16) + RandReader.Read(b) + + return UUIDVersion4(b) +} + +// SetIdempotencyToken will set the value provided with a Idempotency Token. +// Given that the value can be set. Will panic if value is not setable. 
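A hedged sketch of how these idempotency helpers are driven; the createInput shape and its tag are hypothetical stand-ins for the generated API input structs that carry an idempotencyToken field:

package main

import (
    "fmt"
    "reflect"

    "github.com/aws/aws-sdk-go/private/protocol"
)

// createInput is a stand-in for a generated input shape whose token field is
// tagged for auto-fill.
type createInput struct {
    ClientToken *string `idempotencyToken:"true"`
}

func main() {
    in := &createInput{}

    v := reflect.ValueOf(in).Elem().Field(0)
    f := reflect.TypeOf(*in).Field(0)

    // An unset *string carrying the idempotencyToken tag is eligible for auto-fill.
    if protocol.CanSetIdempotencyToken(v, f) {
        protocol.SetIdempotencyToken(v)
    }

    fmt.Println(*in.ClientToken) // a version-4 UUID
}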
+func SetIdempotencyToken(v reflect.Value) { + if v.Kind() == reflect.Ptr { + if v.IsNil() && v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = reflect.Indirect(v) + + if !v.CanSet() { + panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) + } + + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + // TODO handle error + return + } + + v.Set(reflect.ValueOf(UUIDVersion4(b))) +} + +// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided +func UUIDVersion4(u []byte) string { + // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 + // 13th character is "4" + u[6] = (u[6] | 0x40) & 0x4F + // 17th character is "8", "9", "a", or "b" + u[8] = (u[8] | 0x80) & 0xBF + + return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go similarity index 55% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build.go rename to vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go index c4d8dd2..18169f0 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/build.go @@ -1,21 +1,24 @@ -// Package query provides serialisation of AWS query requests, and responses. +// Package query provides serialization of AWS query requests, and responses. package query -//go:generate go run ../../fixtures/protocol/generate.go ../../fixtures/protocol/input/query.json build_test.go +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go import ( "net/url" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/internal/protocol/query/queryutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query/queryutil" ) +// BuildHandler is a named request handler for building query protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.query.Build", Fn: Build} + // Build builds a request for an AWS Query service. -func Build(r *aws.Request) { +func Build(r *request.Request) { body := url.Values{ "Action": {r.Operation.Name}, - "Version": {r.Service.APIVersion}, + "Version": {r.ClientInfo.APIVersion}, } if err := queryutil.Parse(body, r.Params, false); err != nil { r.Error = awserr.New("SerializationError", "failed encoding Query request", err) diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go similarity index 90% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go rename to vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go index 3b417a8..524ca95 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/query/queryutil/queryutil.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/queryutil/queryutil.go @@ -9,6 +9,8 @@ import ( "strconv" "strings" "time" + + "github.com/aws/aws-sdk-go/private/protocol" ) // Parse parses an object i and fills a url.Values object. 
The isEC2 flag @@ -68,14 +70,22 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri t := value.Type() for i := 0; i < value.NumField(); i++ { - if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c { + elemValue := elemOf(value.Field(i)) + field := t.Field(i) + + if field.PkgPath != "" { continue // ignore unexported fields } + if field.Tag.Get("ignore") != "" { + continue + } + + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + elemValue = reflect.ValueOf(token) + } - value := elemOf(value.Field(i)) - field := t.Field(i) var name string - if q.isEC2 { name = field.Tag.Get("queryName") } @@ -97,7 +107,7 @@ func (q *queryParser) parseStruct(v url.Values, value reflect.Value, prefix stri name = prefix + "." + name } - if err := q.parseValue(v, value, name, field.Tag); err != nil { + if err := q.parseValue(v, elemValue, name, field.Tag); err != nil { return err } } @@ -113,7 +123,11 @@ func (q *queryParser) parseList(v url.Values, value reflect.Value, prefix string // check for unflattened list member if !q.isEC2 && tag.Get("flattened") == "" { - prefix += ".member" + if listName := tag.Get("locationNameList"); listName == "" { + prefix += ".member" + } else { + prefix += "." + listName + } } for i := 0; i < value.Len(); i++ { diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go new file mode 100644 index 0000000..e0f4d5a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal.go @@ -0,0 +1,35 @@ +package query + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go + +import ( + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// UnmarshalHandler is a named request handler for unmarshaling query protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.query.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling query protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalMeta", Fn: UnmarshalMeta} + +// Unmarshal unmarshals a response for an AWS Query service. +func Unmarshal(r *request.Request) { + defer r.HTTPResponse.Body.Close() + if r.DataFilled() { + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, r.Operation.Name+"Result") + if err != nil { + r.Error = awserr.New("SerializationError", "failed decoding Query response", err) + return + } + } +} + +// UnmarshalMeta unmarshals header response values for an AWS Query service. 
+func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go new file mode 100644 index 0000000..f214296 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/query/unmarshal_error.go @@ -0,0 +1,66 @@ +package query + +import ( + "encoding/xml" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"ErrorResponse"` + Code string `xml:"Error>Code"` + Message string `xml:"Error>Message"` + RequestID string `xml:"RequestId"` +} + +type xmlServiceUnavailableResponse struct { + XMLName xml.Name `xml:"ServiceUnavailableException"` +} + +// UnmarshalErrorHandler is a name request handler to unmarshal request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.query.UnmarshalError", Fn: UnmarshalError} + +// UnmarshalError unmarshals an error response for an AWS Query service. +func UnmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + + bodyBytes, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to read from query HTTP response body", err) + return + } + + // First check for specific error + resp := xmlErrorResponse{} + decodeErr := xml.Unmarshal(bodyBytes, &resp) + if decodeErr == nil { + reqID := resp.RequestID + if reqID == "" { + reqID = r.RequestID + } + r.Error = awserr.NewRequestFailure( + awserr.New(resp.Code, resp.Message, nil), + r.HTTPResponse.StatusCode, + reqID, + ) + return + } + + // Check for unhandled error + servUnavailResp := xmlServiceUnavailableResponse{} + unavailErr := xml.Unmarshal(bodyBytes, &servUnavailResp) + if unavailErr == nil { + r.Error = awserr.NewRequestFailure( + awserr.New("ServiceUnavailableException", "service is unavailable", nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ) + return + } + + // Failed to retrieve any error message from the response body + r.Error = awserr.New("SerializationError", + "failed to decode query XML error response", decodeErr) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go new file mode 100644 index 0000000..7161835 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/build.go @@ -0,0 +1,290 @@ +// Package rest provides RESTful serialization of AWS requests and responses. +package rest + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +// RFC822 returns an RFC822 formatted timestamp for AWS protocols +const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" + +// Whether the byte value can be sent without escaping in AWS URLs +var noEscape [256]bool + +var errValueNotSet = fmt.Errorf("value not set") + +func init() { + for i := 0; i < len(noEscape); i++ { + // AWS expects every character except these to be escaped + noEscape[i] = (i >= 'A' && i <= 'Z') || + (i >= 'a' && i <= 'z') || + (i >= '0' && i <= '9') || + i == '-' || + i == '.' 
|| + i == '_' || + i == '~' + } +} + +// BuildHandler is a named request handler for building rest protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} + +// Build builds the REST component of a service request. +func Build(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, false) + buildBody(r, v) + } +} + +// BuildAsGET builds the REST component of a service request with the ability to hoist +// data from the body. +func BuildAsGET(r *request.Request) { + if r.ParamsFilled() { + v := reflect.ValueOf(r.Params).Elem() + buildLocationElements(r, v, true) + buildBody(r, v) + } +} + +func buildLocationElements(r *request.Request, v reflect.Value, buildGETQuery bool) { + query := r.HTTPRequest.URL.Query() + + // Setup the raw path to match the base path pattern. This is needed + // so that when the path is mutated a custom escaped version can be + // stored in RawPath that will be used by the Go client. + r.HTTPRequest.URL.RawPath = r.HTTPRequest.URL.Path + + for i := 0; i < v.NumField(); i++ { + m := v.Field(i) + if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { + continue + } + + if m.IsValid() { + field := v.Type().Field(i) + name := field.Tag.Get("locationName") + if name == "" { + name = field.Name + } + if kind := m.Kind(); kind == reflect.Ptr { + m = m.Elem() + } else if kind == reflect.Interface { + if !m.Elem().IsValid() { + continue + } + } + if !m.IsValid() { + continue + } + if field.Tag.Get("ignore") != "" { + continue + } + + var err error + switch field.Tag.Get("location") { + case "headers": // header maps + err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag) + case "header": + err = buildHeader(&r.HTTPRequest.Header, m, name, field.Tag) + case "uri": + err = buildURI(r.HTTPRequest.URL, m, name, field.Tag) + case "querystring": + err = buildQueryString(query, m, name, field.Tag) + default: + if buildGETQuery { + err = buildQueryString(query, m, name, field.Tag) + } + } + r.Error = err + } + if r.Error != nil { + return + } + } + + r.HTTPRequest.URL.RawQuery = query.Encode() + if !aws.BoolValue(r.Config.DisableRestProtocolURICleaning) { + cleanPath(r.HTTPRequest.URL) + } +} + +func buildBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { + if payloadName := field.Tag.Get("payload"); payloadName != "" { + pfield, _ := v.Type().FieldByName(payloadName) + if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { + payload := reflect.Indirect(v.FieldByName(payloadName)) + if payload.IsValid() && payload.Interface() != nil { + switch reader := payload.Interface().(type) { + case io.ReadSeeker: + r.SetReaderBody(reader) + case []byte: + r.SetBufferBody(reader) + case string: + r.SetStringBody(reader) + default: + r.Error = awserr.New("SerializationError", + "failed to encode REST request", + fmt.Errorf("unknown payload type %s", payload.Type())) + } + } + } + } + } +} + +func buildHeader(header *http.Header, v reflect.Value, name string, tag reflect.StructTag) error { + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + header.Add(name, str) + + return nil +} + +func buildHeaderMap(header *http.Header, v reflect.Value, tag reflect.StructTag) error { + prefix := tag.Get("locationName") + for _, key := range v.MapKeys() { + str, err := convertType(v.MapIndex(key), tag) 
+ if err == errValueNotSet { + continue + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + + } + + header.Add(prefix+key.String(), str) + } + return nil +} + +func buildURI(u *url.URL, v reflect.Value, name string, tag reflect.StructTag) error { + value, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + + u.Path = strings.Replace(u.Path, "{"+name+"}", value, -1) + u.Path = strings.Replace(u.Path, "{"+name+"+}", value, -1) + + u.RawPath = strings.Replace(u.RawPath, "{"+name+"}", EscapePath(value, true), -1) + u.RawPath = strings.Replace(u.RawPath, "{"+name+"+}", EscapePath(value, false), -1) + + return nil +} + +func buildQueryString(query url.Values, v reflect.Value, name string, tag reflect.StructTag) error { + switch value := v.Interface().(type) { + case []*string: + for _, item := range value { + query.Add(name, *item) + } + case map[string]*string: + for key, item := range value { + query.Add(key, *item) + } + case map[string][]*string: + for key, items := range value { + for _, item := range items { + query.Add(key, *item) + } + } + default: + str, err := convertType(v, tag) + if err == errValueNotSet { + return nil + } else if err != nil { + return awserr.New("SerializationError", "failed to encode REST request", err) + } + query.Set(name, str) + } + + return nil +} + +func cleanPath(u *url.URL) { + hasSlash := strings.HasSuffix(u.Path, "/") + + // clean up path, removing duplicate `/` + u.Path = path.Clean(u.Path) + u.RawPath = path.Clean(u.RawPath) + + if hasSlash && !strings.HasSuffix(u.Path, "/") { + u.Path += "/" + u.RawPath += "/" + } +} + +// EscapePath escapes part of a URL path in Amazon style +func EscapePath(path string, encodeSep bool) string { + var buf bytes.Buffer + for i := 0; i < len(path); i++ { + c := path[i] + if noEscape[c] || (c == '/' && !encodeSep) { + buf.WriteByte(c) + } else { + fmt.Fprintf(&buf, "%%%02X", c) + } + } + return buf.String() +} + +func convertType(v reflect.Value, tag reflect.StructTag) (string, error) { + v = reflect.Indirect(v) + if !v.IsValid() { + return "", errValueNotSet + } + + var str string + switch value := v.Interface().(type) { + case string: + str = value + case []byte: + str = base64.StdEncoding.EncodeToString(value) + case bool: + str = strconv.FormatBool(value) + case int64: + str = strconv.FormatInt(value, 10) + case float64: + str = strconv.FormatFloat(value, 'f', -1, 64) + case time.Time: + str = value.UTC().Format(RFC822) + case aws.JSONValue: + b, err := json.Marshal(value) + if err != nil { + return "", err + } + if tag.Get("location") == "header" { + str = base64.StdEncoding.EncodeToString(b) + } else { + str = string(b) + } + default: + err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) + return "", err + } + return str, nil +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go similarity index 89% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go rename to vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go index 1f603bb..4366de2 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/payload.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go @@ -12,7 +12,7 @@ func PayloadMember(i 
interface{}) interface{} { if !v.IsValid() { return nil } - if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if field, ok := v.Type().FieldByName("_"); ok { if payloadName := field.Tag.Get("payload"); payloadName != "" { field, _ := v.Type().FieldByName(payloadName) if field.Tag.Get("type") != "structure" { @@ -34,7 +34,7 @@ func PayloadType(i interface{}) string { if !v.IsValid() { return "" } - if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { + if field, ok := v.Type().FieldByName("_"); ok { if payloadName := field.Tag.Get("payload"); payloadName != "" { if member, ok := v.Type().FieldByName(payloadName); ok { return member.Tag.Get("type") diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go similarity index 63% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go rename to vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index a4155f1..7a779ee 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -1,8 +1,11 @@ package rest import ( + "bytes" "encoding/base64" + "encoding/json" "fmt" + "io" "io/ioutil" "net/http" "reflect" @@ -12,19 +15,38 @@ import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" ) +// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} + // Unmarshal unmarshals the REST component of a response in a REST service. 
-func Unmarshal(r *aws.Request) { +func Unmarshal(r *request.Request) { if r.DataFilled() { v := reflect.Indirect(reflect.ValueOf(r.Data)) unmarshalBody(r, v) + } +} + +// UnmarshalMeta unmarshals the REST metadata of a response in a REST service +func UnmarshalMeta(r *request.Request) { + r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } + if r.DataFilled() { + v := reflect.Indirect(reflect.ValueOf(r.Data)) unmarshalLocationElements(r, v) } } -func unmarshalBody(r *aws.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("SDKShapeTraits"); ok { +func unmarshalBody(r *request.Request, v reflect.Value) { + if field, ok := v.Type().FieldByName("_"); ok { if payloadName := field.Tag.Get("payload"); payloadName != "" { pfield, _ := v.Type().FieldByName(payloadName) if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { @@ -32,6 +54,7 @@ func unmarshalBody(r *aws.Request, v reflect.Value) { if payload.IsValid() { switch payload.Interface().(type) { case []byte: + defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { r.Error = awserr.New("SerializationError", "failed to decode REST response", err) @@ -39,6 +62,7 @@ func unmarshalBody(r *aws.Request, v reflect.Value) { payload.Set(reflect.ValueOf(b)) } case *string: + defer r.HTTPResponse.Body.Close() b, err := ioutil.ReadAll(r.HTTPResponse.Body) if err != nil { r.Error = awserr.New("SerializationError", "failed to decode REST response", err) @@ -48,11 +72,19 @@ func unmarshalBody(r *aws.Request, v reflect.Value) { } default: switch payload.Type().String() { - case "io.ReadSeeker": - payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) - case "aws.ReadSeekCloser", "io.ReadCloser": + case "io.ReadCloser": payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) + case "io.ReadSeeker": + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", + "failed to read response body", err) + return + } + payload.Set(reflect.ValueOf(ioutil.NopCloser(bytes.NewReader(b)))) default: + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + defer r.HTTPResponse.Body.Close() r.Error = awserr.New("SerializationError", "failed to decode REST response", fmt.Errorf("unknown payload type %s", payload.Type())) @@ -64,7 +96,7 @@ func unmarshalBody(r *aws.Request, v reflect.Value) { } } -func unmarshalLocationElements(r *aws.Request, v reflect.Value) { +func unmarshalLocationElements(r *request.Request, v reflect.Value) { for i := 0; i < v.NumField(); i++ { m, field := v.Field(i), v.Type().Field(i) if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { @@ -81,7 +113,7 @@ func unmarshalLocationElements(r *aws.Request, v reflect.Value) { case "statusCode": unmarshalStatusCode(m, r.HTTPResponse.StatusCode) case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) + err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name), field.Tag) if err != nil { r.Error = awserr.New("SerializationError", "failed to decode REST response", err) break @@ -128,8 +160,13 @@ func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) err return nil } -func unmarshalHeader(v reflect.Value, header string) error { - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { +func unmarshalHeader(v reflect.Value, header string, tag reflect.StructTag) error { + isJSONValue 
:= tag.Get("type") == "jsonvalue" + if isJSONValue { + if len(header) == 0 { + return nil + } + } else if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { return nil } @@ -166,6 +203,22 @@ func unmarshalHeader(v reflect.Value, header string) error { return err } v.Set(reflect.ValueOf(&t)) + case aws.JSONValue: + b := []byte(header) + var err error + if tag.Get("location") == "header" { + b, err = base64.StdEncoding.DecodeString(header) + if err != nil { + return err + } + } + + m := aws.JSONValue{} + err = json.Unmarshal(b, &m) + if err != nil { + return err + } + v.Set(reflect.ValueOf(m)) default: err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) return err diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go new file mode 100644 index 0000000..7bdf4c8 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/restxml/restxml.go @@ -0,0 +1,69 @@ +// Package restxml provides RESTful XML serialization of AWS +// requests and responses. +package restxml + +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go +//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go + +import ( + "bytes" + "encoding/xml" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol/query" + "github.com/aws/aws-sdk-go/private/protocol/rest" + "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil" +) + +// BuildHandler is a named request handler for building restxml protocol requests +var BuildHandler = request.NamedHandler{Name: "awssdk.restxml.Build", Fn: Build} + +// UnmarshalHandler is a named request handler for unmarshaling restxml protocol requests +var UnmarshalHandler = request.NamedHandler{Name: "awssdk.restxml.Unmarshal", Fn: Unmarshal} + +// UnmarshalMetaHandler is a named request handler for unmarshaling restxml protocol request metadata +var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalMeta", Fn: UnmarshalMeta} + +// UnmarshalErrorHandler is a named request handler for unmarshaling restxml protocol request errors +var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.restxml.UnmarshalError", Fn: UnmarshalError} + +// Build builds a request payload for the REST XML protocol. +func Build(r *request.Request) { + rest.Build(r) + + if t := rest.PayloadType(r.Params); t == "structure" || t == "" { + var buf bytes.Buffer + err := xmlutil.BuildXML(r.Params, xml.NewEncoder(&buf)) + if err != nil { + r.Error = awserr.New("SerializationError", "failed to encode rest XML request", err) + return + } + r.SetBufferBody(buf.Bytes()) + } +} + +// Unmarshal unmarshals a payload response for the REST XML protocol. +func Unmarshal(r *request.Request) { + if t := rest.PayloadType(r.Data); t == "structure" || t == "" { + defer r.HTTPResponse.Body.Close() + decoder := xml.NewDecoder(r.HTTPResponse.Body) + err := xmlutil.UnmarshalXML(r.Data, decoder, "") + if err != nil { + r.Error = awserr.New("SerializationError", "failed to decode REST XML response", err) + return + } + } else { + rest.Unmarshal(r) + } +} + +// UnmarshalMeta unmarshals response headers for the REST XML protocol. 
+func UnmarshalMeta(r *request.Request) { + rest.UnmarshalMeta(r) +} + +// UnmarshalError unmarshals a response error for the REST XML protocol. +func UnmarshalError(r *request.Request) { + query.UnmarshalError(r) +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go new file mode 100644 index 0000000..da1a681 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go @@ -0,0 +1,21 @@ +package protocol + +import ( + "io" + "io/ioutil" + + "github.com/aws/aws-sdk-go/aws/request" +) + +// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body +var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} + +// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. +func UnmarshalDiscardBody(r *request.Request) { + if r.HTTPResponse == nil || r.HTTPResponse.Body == nil { + return + } + + io.Copy(ioutil.Discard, r.HTTPResponse.Body) + r.HTTPResponse.Body.Close() +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/build.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go similarity index 95% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/build.go rename to vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go index d3db250..7091b45 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/build.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/build.go @@ -1,4 +1,4 @@ -// Package xmlutil provides XML serialisation of AWS requests and responses. +// Package xmlutil provides XML serialization of AWS requests and responses. package xmlutil import ( @@ -8,8 +8,9 @@ import ( "reflect" "sort" "strconv" - "strings" "time" + + "github.com/aws/aws-sdk-go/private/protocol" ) // BuildXML will serialize params into an xml.Encoder. 
@@ -69,7 +70,7 @@ func (b *xmlBuilder) buildValue(value reflect.Value, current *XMLNode, tag refle switch t { case "structure": - if field, ok := value.Type().FieldByName("SDKShapeTraits"); ok { + if field, ok := value.Type().FieldByName("_"); ok { tag = tag + reflect.StructTag(" ") + field.Tag } return b.buildStruct(value, current, tag) @@ -120,18 +121,26 @@ func (b *xmlBuilder) buildStruct(value reflect.Value, current *XMLNode, tag refl t := value.Type() for i := 0; i < value.NumField(); i++ { - if c := t.Field(i).Name[0:1]; strings.ToLower(c) == c { - continue // ignore unexported fields - } - member := elemOf(value.Field(i)) field := t.Field(i) - mTag := field.Tag + if field.PkgPath != "" { + continue // ignore unexported fields + } + if field.Tag.Get("ignore") != "" { + continue + } + + mTag := field.Tag if mTag.Get("location") != "" { // skip non-body members continue } + if protocol.CanSetIdempotencyToken(value.Field(i), field) { + token := protocol.GetIdempotencyToken() + member = reflect.ValueOf(token) + } + memberName := mTag.Get("locationName") if memberName == "" { memberName = field.Name diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go similarity index 95% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go rename to vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go index 5e4fe21..8758462 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/unmarshal.go @@ -15,7 +15,10 @@ import ( // needs to match the shape of the XML expected to be decoded. // If the shape doesn't match unmarshaling will fail. 
func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { - n, _ := XMLToStruct(d, nil) + n, err := XMLToStruct(d, nil) + if err != nil { + return err + } if n.Children != nil { for _, root := range n.Children { for _, c := range root { @@ -23,7 +26,7 @@ func UnmarshalXML(v interface{}, d *xml.Decoder, wrapper string) error { c = wrappedChild[0] // pull out wrapped element } - err := parse(reflect.ValueOf(v), c, "") + err = parse(reflect.ValueOf(v), c, "") if err != nil { if err == io.EOF { return nil @@ -59,7 +62,7 @@ func parse(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { switch t { case "structure": - if field, ok := rtype.FieldByName("SDKShapeTraits"); ok { + if field, ok := rtype.FieldByName("_"); ok { tag = field.Tag } return parseStruct(r, node, tag) @@ -111,11 +114,8 @@ func parseStruct(r reflect.Value, node *XMLNode, tag reflect.StructTag) error { elems := node.Children[name] if elems == nil { // try to find the field in attributes - for _, a := range node.Attr { - if name == a.Name.Local { - // turn this into a text node for de-serializing - elems = []*XMLNode{{Text: a.Value}} - } + if val, ok := node.findElem(name); ok { + elems = []*XMLNode{{Text: val}} } } diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go similarity index 73% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go rename to vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go index 72c198a..3e970b6 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil/xml_to_struct.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil/xml_to_struct.go @@ -2,6 +2,7 @@ package xmlutil import ( "encoding/xml" + "fmt" "io" "sort" ) @@ -12,6 +13,9 @@ type XMLNode struct { Children map[string][]*XMLNode `json:",omitempty"` Text string `json:",omitempty"` Attr []xml.Attr `json:",omitempty"` + + namespaces map[string]string + parent *XMLNode } // NewXMLElement returns a pointer to a new XMLNode initialized to default values. 
@@ -36,11 +40,16 @@ func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { out := &XMLNode{} for { tok, err := d.Token() - if tok == nil || err == io.EOF { - break - } if err != nil { - return out, err + if err == io.EOF { + break + } else { + return out, err + } + } + + if tok == nil { + break } switch typed := tok.(type) { @@ -59,21 +68,54 @@ func XMLToStruct(d *xml.Decoder, s *xml.StartElement) (*XMLNode, error) { slice = []*XMLNode{} } node, e := XMLToStruct(d, &el) + out.findNamespaces() if e != nil { return out, e } node.Name = typed.Name + node.findNamespaces() + tempOut := *out + // Save into a temp variable, simply because out gets squashed during + // loop iterations + node.parent = &tempOut slice = append(slice, node) out.Children[name] = slice case xml.EndElement: if s != nil && s.Name.Local == typed.Name.Local { // matching end token return out, nil } + out = &XMLNode{} } } return out, nil } +func (n *XMLNode) findNamespaces() { + ns := map[string]string{} + for _, a := range n.Attr { + if a.Name.Space == "xmlns" { + ns[a.Value] = a.Name.Local + } + } + + n.namespaces = ns +} + +func (n *XMLNode) findElem(name string) (string, bool) { + for node := n; node != nil; node = node.parent { + for _, a := range node.Attr { + namespace := a.Name.Space + if v, ok := node.namespaces[namespace]; ok { + namespace = v + } + if name == fmt.Sprintf("%s:%s", namespace, a.Name.Local) { + return a.Value, true + } + } + } + return "", false +} + // StructToXML writes an XMLNode to a xml.Encoder as tokens. func StructToXML(e *xml.Encoder, node *XMLNode, sorted bool) error { e.EncodeToken(xml.StartElement{Name: node.Name, Attr: node.Attr}) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go new file mode 100644 index 0000000..52ac02c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -0,0 +1,19245 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +import ( + "fmt" + "io" + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/private/protocol" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +const opAbortMultipartUpload = "AbortMultipartUpload" + +// AbortMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the AbortMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AbortMultipartUpload for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AbortMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AbortMultipartUploadRequest method. 
+// req, resp := client.AbortMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUploadRequest(input *AbortMultipartUploadInput) (req *request.Request, output *AbortMultipartUploadOutput) { + op := &request.Operation{ + Name: opAbortMultipartUpload, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &AbortMultipartUploadInput{} + } + + output = &AbortMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// AbortMultipartUpload API operation for Amazon Simple Storage Service. +// +// Aborts a multipart upload. +// +// To verify that all parts have been removed, so you don't get charged for +// the part storage, you should call the List Parts operation and ensure the +// parts list is empty. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation AbortMultipartUpload for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchUpload "NoSuchUpload" +// The specified multipart upload does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload +func (c *S3) AbortMultipartUpload(input *AbortMultipartUploadInput) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + return out, req.Send() +} + +// AbortMultipartUploadWithContext is the same as AbortMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See AbortMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) AbortMultipartUploadWithContext(ctx aws.Context, input *AbortMultipartUploadInput, opts ...request.Option) (*AbortMultipartUploadOutput, error) { + req, out := c.AbortMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCompleteMultipartUpload = "CompleteMultipartUpload" + +// CompleteMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CompleteMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CompleteMultipartUpload for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CompleteMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CompleteMultipartUploadRequest method. 
+// req, resp := client.CompleteMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUploadRequest(input *CompleteMultipartUploadInput) (req *request.Request, output *CompleteMultipartUploadOutput) { + op := &request.Operation{ + Name: opCompleteMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CompleteMultipartUploadInput{} + } + + output = &CompleteMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CompleteMultipartUpload API operation for Amazon Simple Storage Service. +// +// Completes a multipart upload by assembling previously uploaded parts. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CompleteMultipartUpload for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload +func (c *S3) CompleteMultipartUpload(input *CompleteMultipartUploadInput) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + return out, req.Send() +} + +// CompleteMultipartUploadWithContext is the same as CompleteMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CompleteMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CompleteMultipartUploadWithContext(ctx aws.Context, input *CompleteMultipartUploadInput, opts ...request.Option) (*CompleteMultipartUploadOutput, error) { + req, out := c.CompleteMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCopyObject = "CopyObject" + +// CopyObjectRequest generates a "aws/request.Request" representing the +// client's request for the CopyObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CopyObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CopyObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CopyObjectRequest method. 
+// req, resp := client.CopyObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, output *CopyObjectOutput) { + op := &request.Operation{ + Name: opCopyObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &CopyObjectInput{} + } + + output = &CopyObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// CopyObject API operation for Amazon Simple Storage Service. +// +// Creates a copy of an object that is already stored in Amazon S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CopyObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectNotInActiveTierError "ObjectNotInActiveTierError" +// The source object of the COPY operation is not in the active tier and is +// only stored in Amazon Glacier. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject +func (c *S3) CopyObject(input *CopyObjectInput) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + return out, req.Send() +} + +// CopyObjectWithContext is the same as CopyObject with the addition of +// the ability to pass a context and additional request options. +// +// See CopyObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CopyObjectWithContext(ctx aws.Context, input *CopyObjectInput, opts ...request.Option) (*CopyObjectOutput, error) { + req, out := c.CopyObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateBucket = "CreateBucket" + +// CreateBucketRequest generates a "aws/request.Request" representing the +// client's request for the CreateBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateBucket for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateBucketRequest method. 
+// req, resp := client.CreateBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucketRequest(input *CreateBucketInput) (req *request.Request, output *CreateBucketOutput) { + op := &request.Operation{ + Name: opCreateBucket, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &CreateBucketInput{} + } + + output = &CreateBucketOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateBucket API operation for Amazon Simple Storage Service. +// +// Creates a new bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeBucketAlreadyExists "BucketAlreadyExists" +// The requested bucket name is not available. The bucket namespace is shared +// by all users of the system. Please select a different name and try again. +// +// * ErrCodeBucketAlreadyOwnedByYou "BucketAlreadyOwnedByYou" +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket +func (c *S3) CreateBucket(input *CreateBucketInput) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + return out, req.Send() +} + +// CreateBucketWithContext is the same as CreateBucket with the addition of +// the ability to pass a context and additional request options. +// +// See CreateBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateBucketWithContext(ctx aws.Context, input *CreateBucketInput, opts ...request.Option) (*CreateBucketOutput, error) { + req, out := c.CreateBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opCreateMultipartUpload = "CreateMultipartUpload" + +// CreateMultipartUploadRequest generates a "aws/request.Request" representing the +// client's request for the CreateMultipartUpload operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See CreateMultipartUpload for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the CreateMultipartUpload method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the CreateMultipartUploadRequest method. 
+// req, resp := client.CreateMultipartUploadRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUploadRequest(input *CreateMultipartUploadInput) (req *request.Request, output *CreateMultipartUploadOutput) { + op := &request.Operation{ + Name: opCreateMultipartUpload, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?uploads", + } + + if input == nil { + input = &CreateMultipartUploadInput{} + } + + output = &CreateMultipartUploadOutput{} + req = c.newRequest(op, input, output) + return +} + +// CreateMultipartUpload API operation for Amazon Simple Storage Service. +// +// Initiates a multipart upload and returns an upload ID. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation CreateMultipartUpload for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload +func (c *S3) CreateMultipartUpload(input *CreateMultipartUploadInput) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + return out, req.Send() +} + +// CreateMultipartUploadWithContext is the same as CreateMultipartUpload with the addition of +// the ability to pass a context and additional request options. +// +// See CreateMultipartUpload for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) CreateMultipartUploadWithContext(ctx aws.Context, input *CreateMultipartUploadInput, opts ...request.Option) (*CreateMultipartUploadOutput, error) { + req, out := c.CreateMultipartUploadRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucket = "DeleteBucket" + +// DeleteBucketRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucket for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketRequest method. 
+// req, resp := client.DeleteBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucketRequest(input *DeleteBucketInput) (req *request.Request, output *DeleteBucketOutput) { + op := &request.Operation{ + Name: opDeleteBucket, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &DeleteBucketInput{} + } + + output = &DeleteBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucket API operation for Amazon Simple Storage Service. +// +// Deletes the bucket. All objects (including all object versions and Delete +// Markers) in the bucket must be deleted before the bucket itself can be deleted. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucket for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket +func (c *S3) DeleteBucket(input *DeleteBucketInput) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + return out, req.Send() +} + +// DeleteBucketWithContext is the same as DeleteBucket with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWithContext(ctx aws.Context, input *DeleteBucketInput, opts ...request.Option) (*DeleteBucketOutput, error) { + req, out := c.DeleteBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketAnalyticsConfiguration = "DeleteBucketAnalyticsConfiguration" + +// DeleteBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketAnalyticsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketAnalyticsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketAnalyticsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.DeleteBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfigurationRequest(input *DeleteBucketAnalyticsConfigurationInput) (req *request.Request, output *DeleteBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketAnalyticsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &DeleteBucketAnalyticsConfigurationInput{} + } + + output = &DeleteBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketAnalyticsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration +func (c *S3) DeleteBucketAnalyticsConfiguration(input *DeleteBucketAnalyticsConfigurationInput) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketAnalyticsConfigurationWithContext is the same as DeleteBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *DeleteBucketAnalyticsConfigurationInput, opts ...request.Option) (*DeleteBucketAnalyticsConfigurationOutput, error) { + req, out := c.DeleteBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketCors = "DeleteBucketCors" + +// DeleteBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketCors for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketCors method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketCorsRequest method. +// req, resp := client.DeleteBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCorsRequest(input *DeleteBucketCorsInput) (req *request.Request, output *DeleteBucketCorsOutput) { + op := &request.Operation{ + Name: opDeleteBucketCors, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &DeleteBucketCorsInput{} + } + + output = &DeleteBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketCors API operation for Amazon Simple Storage Service. +// +// Deletes the cors configuration information set for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketCors for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors +func (c *S3) DeleteBucketCors(input *DeleteBucketCorsInput) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + return out, req.Send() +} + +// DeleteBucketCorsWithContext is the same as DeleteBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketCorsWithContext(ctx aws.Context, input *DeleteBucketCorsInput, opts ...request.Option) (*DeleteBucketCorsOutput, error) { + req, out := c.DeleteBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketInventoryConfiguration = "DeleteBucketInventoryConfiguration" + +// DeleteBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketInventoryConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketInventoryConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketInventoryConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketInventoryConfigurationRequest method. 
+// req, resp := client.DeleteBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfigurationRequest(input *DeleteBucketInventoryConfigurationInput) (req *request.Request, output *DeleteBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketInventoryConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &DeleteBucketInventoryConfigurationInput{} + } + + output = &DeleteBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes an inventory configuration (identified by the inventory ID) from +// the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketInventoryConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration +func (c *S3) DeleteBucketInventoryConfiguration(input *DeleteBucketInventoryConfigurationInput) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketInventoryConfigurationWithContext is the same as DeleteBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketInventoryConfigurationWithContext(ctx aws.Context, input *DeleteBucketInventoryConfigurationInput, opts ...request.Option) (*DeleteBucketInventoryConfigurationOutput, error) { + req, out := c.DeleteBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketLifecycle = "DeleteBucketLifecycle" + +// DeleteBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketLifecycle for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketLifecycle method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketLifecycleRequest method. +// req, resp := client.DeleteBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycleRequest(input *DeleteBucketLifecycleInput) (req *request.Request, output *DeleteBucketLifecycleOutput) { + op := &request.Operation{ + Name: opDeleteBucketLifecycle, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &DeleteBucketLifecycleInput{} + } + + output = &DeleteBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deletes the lifecycle configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketLifecycle for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle +func (c *S3) DeleteBucketLifecycle(input *DeleteBucketLifecycleInput) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + return out, req.Send() +} + +// DeleteBucketLifecycleWithContext is the same as DeleteBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketLifecycleWithContext(ctx aws.Context, input *DeleteBucketLifecycleInput, opts ...request.Option) (*DeleteBucketLifecycleOutput, error) { + req, out := c.DeleteBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketMetricsConfiguration = "DeleteBucketMetricsConfiguration" + +// DeleteBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketMetricsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketMetricsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketMetricsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. 
+// +// // Example sending a request using the DeleteBucketMetricsConfigurationRequest method. +// req, resp := client.DeleteBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfigurationRequest(input *DeleteBucketMetricsConfigurationInput) (req *request.Request, output *DeleteBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opDeleteBucketMetricsConfiguration, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &DeleteBucketMetricsConfigurationInput{} + } + + output = &DeleteBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Deletes a metrics configuration (specified by the metrics configuration ID) +// from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketMetricsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration +func (c *S3) DeleteBucketMetricsConfiguration(input *DeleteBucketMetricsConfigurationInput) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// DeleteBucketMetricsConfigurationWithContext is the same as DeleteBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketMetricsConfigurationWithContext(ctx aws.Context, input *DeleteBucketMetricsConfigurationInput, opts ...request.Option) (*DeleteBucketMetricsConfigurationOutput, error) { + req, out := c.DeleteBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketPolicy = "DeleteBucketPolicy" + +// DeleteBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketPolicy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketPolicy method directly +// instead. 
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketPolicyRequest method. +// req, resp := client.DeleteBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicyRequest(input *DeleteBucketPolicyInput) (req *request.Request, output *DeleteBucketPolicyOutput) { + op := &request.Operation{ + Name: opDeleteBucketPolicy, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &DeleteBucketPolicyInput{} + } + + output = &DeleteBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketPolicy API operation for Amazon Simple Storage Service. +// +// Deletes the policy from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketPolicy for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy +func (c *S3) DeleteBucketPolicy(input *DeleteBucketPolicyInput) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + return out, req.Send() +} + +// DeleteBucketPolicyWithContext is the same as DeleteBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketPolicyWithContext(ctx aws.Context, input *DeleteBucketPolicyInput, opts ...request.Option) (*DeleteBucketPolicyOutput, error) { + req, out := c.DeleteBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketReplication = "DeleteBucketReplication" + +// DeleteBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketReplication for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketReplicationRequest method. 
+// req, resp := client.DeleteBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplicationRequest(input *DeleteBucketReplicationInput) (req *request.Request, output *DeleteBucketReplicationOutput) { + op := &request.Operation{ + Name: opDeleteBucketReplication, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &DeleteBucketReplicationInput{} + } + + output = &DeleteBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketReplication API operation for Amazon Simple Storage Service. +// +// Deletes the replication configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketReplication for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication +func (c *S3) DeleteBucketReplication(input *DeleteBucketReplicationInput) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + return out, req.Send() +} + +// DeleteBucketReplicationWithContext is the same as DeleteBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketReplicationWithContext(ctx aws.Context, input *DeleteBucketReplicationInput, opts ...request.Option) (*DeleteBucketReplicationOutput, error) { + req, out := c.DeleteBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketTagging = "DeleteBucketTagging" + +// DeleteBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketTaggingRequest method. 
+// req, resp := client.DeleteBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTaggingRequest(input *DeleteBucketTaggingInput) (req *request.Request, output *DeleteBucketTaggingOutput) { + op := &request.Operation{ + Name: opDeleteBucketTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &DeleteBucketTaggingInput{} + } + + output = &DeleteBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketTagging API operation for Amazon Simple Storage Service. +// +// Deletes the tags from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging +func (c *S3) DeleteBucketTagging(input *DeleteBucketTaggingInput) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + return out, req.Send() +} + +// DeleteBucketTaggingWithContext is the same as DeleteBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketTaggingWithContext(ctx aws.Context, input *DeleteBucketTaggingInput, opts ...request.Option) (*DeleteBucketTaggingOutput, error) { + req, out := c.DeleteBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteBucketWebsite = "DeleteBucketWebsite" + +// DeleteBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the DeleteBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteBucketWebsite for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteBucketWebsiteRequest method. 
+// req, resp := client.DeleteBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsiteRequest(input *DeleteBucketWebsiteInput) (req *request.Request, output *DeleteBucketWebsiteOutput) { + op := &request.Operation{ + Name: opDeleteBucketWebsite, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &DeleteBucketWebsiteInput{} + } + + output = &DeleteBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// DeleteBucketWebsite API operation for Amazon Simple Storage Service. +// +// This operation removes the website configuration from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteBucketWebsite for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite +func (c *S3) DeleteBucketWebsite(input *DeleteBucketWebsiteInput) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + return out, req.Send() +} + +// DeleteBucketWebsiteWithContext is the same as DeleteBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteBucketWebsiteWithContext(ctx aws.Context, input *DeleteBucketWebsiteInput, opts ...request.Option) (*DeleteBucketWebsiteOutput, error) { + req, out := c.DeleteBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObject = "DeleteObject" + +// DeleteObjectRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectRequest method. 
+// req, resp := client.DeleteObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObjectRequest(input *DeleteObjectInput) (req *request.Request, output *DeleteObjectOutput) { + op := &request.Operation{ + Name: opDeleteObject, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &DeleteObjectInput{} + } + + output = &DeleteObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObject API operation for Amazon Simple Storage Service. +// +// Removes the null version (if there is one) of an object and inserts a delete +// marker, which becomes the latest version of the object. If there isn't a +// null version, Amazon S3 does not remove any objects. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObject for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject +func (c *S3) DeleteObject(input *DeleteObjectInput) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + return out, req.Send() +} + +// DeleteObjectWithContext is the same as DeleteObject with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectWithContext(ctx aws.Context, input *DeleteObjectInput, opts ...request.Option) (*DeleteObjectOutput, error) { + req, out := c.DeleteObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjectTagging = "DeleteObjectTagging" + +// DeleteObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjectTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteObjectTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObjectTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectTaggingRequest method. 
+// req, resp := client.DeleteObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +func (c *S3) DeleteObjectTaggingRequest(input *DeleteObjectTaggingInput) (req *request.Request, output *DeleteObjectTaggingOutput) { + op := &request.Operation{ + Name: opDeleteObjectTagging, + HTTPMethod: "DELETE", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &DeleteObjectTaggingInput{} + } + + output = &DeleteObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObjectTagging API operation for Amazon Simple Storage Service. +// +// Removes the tag-set from an existing object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjectTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging +func (c *S3) DeleteObjectTagging(input *DeleteObjectTaggingInput) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + return out, req.Send() +} + +// DeleteObjectTaggingWithContext is the same as DeleteObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectTaggingWithContext(ctx aws.Context, input *DeleteObjectTaggingInput, opts ...request.Option) (*DeleteObjectTaggingOutput, error) { + req, out := c.DeleteObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDeleteObjects = "DeleteObjects" + +// DeleteObjectsRequest generates a "aws/request.Request" representing the +// client's request for the DeleteObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DeleteObjects for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DeleteObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DeleteObjectsRequest method. 
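+//
+// As a supplementary sketch (assuming an existing *s3.S3 client named svc and
+// placeholder bucket and key names), the batch-delete request body is built from
+// a Delete value holding up to 1000 ObjectIdentifier entries; the generated
+// DeleteObjectsRequest example follows below.
+//
+//    out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
+//        Bucket: aws.String("example-bucket"),
+//        Delete: &s3.Delete{Objects: []*s3.ObjectIdentifier{
+//            {Key: aws.String("example-key-1")},
+//            {Key: aws.String("example-key-2")},
+//        }},
+//    })
+//    if err == nil {
+//        fmt.Println(out.Deleted)
+//    }
+//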
+// req, resp := client.DeleteObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjectsRequest(input *DeleteObjectsInput) (req *request.Request, output *DeleteObjectsOutput) { + op := &request.Operation{ + Name: opDeleteObjects, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}?delete", + } + + if input == nil { + input = &DeleteObjectsInput{} + } + + output = &DeleteObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// DeleteObjects API operation for Amazon Simple Storage Service. +// +// This operation enables you to delete multiple objects from a bucket using +// a single HTTP request. You may specify up to 1000 keys. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation DeleteObjects for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects +func (c *S3) DeleteObjects(input *DeleteObjectsInput) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + return out, req.Send() +} + +// DeleteObjectsWithContext is the same as DeleteObjects with the addition of +// the ability to pass a context and additional request options. +// +// See DeleteObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) DeleteObjectsWithContext(ctx aws.Context, input *DeleteObjectsInput, opts ...request.Option) (*DeleteObjectsOutput, error) { + req, out := c.DeleteObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAccelerateConfiguration = "GetBucketAccelerateConfiguration" + +// GetBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketAccelerateConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAccelerateConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAccelerateConfigurationRequest method. 
+// req, resp := client.GetBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfigurationRequest(input *GetBucketAccelerateConfigurationInput) (req *request.Request, output *GetBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAccelerateConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &GetBucketAccelerateConfigurationInput{} + } + + output = &GetBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the accelerate configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAccelerateConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration +func (c *S3) GetBucketAccelerateConfiguration(input *GetBucketAccelerateConfigurationInput) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAccelerateConfigurationWithContext is the same as GetBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAccelerateConfigurationWithContext(ctx aws.Context, input *GetBucketAccelerateConfigurationInput, opts ...request.Option) (*GetBucketAccelerateConfigurationOutput, error) { + req, out := c.GetBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAcl = "GetBucketAcl" + +// GetBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketAcl for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAclRequest method. 
+// req, resp := client.GetBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAclRequest(input *GetBucketAclInput) (req *request.Request, output *GetBucketAclOutput) { + op := &request.Operation{ + Name: opGetBucketAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &GetBucketAclInput{} + } + + output = &GetBucketAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAcl API operation for Amazon Simple Storage Service. +// +// Gets the access control policy for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAcl for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl +func (c *S3) GetBucketAcl(input *GetBucketAclInput) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + return out, req.Send() +} + +// GetBucketAclWithContext is the same as GetBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAclWithContext(ctx aws.Context, input *GetBucketAclInput, opts ...request.Option) (*GetBucketAclOutput, error) { + req, out := c.GetBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketAnalyticsConfiguration = "GetBucketAnalyticsConfiguration" + +// GetBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketAnalyticsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketAnalyticsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketAnalyticsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketAnalyticsConfigurationRequest method. 
+// req, resp := client.GetBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfigurationRequest(input *GetBucketAnalyticsConfigurationInput) (req *request.Request, output *GetBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketAnalyticsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &GetBucketAnalyticsConfigurationInput{} + } + + output = &GetBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Gets an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketAnalyticsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration +func (c *S3) GetBucketAnalyticsConfiguration(input *GetBucketAnalyticsConfigurationInput) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketAnalyticsConfigurationWithContext is the same as GetBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *GetBucketAnalyticsConfigurationInput, opts ...request.Option) (*GetBucketAnalyticsConfigurationOutput, error) { + req, out := c.GetBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketCors = "GetBucketCors" + +// GetBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketCors for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketCorsRequest method. 
+// req, resp := client.GetBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCorsRequest(input *GetBucketCorsInput) (req *request.Request, output *GetBucketCorsOutput) { + op := &request.Operation{ + Name: opGetBucketCors, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &GetBucketCorsInput{} + } + + output = &GetBucketCorsOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketCors API operation for Amazon Simple Storage Service. +// +// Returns the cors configuration for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketCors for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors +func (c *S3) GetBucketCors(input *GetBucketCorsInput) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + return out, req.Send() +} + +// GetBucketCorsWithContext is the same as GetBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketCorsWithContext(ctx aws.Context, input *GetBucketCorsInput, opts ...request.Option) (*GetBucketCorsOutput, error) { + req, out := c.GetBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketInventoryConfiguration = "GetBucketInventoryConfiguration" + +// GetBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketInventoryConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketInventoryConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketInventoryConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketInventoryConfigurationRequest method. 
+// req, resp := client.GetBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfigurationRequest(input *GetBucketInventoryConfigurationInput) (req *request.Request, output *GetBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketInventoryConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &GetBucketInventoryConfigurationInput{} + } + + output = &GetBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Returns an inventory configuration (identified by the inventory ID) from +// the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketInventoryConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration +func (c *S3) GetBucketInventoryConfiguration(input *GetBucketInventoryConfigurationInput) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketInventoryConfigurationWithContext is the same as GetBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketInventoryConfigurationWithContext(ctx aws.Context, input *GetBucketInventoryConfigurationInput, opts ...request.Option) (*GetBucketInventoryConfigurationOutput, error) { + req, out := c.GetBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycle = "GetBucketLifecycle" + +// GetBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketLifecycle for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLifecycleRequest method. 
+// req, resp := client.GetBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +func (c *S3) GetBucketLifecycleRequest(input *GetBucketLifecycleInput) (req *request.Request, output *GetBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketLifecycle, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleInput{} + } + + output = &GetBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deprecated, see the GetBucketLifecycleConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycle for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycle +func (c *S3) GetBucketLifecycle(input *GetBucketLifecycleInput) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleWithContext is the same as GetBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleWithContext(ctx aws.Context, input *GetBucketLifecycleInput, opts ...request.Option) (*GetBucketLifecycleOutput, error) { + req, out := c.GetBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLifecycleConfiguration = "GetBucketLifecycleConfiguration" + +// GetBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLifecycleConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketLifecycleConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLifecycleConfigurationRequest method. 
+// req, resp := client.GetBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfigurationRequest(input *GetBucketLifecycleConfigurationInput) (req *request.Request, output *GetBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketLifecycleConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &GetBucketLifecycleConfigurationInput{} + } + + output = &GetBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the lifecycle configuration information set on the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLifecycleConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration +func (c *S3) GetBucketLifecycleConfiguration(input *GetBucketLifecycleConfigurationInput) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketLifecycleConfigurationWithContext is the same as GetBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLifecycleConfigurationWithContext(ctx aws.Context, input *GetBucketLifecycleConfigurationInput, opts ...request.Option) (*GetBucketLifecycleConfigurationOutput, error) { + req, out := c.GetBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLocation = "GetBucketLocation" + +// GetBucketLocationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLocation operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketLocation for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLocation method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLocationRequest method. 
+// req, resp := client.GetBucketLocationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocationRequest(input *GetBucketLocationInput) (req *request.Request, output *GetBucketLocationOutput) { + op := &request.Operation{ + Name: opGetBucketLocation, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?location", + } + + if input == nil { + input = &GetBucketLocationInput{} + } + + output = &GetBucketLocationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLocation API operation for Amazon Simple Storage Service. +// +// Returns the region the bucket resides in. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLocation for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation +func (c *S3) GetBucketLocation(input *GetBucketLocationInput) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + return out, req.Send() +} + +// GetBucketLocationWithContext is the same as GetBucketLocation with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLocation for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLocationWithContext(ctx aws.Context, input *GetBucketLocationInput, opts ...request.Option) (*GetBucketLocationOutput, error) { + req, out := c.GetBucketLocationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketLogging = "GetBucketLogging" + +// GetBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketLogging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketLoggingRequest method. 
+// req, resp := client.GetBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLoggingRequest(input *GetBucketLoggingInput) (req *request.Request, output *GetBucketLoggingOutput) { + op := &request.Operation{ + Name: opGetBucketLogging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &GetBucketLoggingInput{} + } + + output = &GetBucketLoggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketLogging API operation for Amazon Simple Storage Service. +// +// Returns the logging status of a bucket and the permissions users have to +// view and modify that status. To use GET, you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketLogging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging +func (c *S3) GetBucketLogging(input *GetBucketLoggingInput) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + return out, req.Send() +} + +// GetBucketLoggingWithContext is the same as GetBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketLoggingWithContext(ctx aws.Context, input *GetBucketLoggingInput, opts ...request.Option) (*GetBucketLoggingOutput, error) { + req, out := c.GetBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketMetricsConfiguration = "GetBucketMetricsConfiguration" + +// GetBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketMetricsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketMetricsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketMetricsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketMetricsConfigurationRequest method. 
+// req, resp := client.GetBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfigurationRequest(input *GetBucketMetricsConfigurationInput) (req *request.Request, output *GetBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opGetBucketMetricsConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &GetBucketMetricsConfigurationInput{} + } + + output = &GetBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Gets a metrics configuration (specified by the metrics configuration ID) +// from the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketMetricsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration +func (c *S3) GetBucketMetricsConfiguration(input *GetBucketMetricsConfigurationInput) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketMetricsConfigurationWithContext is the same as GetBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketMetricsConfigurationWithContext(ctx aws.Context, input *GetBucketMetricsConfigurationInput, opts ...request.Option) (*GetBucketMetricsConfigurationOutput, error) { + req, out := c.GetBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotification = "GetBucketNotification" + +// GetBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotification operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketNotification for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotification method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationRequest method. 
+// req, resp := client.GetBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +func (c *S3) GetBucketNotificationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfigurationDeprecated) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, GetBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opGetBucketNotification, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfigurationDeprecated{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotification API operation for Amazon Simple Storage Service. +// +// Deprecated, see the GetBucketNotificationConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotification for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotification +func (c *S3) GetBucketNotification(input *GetBucketNotificationConfigurationRequest) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationWithContext is the same as GetBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfigurationDeprecated, error) { + req, out := c.GetBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketNotificationConfiguration = "GetBucketNotificationConfiguration" + +// GetBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketNotificationConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketNotificationConfigurationRequest method. 
+// req, resp := client.GetBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfigurationRequest(input *GetBucketNotificationConfigurationRequest) (req *request.Request, output *NotificationConfiguration) { + op := &request.Operation{ + Name: opGetBucketNotificationConfiguration, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &GetBucketNotificationConfigurationRequest{} + } + + output = &NotificationConfiguration{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Returns the notification configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketNotificationConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration +func (c *S3) GetBucketNotificationConfiguration(input *GetBucketNotificationConfigurationRequest) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// GetBucketNotificationConfigurationWithContext is the same as GetBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketNotificationConfigurationWithContext(ctx aws.Context, input *GetBucketNotificationConfigurationRequest, opts ...request.Option) (*NotificationConfiguration, error) { + req, out := c.GetBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketPolicy = "GetBucketPolicy" + +// GetBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketPolicy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketPolicyRequest method. 
+// req, resp := client.GetBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicyRequest(input *GetBucketPolicyInput) (req *request.Request, output *GetBucketPolicyOutput) { + op := &request.Operation{ + Name: opGetBucketPolicy, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &GetBucketPolicyInput{} + } + + output = &GetBucketPolicyOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketPolicy API operation for Amazon Simple Storage Service. +// +// Returns the policy of a specified bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketPolicy for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy +func (c *S3) GetBucketPolicy(input *GetBucketPolicyInput) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + return out, req.Send() +} + +// GetBucketPolicyWithContext is the same as GetBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketPolicyWithContext(ctx aws.Context, input *GetBucketPolicyInput, opts ...request.Option) (*GetBucketPolicyOutput, error) { + req, out := c.GetBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketReplication = "GetBucketReplication" + +// GetBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketReplication for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketReplicationRequest method. 
+// req, resp := client.GetBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +func (c *S3) GetBucketReplicationRequest(input *GetBucketReplicationInput) (req *request.Request, output *GetBucketReplicationOutput) { + op := &request.Operation{ + Name: opGetBucketReplication, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &GetBucketReplicationInput{} + } + + output = &GetBucketReplicationOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketReplication API operation for Amazon Simple Storage Service. +// +// Returns the replication configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketReplication for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication +func (c *S3) GetBucketReplication(input *GetBucketReplicationInput) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + return out, req.Send() +} + +// GetBucketReplicationWithContext is the same as GetBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketReplicationWithContext(ctx aws.Context, input *GetBucketReplicationInput, opts ...request.Option) (*GetBucketReplicationOutput, error) { + req, out := c.GetBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketRequestPayment = "GetBucketRequestPayment" + +// GetBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketRequestPayment for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketRequestPaymentRequest method. 
+// req, resp := client.GetBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPaymentRequest(input *GetBucketRequestPaymentInput) (req *request.Request, output *GetBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opGetBucketRequestPayment, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &GetBucketRequestPaymentInput{} + } + + output = &GetBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Returns the request payment configuration of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketRequestPayment for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment +func (c *S3) GetBucketRequestPayment(input *GetBucketRequestPaymentInput) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// GetBucketRequestPaymentWithContext is the same as GetBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketRequestPaymentWithContext(ctx aws.Context, input *GetBucketRequestPaymentInput, opts ...request.Option) (*GetBucketRequestPaymentOutput, error) { + req, out := c.GetBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketTagging = "GetBucketTagging" + +// GetBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketTaggingRequest method. 
+// req, resp := client.GetBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTaggingRequest(input *GetBucketTaggingInput) (req *request.Request, output *GetBucketTaggingOutput) { + op := &request.Operation{ + Name: opGetBucketTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &GetBucketTaggingInput{} + } + + output = &GetBucketTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag set associated with the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging +func (c *S3) GetBucketTagging(input *GetBucketTaggingInput) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + return out, req.Send() +} + +// GetBucketTaggingWithContext is the same as GetBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketTaggingWithContext(ctx aws.Context, input *GetBucketTaggingInput, opts ...request.Option) (*GetBucketTaggingOutput, error) { + req, out := c.GetBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketVersioning = "GetBucketVersioning" + +// GetBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketVersioning for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketVersioningRequest method. 
+// req, resp := client.GetBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioningRequest(input *GetBucketVersioningInput) (req *request.Request, output *GetBucketVersioningOutput) { + op := &request.Operation{ + Name: opGetBucketVersioning, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &GetBucketVersioningInput{} + } + + output = &GetBucketVersioningOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketVersioning API operation for Amazon Simple Storage Service. +// +// Returns the versioning state of a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketVersioning for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning +func (c *S3) GetBucketVersioning(input *GetBucketVersioningInput) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + return out, req.Send() +} + +// GetBucketVersioningWithContext is the same as GetBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketVersioningWithContext(ctx aws.Context, input *GetBucketVersioningInput, opts ...request.Option) (*GetBucketVersioningOutput, error) { + req, out := c.GetBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetBucketWebsite = "GetBucketWebsite" + +// GetBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the GetBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetBucketWebsite for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetBucketWebsiteRequest method. 
+// req, resp := client.GetBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsiteRequest(input *GetBucketWebsiteInput) (req *request.Request, output *GetBucketWebsiteOutput) { + op := &request.Operation{ + Name: opGetBucketWebsite, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &GetBucketWebsiteInput{} + } + + output = &GetBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetBucketWebsite API operation for Amazon Simple Storage Service. +// +// Returns the website configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetBucketWebsite for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite +func (c *S3) GetBucketWebsite(input *GetBucketWebsiteInput) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + return out, req.Send() +} + +// GetBucketWebsiteWithContext is the same as GetBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See GetBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetBucketWebsiteWithContext(ctx aws.Context, input *GetBucketWebsiteInput, opts ...request.Option) (*GetBucketWebsiteOutput, error) { + req, out := c.GetBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObject = "GetObject" + +// GetObjectRequest generates a "aws/request.Request" representing the +// client's request for the GetObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectRequest method. 
+// req, resp := client.GetObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, output *GetObjectOutput) { + op := &request.Operation{ + Name: opGetObject, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &GetObjectInput{} + } + + output = &GetObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObject API operation for Amazon Simple Storage Service. +// +// Retrieves objects from Amazon S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject +func (c *S3) GetObject(input *GetObjectInput) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + return out, req.Send() +} + +// GetObjectWithContext is the same as GetObject with the addition of +// the ability to pass a context and additional request options. +// +// See GetObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectWithContext(ctx aws.Context, input *GetObjectInput, opts ...request.Option) (*GetObjectOutput, error) { + req, out := c.GetObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectAcl = "GetObjectAcl" + +// GetObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetObjectAcl for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectAclRequest method. 
+// req, resp := client.GetObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAclRequest(input *GetObjectAclInput) (req *request.Request, output *GetObjectAclOutput) { + op := &request.Operation{ + Name: opGetObjectAcl, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &GetObjectAclInput{} + } + + output = &GetObjectAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectAcl API operation for Amazon Simple Storage Service. +// +// Returns the access control list (ACL) of an object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectAcl for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl +func (c *S3) GetObjectAcl(input *GetObjectAclInput) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + return out, req.Send() +} + +// GetObjectAclWithContext is the same as GetObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectAclWithContext(ctx aws.Context, input *GetObjectAclInput, opts ...request.Option) (*GetObjectAclOutput, error) { + req, out := c.GetObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTagging = "GetObjectTagging" + +// GetObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetObjectTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectTaggingRequest method. 
+// req, resp := client.GetObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTaggingRequest(input *GetObjectTaggingInput) (req *request.Request, output *GetObjectTaggingOutput) { + op := &request.Operation{ + Name: opGetObjectTagging, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &GetObjectTaggingInput{} + } + + output = &GetObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTagging API operation for Amazon Simple Storage Service. +// +// Returns the tag-set of an object. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging +func (c *S3) GetObjectTagging(input *GetObjectTaggingInput) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + return out, req.Send() +} + +// GetObjectTaggingWithContext is the same as GetObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTaggingWithContext(ctx aws.Context, input *GetObjectTaggingInput, opts ...request.Option) (*GetObjectTaggingOutput, error) { + req, out := c.GetObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetObjectTorrent = "GetObjectTorrent" + +// GetObjectTorrentRequest generates a "aws/request.Request" representing the +// client's request for the GetObjectTorrent operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetObjectTorrent for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetObjectTorrent method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetObjectTorrentRequest method. 
+// req, resp := client.GetObjectTorrentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrentRequest(input *GetObjectTorrentInput) (req *request.Request, output *GetObjectTorrentOutput) { + op := &request.Operation{ + Name: opGetObjectTorrent, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}?torrent", + } + + if input == nil { + input = &GetObjectTorrentInput{} + } + + output = &GetObjectTorrentOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetObjectTorrent API operation for Amazon Simple Storage Service. +// +// Return torrent files from a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation GetObjectTorrent for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent +func (c *S3) GetObjectTorrent(input *GetObjectTorrentInput) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + return out, req.Send() +} + +// GetObjectTorrentWithContext is the same as GetObjectTorrent with the addition of +// the ability to pass a context and additional request options. +// +// See GetObjectTorrent for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) GetObjectTorrentWithContext(ctx aws.Context, input *GetObjectTorrentInput, opts ...request.Option) (*GetObjectTorrentOutput, error) { + req, out := c.GetObjectTorrentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadBucket = "HeadBucket" + +// HeadBucketRequest generates a "aws/request.Request" representing the +// client's request for the HeadBucket operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See HeadBucket for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadBucket method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadBucketRequest method. 
+// req, resp := client.HeadBucketRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucketRequest(input *HeadBucketInput) (req *request.Request, output *HeadBucketOutput) { + op := &request.Operation{ + Name: opHeadBucket, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}", + } + + if input == nil { + input = &HeadBucketInput{} + } + + output = &HeadBucketOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// HeadBucket API operation for Amazon Simple Storage Service. +// +// This operation is useful to determine if a bucket exists and you have permission +// to access it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadBucket for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket +func (c *S3) HeadBucket(input *HeadBucketInput) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + return out, req.Send() +} + +// HeadBucketWithContext is the same as HeadBucket with the addition of +// the ability to pass a context and additional request options. +// +// See HeadBucket for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadBucketWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.Option) (*HeadBucketOutput, error) { + req, out := c.HeadBucketRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opHeadObject = "HeadObject" + +// HeadObjectRequest generates a "aws/request.Request" representing the +// client's request for the HeadObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See HeadObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the HeadObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the HeadObjectRequest method. 
+// req, resp := client.HeadObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObjectRequest(input *HeadObjectInput) (req *request.Request, output *HeadObjectOutput) { + op := &request.Operation{ + Name: opHeadObject, + HTTPMethod: "HEAD", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &HeadObjectInput{} + } + + output = &HeadObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// HeadObject API operation for Amazon Simple Storage Service. +// +// The HEAD operation retrieves metadata from an object without returning the +// object itself. This operation is useful if you're only interested in an object's +// metadata. To use HEAD, you must have READ access to the object. +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses +// for more information on returned errors. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation HeadObject for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject +func (c *S3) HeadObject(input *HeadObjectInput) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + return out, req.Send() +} + +// HeadObjectWithContext is the same as HeadObject with the addition of +// the ability to pass a context and additional request options. +// +// See HeadObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) HeadObjectWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.Option) (*HeadObjectOutput, error) { + req, out := c.HeadObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketAnalyticsConfigurations = "ListBucketAnalyticsConfigurations" + +// ListBucketAnalyticsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketAnalyticsConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListBucketAnalyticsConfigurations for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBucketAnalyticsConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketAnalyticsConfigurationsRequest method. 
+// req, resp := client.ListBucketAnalyticsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurationsRequest(input *ListBucketAnalyticsConfigurationsInput) (req *request.Request, output *ListBucketAnalyticsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketAnalyticsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &ListBucketAnalyticsConfigurationsInput{} + } + + output = &ListBucketAnalyticsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketAnalyticsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the analytics configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketAnalyticsConfigurations for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations +func (c *S3) ListBucketAnalyticsConfigurations(input *ListBucketAnalyticsConfigurationsInput) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketAnalyticsConfigurationsWithContext is the same as ListBucketAnalyticsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketAnalyticsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketAnalyticsConfigurationsWithContext(ctx aws.Context, input *ListBucketAnalyticsConfigurationsInput, opts ...request.Option) (*ListBucketAnalyticsConfigurationsOutput, error) { + req, out := c.ListBucketAnalyticsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketInventoryConfigurations = "ListBucketInventoryConfigurations" + +// ListBucketInventoryConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketInventoryConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListBucketInventoryConfigurations for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBucketInventoryConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketInventoryConfigurationsRequest method. 
+// req, resp := client.ListBucketInventoryConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurationsRequest(input *ListBucketInventoryConfigurationsInput) (req *request.Request, output *ListBucketInventoryConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketInventoryConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &ListBucketInventoryConfigurationsInput{} + } + + output = &ListBucketInventoryConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketInventoryConfigurations API operation for Amazon Simple Storage Service. +// +// Returns a list of inventory configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketInventoryConfigurations for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations +func (c *S3) ListBucketInventoryConfigurations(input *ListBucketInventoryConfigurationsInput) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketInventoryConfigurationsWithContext is the same as ListBucketInventoryConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketInventoryConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketInventoryConfigurationsWithContext(ctx aws.Context, input *ListBucketInventoryConfigurationsInput, opts ...request.Option) (*ListBucketInventoryConfigurationsOutput, error) { + req, out := c.ListBucketInventoryConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBucketMetricsConfigurations = "ListBucketMetricsConfigurations" + +// ListBucketMetricsConfigurationsRequest generates a "aws/request.Request" representing the +// client's request for the ListBucketMetricsConfigurations operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListBucketMetricsConfigurations for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBucketMetricsConfigurations method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketMetricsConfigurationsRequest method. 
+// req, resp := client.ListBucketMetricsConfigurationsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurationsRequest(input *ListBucketMetricsConfigurationsInput) (req *request.Request, output *ListBucketMetricsConfigurationsOutput) { + op := &request.Operation{ + Name: opListBucketMetricsConfigurations, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &ListBucketMetricsConfigurationsInput{} + } + + output = &ListBucketMetricsConfigurationsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBucketMetricsConfigurations API operation for Amazon Simple Storage Service. +// +// Lists the metrics configurations for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBucketMetricsConfigurations for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations +func (c *S3) ListBucketMetricsConfigurations(input *ListBucketMetricsConfigurationsInput) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + return out, req.Send() +} + +// ListBucketMetricsConfigurationsWithContext is the same as ListBucketMetricsConfigurations with the addition of +// the ability to pass a context and additional request options. +// +// See ListBucketMetricsConfigurations for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketMetricsConfigurationsWithContext(ctx aws.Context, input *ListBucketMetricsConfigurationsInput, opts ...request.Option) (*ListBucketMetricsConfigurationsOutput, error) { + req, out := c.ListBucketMetricsConfigurationsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListBuckets = "ListBuckets" + +// ListBucketsRequest generates a "aws/request.Request" representing the +// client's request for the ListBuckets operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListBuckets for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListBuckets method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListBucketsRequest method. 
+// req, resp := client.ListBucketsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBucketsRequest(input *ListBucketsInput) (req *request.Request, output *ListBucketsOutput) { + op := &request.Operation{ + Name: opListBuckets, + HTTPMethod: "GET", + HTTPPath: "/", + } + + if input == nil { + input = &ListBucketsInput{} + } + + output = &ListBucketsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListBuckets API operation for Amazon Simple Storage Service. +// +// Returns a list of all buckets owned by the authenticated sender of the request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListBuckets for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets +func (c *S3) ListBuckets(input *ListBucketsInput) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + return out, req.Send() +} + +// ListBucketsWithContext is the same as ListBuckets with the addition of +// the ability to pass a context and additional request options. +// +// See ListBuckets for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListBucketsWithContext(ctx aws.Context, input *ListBucketsInput, opts ...request.Option) (*ListBucketsOutput, error) { + req, out := c.ListBucketsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opListMultipartUploads = "ListMultipartUploads" + +// ListMultipartUploadsRequest generates a "aws/request.Request" representing the +// client's request for the ListMultipartUploads operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListMultipartUploads for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListMultipartUploads method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListMultipartUploadsRequest method. 
+// req, resp := client.ListMultipartUploadsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploadsRequest(input *ListMultipartUploadsInput) (req *request.Request, output *ListMultipartUploadsOutput) { + op := &request.Operation{ + Name: opListMultipartUploads, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?uploads", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "UploadIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextUploadIdMarker"}, + LimitToken: "MaxUploads", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListMultipartUploadsInput{} + } + + output = &ListMultipartUploadsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListMultipartUploads API operation for Amazon Simple Storage Service. +// +// This operation lists in-progress multipart uploads. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListMultipartUploads for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads +func (c *S3) ListMultipartUploads(input *ListMultipartUploadsInput) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + return out, req.Send() +} + +// ListMultipartUploadsWithContext is the same as ListMultipartUploads with the addition of +// the ability to pass a context and additional request options. +// +// See ListMultipartUploads for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsWithContext(ctx aws.Context, input *ListMultipartUploadsInput, opts ...request.Option) (*ListMultipartUploadsOutput, error) { + req, out := c.ListMultipartUploadsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListMultipartUploadsPages iterates over the pages of a ListMultipartUploads operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListMultipartUploads method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListMultipartUploads operation. +// pageNum := 0 +// err := client.ListMultipartUploadsPages(params, +// func(page *ListMultipartUploadsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListMultipartUploadsPages(input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool) error { + return c.ListMultipartUploadsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListMultipartUploadsPagesWithContext same as ListMultipartUploadsPages except +// it takes a Context and allows setting request options on the pages. 
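+//
+// A minimal sketch of driving this paginated call under a deadline; the
+// client value, bucket name, and timeout below are illustrative placeholders,
+// not part of the generated documentation:
+//
+//    ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
+//    defer cancel()
+//    err := client.ListMultipartUploadsPagesWithContext(ctx,
+//        &ListMultipartUploadsInput{Bucket: aws.String("my-bucket")},
+//        func(page *ListMultipartUploadsOutput, lastPage bool) bool {
+//            fmt.Println(len(page.Uploads)) // handle one page of in-progress uploads
+//            return !lastPage               // returning false stops the iteration
+//        })
+//    if err != nil {
+//        fmt.Println("paging failed:", err)
+//    }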
+// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListMultipartUploadsPagesWithContext(ctx aws.Context, input *ListMultipartUploadsInput, fn func(*ListMultipartUploadsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListMultipartUploadsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListMultipartUploadsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListMultipartUploadsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListObjectVersions = "ListObjectVersions" + +// ListObjectVersionsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjectVersions operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListObjectVersions for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectVersions method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectVersionsRequest method. +// req, resp := client.ListObjectVersionsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersionsRequest(input *ListObjectVersionsInput) (req *request.Request, output *ListObjectVersionsOutput) { + op := &request.Operation{ + Name: opListObjectVersions, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?versions", + Paginator: &request.Paginator{ + InputTokens: []string{"KeyMarker", "VersionIdMarker"}, + OutputTokens: []string{"NextKeyMarker", "NextVersionIdMarker"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectVersionsInput{} + } + + output = &ListObjectVersionsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectVersions API operation for Amazon Simple Storage Service. +// +// Returns metadata about all of the versions of objects in a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectVersions for usage and error information. 
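+//
+// As an illustrative sketch only (the client value, bucket, and prefix are
+// placeholders rather than part of the generated documentation), a plain
+// synchronous call looks like:
+//
+//    resp, err := client.ListObjectVersions(&ListObjectVersionsInput{
+//        Bucket: aws.String("my-bucket"),
+//        Prefix: aws.String("reports/"),
+//    })
+//    if err == nil {
+//        fmt.Println(len(resp.Versions), "versions,", len(resp.DeleteMarkers), "delete markers")
+//    }
+//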
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions +func (c *S3) ListObjectVersions(input *ListObjectVersionsInput) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + return out, req.Send() +} + +// ListObjectVersionsWithContext is the same as ListObjectVersions with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectVersions for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsWithContext(ctx aws.Context, input *ListObjectVersionsInput, opts ...request.Option) (*ListObjectVersionsOutput, error) { + req, out := c.ListObjectVersionsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectVersionsPages iterates over the pages of a ListObjectVersions operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectVersions method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectVersions operation. +// pageNum := 0 +// err := client.ListObjectVersionsPages(params, +// func(page *ListObjectVersionsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectVersionsPages(input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool) error { + return c.ListObjectVersionsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectVersionsPagesWithContext same as ListObjectVersionsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectVersionsPagesWithContext(ctx aws.Context, input *ListObjectVersionsInput, fn func(*ListObjectVersionsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectVersionsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectVersionsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectVersionsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListObjects = "ListObjects" + +// ListObjectsRequest generates a "aws/request.Request" representing the +// client's request for the ListObjects operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListObjects for usage and error information. 
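+//
+// One common customization of the returned request, sketched here with
+// placeholder names (the client value and bucket are assumptions, not part
+// of the generated documentation), is pushing an extra handler onto the
+// request's lifecycle before calling Send:
+//
+//    req, resp := client.ListObjectsRequest(&ListObjectsInput{
+//        Bucket: aws.String("my-bucket"),
+//    })
+//    // Log every attempt just before it is sent.
+//    req.Handlers.Send.PushFront(func(r *request.Request) {
+//        fmt.Println("sending", r.Operation.Name, "attempt", r.RetryCount+1)
+//    })
+//    if err := req.Send(); err == nil {
+//        fmt.Println(len(resp.Contents), "objects returned")
+//    }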
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjects method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsRequest method. +// req, resp := client.ListObjectsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjectsRequest(input *ListObjectsInput) (req *request.Request, output *ListObjectsOutput) { + op := &request.Operation{ + Name: opListObjects, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}", + Paginator: &request.Paginator{ + InputTokens: []string{"Marker"}, + OutputTokens: []string{"NextMarker || Contents[-1].Key"}, + LimitToken: "MaxKeys", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListObjectsInput{} + } + + output = &ListObjectsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListObjects API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjects for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects +func (c *S3) ListObjects(input *ListObjectsInput) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + return out, req.Send() +} + +// ListObjectsWithContext is the same as ListObjects with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjects for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsWithContext(ctx aws.Context, input *ListObjectsInput, opts ...request.Option) (*ListObjectsOutput, error) { + req, out := c.ListObjectsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsPages iterates over the pages of a ListObjects operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjects method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjects operation. 
+// pageNum := 0 +// err := client.ListObjectsPages(params, +// func(page *ListObjectsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsPages(input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool) error { + return c.ListObjectsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsPagesWithContext same as ListObjectsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsPagesWithContext(ctx aws.Context, input *ListObjectsInput, fn func(*ListObjectsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opListObjectsV2 = "ListObjectsV2" + +// ListObjectsV2Request generates a "aws/request.Request" representing the +// client's request for the ListObjectsV2 operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListObjectsV2 for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListObjectsV2 method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListObjectsV2Request method. +// req, resp := client.ListObjectsV2Request(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2Request(input *ListObjectsV2Input) (req *request.Request, output *ListObjectsV2Output) { + op := &request.Operation{ + Name: opListObjectsV2, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}?list-type=2", + Paginator: &request.Paginator{ + InputTokens: []string{"ContinuationToken"}, + OutputTokens: []string{"NextContinuationToken"}, + LimitToken: "MaxKeys", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListObjectsV2Input{} + } + + output = &ListObjectsV2Output{} + req = c.newRequest(op, input, output) + return +} + +// ListObjectsV2 API operation for Amazon Simple Storage Service. +// +// Returns some or all (up to 1000) of the objects in a bucket. You can use +// the request parameters as selection criteria to return a subset of the objects +// in a bucket. Note: ListObjectsV2 is the revised List Objects API and we recommend +// you use this revised API for new application development. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListObjectsV2 for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchBucket "NoSuchBucket" +// The specified bucket does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2 +func (c *S3) ListObjectsV2(input *ListObjectsV2Input) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + return out, req.Send() +} + +// ListObjectsV2WithContext is the same as ListObjectsV2 with the addition of +// the ability to pass a context and additional request options. +// +// See ListObjectsV2 for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2WithContext(ctx aws.Context, input *ListObjectsV2Input, opts ...request.Option) (*ListObjectsV2Output, error) { + req, out := c.ListObjectsV2Request(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListObjectsV2Pages iterates over the pages of a ListObjectsV2 operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListObjectsV2 method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListObjectsV2 operation. +// pageNum := 0 +// err := client.ListObjectsV2Pages(params, +// func(page *ListObjectsV2Output, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListObjectsV2Pages(input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool) error { + return c.ListObjectsV2PagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListObjectsV2PagesWithContext same as ListObjectsV2Pages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListObjectsV2PagesWithContext(ctx aws.Context, input *ListObjectsV2Input, fn func(*ListObjectsV2Output, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListObjectsV2Input + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListObjectsV2Request(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListObjectsV2Output), !p.HasNextPage()) + } + return p.Err() +} + +const opListParts = "ListParts" + +// ListPartsRequest generates a "aws/request.Request" representing the +// client's request for the ListParts operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See ListParts for usage and error information. 
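+//
+// A hedged sketch of inspecting the request object after "Send" (the client
+// value, bucket, key, and upload ID below are placeholders):
+//
+//    req, resp := client.ListPartsRequest(&ListPartsInput{
+//        Bucket:   aws.String("my-bucket"),
+//        Key:      aws.String("backups/archive.tar"),
+//        UploadId: aws.String("example-upload-id"),
+//    })
+//    if err := req.Send(); err == nil {
+//        // After a successful Send the raw HTTP response is available on the request.
+//        fmt.Println("HTTP status:", req.HTTPResponse.StatusCode)
+//        fmt.Println(len(resp.Parts), "parts uploaded so far")
+//    }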
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the ListParts method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the ListPartsRequest method. +// req, resp := client.ListPartsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListPartsRequest(input *ListPartsInput) (req *request.Request, output *ListPartsOutput) { + op := &request.Operation{ + Name: opListParts, + HTTPMethod: "GET", + HTTPPath: "/{Bucket}/{Key+}", + Paginator: &request.Paginator{ + InputTokens: []string{"PartNumberMarker"}, + OutputTokens: []string{"NextPartNumberMarker"}, + LimitToken: "MaxParts", + TruncationToken: "IsTruncated", + }, + } + + if input == nil { + input = &ListPartsInput{} + } + + output = &ListPartsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListParts API operation for Amazon Simple Storage Service. +// +// Lists the parts that have been uploaded for a specific multipart upload. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation ListParts for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts +func (c *S3) ListParts(input *ListPartsInput) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + return out, req.Send() +} + +// ListPartsWithContext is the same as ListParts with the addition of +// the ability to pass a context and additional request options. +// +// See ListParts for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsWithContext(ctx aws.Context, input *ListPartsInput, opts ...request.Option) (*ListPartsOutput, error) { + req, out := c.ListPartsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListPartsPages iterates over the pages of a ListParts operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See ListParts method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a ListParts operation. 
+// pageNum := 0 +// err := client.ListPartsPages(params, +// func(page *ListPartsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *S3) ListPartsPages(input *ListPartsInput, fn func(*ListPartsOutput, bool) bool) error { + return c.ListPartsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListPartsPagesWithContext same as ListPartsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) ListPartsPagesWithContext(ctx aws.Context, input *ListPartsInput, fn func(*ListPartsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListPartsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListPartsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + cont := true + for p.Next() && cont { + cont = fn(p.Page().(*ListPartsOutput), !p.HasNextPage()) + } + return p.Err() +} + +const opPutBucketAccelerateConfiguration = "PutBucketAccelerateConfiguration" + +// PutBucketAccelerateConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAccelerateConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketAccelerateConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAccelerateConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAccelerateConfigurationRequest method. +// req, resp := client.PutBucketAccelerateConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfigurationRequest(input *PutBucketAccelerateConfigurationInput) (req *request.Request, output *PutBucketAccelerateConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAccelerateConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?accelerate", + } + + if input == nil { + input = &PutBucketAccelerateConfigurationInput{} + } + + output = &PutBucketAccelerateConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAccelerateConfiguration API operation for Amazon Simple Storage Service. +// +// Sets the accelerate configuration of an existing bucket. +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAccelerateConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration +func (c *S3) PutBucketAccelerateConfiguration(input *PutBucketAccelerateConfigurationInput) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAccelerateConfigurationWithContext is the same as PutBucketAccelerateConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAccelerateConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAccelerateConfigurationWithContext(ctx aws.Context, input *PutBucketAccelerateConfigurationInput, opts ...request.Option) (*PutBucketAccelerateConfigurationOutput, error) { + req, out := c.PutBucketAccelerateConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAcl = "PutBucketAcl" + +// PutBucketAclRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketAcl for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAclRequest method. +// req, resp := client.PutBucketAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAclRequest(input *PutBucketAclInput) (req *request.Request, output *PutBucketAclOutput) { + op := &request.Operation{ + Name: opPutBucketAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?acl", + } + + if input == nil { + input = &PutBucketAclInput{} + } + + output = &PutBucketAclOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAcl API operation for Amazon Simple Storage Service. +// +// Sets the permissions on a bucket using access control lists (ACL). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
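+//
+//    // Illustrative sketch (editor's addition, not generated documentation): applying a
+//    // canned ACL to a bucket. "client" is an existing S3 service client and the bucket
+//    // name is a placeholder.
+//    _, err := client.PutBucketAcl(&s3.PutBucketAclInput{
+//        Bucket: aws.String("my-bucket"),
+//        ACL:    aws.String(s3.BucketCannedACLPrivate),
+//    })
+//    if err != nil {
+//        // inspect err with awserr.Error's Code and Message as described above
+//    }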
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAcl for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl +func (c *S3) PutBucketAcl(input *PutBucketAclInput) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + return out, req.Send() +} + +// PutBucketAclWithContext is the same as PutBucketAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAclWithContext(ctx aws.Context, input *PutBucketAclInput, opts ...request.Option) (*PutBucketAclOutput, error) { + req, out := c.PutBucketAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketAnalyticsConfiguration = "PutBucketAnalyticsConfiguration" + +// PutBucketAnalyticsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketAnalyticsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketAnalyticsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketAnalyticsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketAnalyticsConfigurationRequest method. +// req, resp := client.PutBucketAnalyticsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfigurationRequest(input *PutBucketAnalyticsConfigurationInput) (req *request.Request, output *PutBucketAnalyticsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketAnalyticsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?analytics", + } + + if input == nil { + input = &PutBucketAnalyticsConfigurationInput{} + } + + output = &PutBucketAnalyticsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketAnalyticsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets an analytics configuration for the bucket (specified by the analytics +// configuration ID). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
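+//
+//    // Illustrative sketch (editor's addition, not generated documentation): storing a
+//    // minimal analytics configuration under the ID "report-1". Field names follow the
+//    // type definitions later in this file; bucket name and ID are placeholders.
+//    _, err := client.PutBucketAnalyticsConfiguration(&s3.PutBucketAnalyticsConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        Id:     aws.String("report-1"),
+//        AnalyticsConfiguration: &s3.AnalyticsConfiguration{
+//            Id:                   aws.String("report-1"),
+//            StorageClassAnalysis: &s3.StorageClassAnalysis{},
+//        },
+//    })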
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketAnalyticsConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration +func (c *S3) PutBucketAnalyticsConfiguration(input *PutBucketAnalyticsConfigurationInput) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketAnalyticsConfigurationWithContext is the same as PutBucketAnalyticsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketAnalyticsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketAnalyticsConfigurationWithContext(ctx aws.Context, input *PutBucketAnalyticsConfigurationInput, opts ...request.Option) (*PutBucketAnalyticsConfigurationOutput, error) { + req, out := c.PutBucketAnalyticsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketCors = "PutBucketCors" + +// PutBucketCorsRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketCors operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketCors for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketCors method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketCorsRequest method. +// req, resp := client.PutBucketCorsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCorsRequest(input *PutBucketCorsInput) (req *request.Request, output *PutBucketCorsOutput) { + op := &request.Operation{ + Name: opPutBucketCors, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?cors", + } + + if input == nil { + input = &PutBucketCorsInput{} + } + + output = &PutBucketCorsOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketCors API operation for Amazon Simple Storage Service. +// +// Sets the cors configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketCors for usage and error information. 
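+//
+//    // Illustrative sketch (editor's addition, not generated documentation): allowing
+//    // cross-origin GET requests from any origin. "client" is an existing S3 service
+//    // client; the bucket name is a placeholder.
+//    _, err := client.PutBucketCors(&s3.PutBucketCorsInput{
+//        Bucket: aws.String("my-bucket"),
+//        CORSConfiguration: &s3.CORSConfiguration{
+//            CORSRules: []*s3.CORSRule{{
+//                AllowedMethods: []*string{aws.String("GET")},
+//                AllowedOrigins: []*string{aws.String("*")},
+//            }},
+//        },
+//    })
+//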
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors +func (c *S3) PutBucketCors(input *PutBucketCorsInput) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + return out, req.Send() +} + +// PutBucketCorsWithContext is the same as PutBucketCors with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketCors for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketCorsWithContext(ctx aws.Context, input *PutBucketCorsInput, opts ...request.Option) (*PutBucketCorsOutput, error) { + req, out := c.PutBucketCorsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketInventoryConfiguration = "PutBucketInventoryConfiguration" + +// PutBucketInventoryConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketInventoryConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketInventoryConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketInventoryConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketInventoryConfigurationRequest method. +// req, resp := client.PutBucketInventoryConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfigurationRequest(input *PutBucketInventoryConfigurationInput) (req *request.Request, output *PutBucketInventoryConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketInventoryConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?inventory", + } + + if input == nil { + input = &PutBucketInventoryConfigurationInput{} + } + + output = &PutBucketInventoryConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketInventoryConfiguration API operation for Amazon Simple Storage Service. +// +// Adds an inventory configuration (identified by the inventory ID) from the +// bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketInventoryConfiguration for usage and error information. 
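+//
+//    // Illustrative sketch (editor's addition, not generated documentation): enabling a
+//    // daily CSV inventory report delivered to a second bucket. Field names follow the
+//    // type definitions later in this file; bucket names and the ID are placeholders.
+//    _, err := client.PutBucketInventoryConfiguration(&s3.PutBucketInventoryConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        Id:     aws.String("daily-inventory"),
+//        InventoryConfiguration: &s3.InventoryConfiguration{
+//            Id:                     aws.String("daily-inventory"),
+//            IsEnabled:              aws.Bool(true),
+//            IncludedObjectVersions: aws.String("Current"),
+//            Schedule:               &s3.InventorySchedule{Frequency: aws.String("Daily")},
+//            Destination: &s3.InventoryDestination{
+//                S3BucketDestination: &s3.InventoryS3BucketDestination{
+//                    Bucket: aws.String("arn:aws:s3:::my-inventory-bucket"),
+//                    Format: aws.String("CSV"),
+//                },
+//            },
+//        },
+//    })
+//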
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration +func (c *S3) PutBucketInventoryConfiguration(input *PutBucketInventoryConfigurationInput) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketInventoryConfigurationWithContext is the same as PutBucketInventoryConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketInventoryConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketInventoryConfigurationWithContext(ctx aws.Context, input *PutBucketInventoryConfigurationInput, opts ...request.Option) (*PutBucketInventoryConfigurationOutput, error) { + req, out := c.PutBucketInventoryConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycle = "PutBucketLifecycle" + +// PutBucketLifecycleRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycle operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketLifecycle for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycle method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleRequest method. +// req, resp := client.PutBucketLifecycleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +func (c *S3) PutBucketLifecycleRequest(input *PutBucketLifecycleInput) (req *request.Request, output *PutBucketLifecycleOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketLifecycle, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketLifecycle, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleInput{} + } + + output = &PutBucketLifecycleOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLifecycle API operation for Amazon Simple Storage Service. +// +// Deprecated, see the PutBucketLifecycleConfiguration operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycle for usage and error information. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycle +func (c *S3) PutBucketLifecycle(input *PutBucketLifecycleInput) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleWithContext is the same as PutBucketLifecycle with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycle for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLifecycleWithContext(ctx aws.Context, input *PutBucketLifecycleInput, opts ...request.Option) (*PutBucketLifecycleOutput, error) { + req, out := c.PutBucketLifecycleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLifecycleConfiguration = "PutBucketLifecycleConfiguration" + +// PutBucketLifecycleConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLifecycleConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketLifecycleConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLifecycleConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLifecycleConfigurationRequest method. +// req, resp := client.PutBucketLifecycleConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfigurationRequest(input *PutBucketLifecycleConfigurationInput) (req *request.Request, output *PutBucketLifecycleConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketLifecycleConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?lifecycle", + } + + if input == nil { + input = &PutBucketLifecycleConfigurationInput{} + } + + output = &PutBucketLifecycleConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLifecycleConfiguration API operation for Amazon Simple Storage Service. +// +// Sets lifecycle configuration for your bucket. If a lifecycle configuration +// exists, it replaces it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLifecycleConfiguration for usage and error information. 
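+//
+//    // Illustrative sketch (editor's addition, not generated documentation): expiring
+//    // objects under the "logs/" prefix after 30 days. "client" is an existing S3 service
+//    // client; bucket name, rule ID and prefix are placeholders.
+//    _, err := client.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
+//            Rules: []*s3.LifecycleRule{{
+//                ID:         aws.String("expire-logs"),
+//                Prefix:     aws.String("logs/"),
+//                Status:     aws.String("Enabled"),
+//                Expiration: &s3.LifecycleExpiration{Days: aws.Int64(30)},
+//            }},
+//        },
+//    })
+//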
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration +func (c *S3) PutBucketLifecycleConfiguration(input *PutBucketLifecycleConfigurationInput) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketLifecycleConfigurationWithContext is the same as PutBucketLifecycleConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLifecycleConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLifecycleConfigurationWithContext(ctx aws.Context, input *PutBucketLifecycleConfigurationInput, opts ...request.Option) (*PutBucketLifecycleConfigurationOutput, error) { + req, out := c.PutBucketLifecycleConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketLogging = "PutBucketLogging" + +// PutBucketLoggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketLogging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketLogging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketLogging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketLoggingRequest method. +// req, resp := client.PutBucketLoggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLoggingRequest(input *PutBucketLoggingInput) (req *request.Request, output *PutBucketLoggingOutput) { + op := &request.Operation{ + Name: opPutBucketLogging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?logging", + } + + if input == nil { + input = &PutBucketLoggingInput{} + } + + output = &PutBucketLoggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketLogging API operation for Amazon Simple Storage Service. +// +// Set the logging parameters for a bucket and to specify permissions for who +// can view and modify the logging parameters. To set the logging status of +// a bucket, you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketLogging for usage and error information. 
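+//
+//    // Illustrative sketch (editor's addition, not generated documentation): delivering
+//    // access logs to a separate bucket under the "access-logs/" prefix. "client" is an
+//    // existing S3 service client; bucket names and prefix are placeholders.
+//    _, err := client.PutBucketLogging(&s3.PutBucketLoggingInput{
+//        Bucket: aws.String("my-bucket"),
+//        BucketLoggingStatus: &s3.BucketLoggingStatus{
+//            LoggingEnabled: &s3.LoggingEnabled{
+//                TargetBucket: aws.String("my-log-bucket"),
+//                TargetPrefix: aws.String("access-logs/"),
+//            },
+//        },
+//    })
+//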
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging +func (c *S3) PutBucketLogging(input *PutBucketLoggingInput) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + return out, req.Send() +} + +// PutBucketLoggingWithContext is the same as PutBucketLogging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketLogging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketLoggingWithContext(ctx aws.Context, input *PutBucketLoggingInput, opts ...request.Option) (*PutBucketLoggingOutput, error) { + req, out := c.PutBucketLoggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketMetricsConfiguration = "PutBucketMetricsConfiguration" + +// PutBucketMetricsConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketMetricsConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketMetricsConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketMetricsConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketMetricsConfigurationRequest method. +// req, resp := client.PutBucketMetricsConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfigurationRequest(input *PutBucketMetricsConfigurationInput) (req *request.Request, output *PutBucketMetricsConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketMetricsConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?metrics", + } + + if input == nil { + input = &PutBucketMetricsConfigurationInput{} + } + + output = &PutBucketMetricsConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketMetricsConfiguration API operation for Amazon Simple Storage Service. +// +// Sets a metrics configuration (specified by the metrics configuration ID) +// for the bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketMetricsConfiguration for usage and error information. 
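+//
+//    // Illustrative sketch (editor's addition, not generated documentation): enabling
+//    // request metrics for the whole bucket under the configuration ID "EntireBucket".
+//    // "client" is an existing S3 service client; the bucket name is a placeholder.
+//    _, err := client.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
+//        Bucket:               aws.String("my-bucket"),
+//        Id:                   aws.String("EntireBucket"),
+//        MetricsConfiguration: &s3.MetricsConfiguration{Id: aws.String("EntireBucket")},
+//    })
+//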
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration +func (c *S3) PutBucketMetricsConfiguration(input *PutBucketMetricsConfigurationInput) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketMetricsConfigurationWithContext is the same as PutBucketMetricsConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketMetricsConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketMetricsConfigurationWithContext(ctx aws.Context, input *PutBucketMetricsConfigurationInput, opts ...request.Option) (*PutBucketMetricsConfigurationOutput, error) { + req, out := c.PutBucketMetricsConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotification = "PutBucketNotification" + +// PutBucketNotificationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotification operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketNotification for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketNotification method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketNotificationRequest method. +// req, resp := client.PutBucketNotificationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +func (c *S3) PutBucketNotificationRequest(input *PutBucketNotificationInput) (req *request.Request, output *PutBucketNotificationOutput) { + if c.Client.Config.Logger != nil { + c.Client.Config.Logger.Log("This operation, PutBucketNotification, has been deprecated") + } + op := &request.Operation{ + Name: opPutBucketNotification, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationInput{} + } + + output = &PutBucketNotificationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketNotification API operation for Amazon Simple Storage Service. +// +// Deprecated, see the PutBucketNotificationConfiguraiton operation. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotification for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotification +func (c *S3) PutBucketNotification(input *PutBucketNotificationInput) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationWithContext is the same as PutBucketNotification with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotification for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationWithContext(ctx aws.Context, input *PutBucketNotificationInput, opts ...request.Option) (*PutBucketNotificationOutput, error) { + req, out := c.PutBucketNotificationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketNotificationConfiguration = "PutBucketNotificationConfiguration" + +// PutBucketNotificationConfigurationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketNotificationConfiguration operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketNotificationConfiguration for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketNotificationConfiguration method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketNotificationConfigurationRequest method. +// req, resp := client.PutBucketNotificationConfigurationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfigurationRequest(input *PutBucketNotificationConfigurationInput) (req *request.Request, output *PutBucketNotificationConfigurationOutput) { + op := &request.Operation{ + Name: opPutBucketNotificationConfiguration, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?notification", + } + + if input == nil { + input = &PutBucketNotificationConfigurationInput{} + } + + output = &PutBucketNotificationConfigurationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketNotificationConfiguration API operation for Amazon Simple Storage Service. +// +// Enables notifications of specified events for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
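+//
+//    // Illustrative sketch (editor's addition, not generated documentation): publishing a
+//    // message to an SQS queue for every created object. The bucket name and queue ARN
+//    // are placeholders; the queue policy must already allow S3 to send messages.
+//    _, err := client.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
+//        Bucket: aws.String("my-bucket"),
+//        NotificationConfiguration: &s3.NotificationConfiguration{
+//            QueueConfigurations: []*s3.QueueConfiguration{{
+//                Events:   []*string{aws.String("s3:ObjectCreated:*")},
+//                QueueArn: aws.String("arn:aws:sqs:eu-west-1:123456789012:my-queue"),
+//            }},
+//        },
+//    })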
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketNotificationConfiguration for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration +func (c *S3) PutBucketNotificationConfiguration(input *PutBucketNotificationConfigurationInput) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + return out, req.Send() +} + +// PutBucketNotificationConfigurationWithContext is the same as PutBucketNotificationConfiguration with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketNotificationConfiguration for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketNotificationConfigurationWithContext(ctx aws.Context, input *PutBucketNotificationConfigurationInput, opts ...request.Option) (*PutBucketNotificationConfigurationOutput, error) { + req, out := c.PutBucketNotificationConfigurationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketPolicy = "PutBucketPolicy" + +// PutBucketPolicyRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketPolicy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketPolicy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketPolicy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketPolicyRequest method. +// req, resp := client.PutBucketPolicyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicyRequest(input *PutBucketPolicyInput) (req *request.Request, output *PutBucketPolicyOutput) { + op := &request.Operation{ + Name: opPutBucketPolicy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?policy", + } + + if input == nil { + input = &PutBucketPolicyInput{} + } + + output = &PutBucketPolicyOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketPolicy API operation for Amazon Simple Storage Service. +// +// Replaces a policy on a bucket. If the bucket already has a policy, the one +// in this request completely replaces it. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. 
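+//
+//    // Illustrative sketch (editor's addition, not generated documentation): attaching a
+//    // policy that allows anonymous reads of every object. The bucket name is a
+//    // placeholder; the policy document is passed as a raw JSON string.
+//    policy := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":"*",
+//        "Action":"s3:GetObject","Resource":"arn:aws:s3:::my-bucket/*"}]}`
+//    _, err := client.PutBucketPolicy(&s3.PutBucketPolicyInput{
+//        Bucket: aws.String("my-bucket"),
+//        Policy: aws.String(policy),
+//    })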
+// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketPolicy for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy +func (c *S3) PutBucketPolicy(input *PutBucketPolicyInput) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + return out, req.Send() +} + +// PutBucketPolicyWithContext is the same as PutBucketPolicy with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketPolicy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketPolicyWithContext(ctx aws.Context, input *PutBucketPolicyInput, opts ...request.Option) (*PutBucketPolicyOutput, error) { + req, out := c.PutBucketPolicyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketReplication = "PutBucketReplication" + +// PutBucketReplicationRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketReplication operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketReplication for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketReplication method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketReplicationRequest method. +// req, resp := client.PutBucketReplicationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req *request.Request, output *PutBucketReplicationOutput) { + op := &request.Operation{ + Name: opPutBucketReplication, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?replication", + } + + if input == nil { + input = &PutBucketReplicationInput{} + } + + output = &PutBucketReplicationOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketReplication API operation for Amazon Simple Storage Service. +// +// Creates a new replication configuration (or replaces an existing one, if +// present). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketReplication for usage and error information. 
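+//
+//    // Illustrative sketch (editor's addition, not generated documentation): replicating
+//    // every new object to a destination bucket. The IAM role ARN and bucket names are
+//    // placeholders; versioning must already be enabled on both buckets.
+//    _, err := client.PutBucketReplication(&s3.PutBucketReplicationInput{
+//        Bucket: aws.String("my-bucket"),
+//        ReplicationConfiguration: &s3.ReplicationConfiguration{
+//            Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
+//            Rules: []*s3.ReplicationRule{{
+//                Prefix:      aws.String(""),
+//                Status:      aws.String("Enabled"),
+//                Destination: &s3.Destination{Bucket: aws.String("arn:aws:s3:::my-replica-bucket")},
+//            }},
+//        },
+//    })
+//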
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication +func (c *S3) PutBucketReplication(input *PutBucketReplicationInput) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + return out, req.Send() +} + +// PutBucketReplicationWithContext is the same as PutBucketReplication with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketReplication for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketReplicationWithContext(ctx aws.Context, input *PutBucketReplicationInput, opts ...request.Option) (*PutBucketReplicationOutput, error) { + req, out := c.PutBucketReplicationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketRequestPayment = "PutBucketRequestPayment" + +// PutBucketRequestPaymentRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketRequestPayment operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketRequestPayment for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketRequestPayment method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketRequestPaymentRequest method. +// req, resp := client.PutBucketRequestPaymentRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPaymentRequest(input *PutBucketRequestPaymentInput) (req *request.Request, output *PutBucketRequestPaymentOutput) { + op := &request.Operation{ + Name: opPutBucketRequestPayment, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?requestPayment", + } + + if input == nil { + input = &PutBucketRequestPaymentInput{} + } + + output = &PutBucketRequestPaymentOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketRequestPayment API operation for Amazon Simple Storage Service. +// +// Sets the request payment configuration for a bucket. By default, the bucket +// owner pays for downloads from the bucket. This configuration parameter enables +// the bucket owner (only) to specify that the person requesting the download +// will be charged for the download. Documentation on requester pays buckets +// can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html +// +// Returns awserr.Error for service API and SDK errors. 
Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketRequestPayment for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment +func (c *S3) PutBucketRequestPayment(input *PutBucketRequestPaymentInput) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + return out, req.Send() +} + +// PutBucketRequestPaymentWithContext is the same as PutBucketRequestPayment with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketRequestPayment for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketRequestPaymentWithContext(ctx aws.Context, input *PutBucketRequestPaymentInput, opts ...request.Option) (*PutBucketRequestPaymentOutput, error) { + req, out := c.PutBucketRequestPaymentRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketTagging = "PutBucketTagging" + +// PutBucketTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketTaggingRequest method. +// req, resp := client.PutBucketTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTaggingRequest(input *PutBucketTaggingInput) (req *request.Request, output *PutBucketTaggingOutput) { + op := &request.Operation{ + Name: opPutBucketTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?tagging", + } + + if input == nil { + input = &PutBucketTaggingInput{} + } + + output = &PutBucketTaggingOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketTagging API operation for Amazon Simple Storage Service. +// +// Sets the tags for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketTagging for usage and error information. 
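+//
+//    // Illustrative sketch (editor's addition, not generated documentation): replacing the
+//    // bucket's tag set with a single "environment" tag. "client" is an existing S3
+//    // service client; the bucket name and tag values are placeholders.
+//    _, err := client.PutBucketTagging(&s3.PutBucketTaggingInput{
+//        Bucket: aws.String("my-bucket"),
+//        Tagging: &s3.Tagging{
+//            TagSet: []*s3.Tag{{Key: aws.String("environment"), Value: aws.String("staging")}},
+//        },
+//    })
+//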
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging +func (c *S3) PutBucketTagging(input *PutBucketTaggingInput) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + return out, req.Send() +} + +// PutBucketTaggingWithContext is the same as PutBucketTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketTaggingWithContext(ctx aws.Context, input *PutBucketTaggingInput, opts ...request.Option) (*PutBucketTaggingOutput, error) { + req, out := c.PutBucketTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketVersioning = "PutBucketVersioning" + +// PutBucketVersioningRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketVersioning operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketVersioning for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketVersioning method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketVersioningRequest method. +// req, resp := client.PutBucketVersioningRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioningRequest(input *PutBucketVersioningInput) (req *request.Request, output *PutBucketVersioningOutput) { + op := &request.Operation{ + Name: opPutBucketVersioning, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?versioning", + } + + if input == nil { + input = &PutBucketVersioningInput{} + } + + output = &PutBucketVersioningOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketVersioning API operation for Amazon Simple Storage Service. +// +// Sets the versioning state of an existing bucket. To set the versioning state, +// you must be the bucket owner. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketVersioning for usage and error information. 
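+//
+//    // Illustrative sketch (editor's addition, not generated documentation): turning
+//    // versioning on. "client" is an existing S3 service client; the bucket name is a
+//    // placeholder.
+//    _, err := client.PutBucketVersioning(&s3.PutBucketVersioningInput{
+//        Bucket: aws.String("my-bucket"),
+//        VersioningConfiguration: &s3.VersioningConfiguration{
+//            Status: aws.String(s3.BucketVersioningStatusEnabled),
+//        },
+//    })
+//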
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning +func (c *S3) PutBucketVersioning(input *PutBucketVersioningInput) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + return out, req.Send() +} + +// PutBucketVersioningWithContext is the same as PutBucketVersioning with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketVersioning for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketVersioningWithContext(ctx aws.Context, input *PutBucketVersioningInput, opts ...request.Option) (*PutBucketVersioningOutput, error) { + req, out := c.PutBucketVersioningRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutBucketWebsite = "PutBucketWebsite" + +// PutBucketWebsiteRequest generates a "aws/request.Request" representing the +// client's request for the PutBucketWebsite operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutBucketWebsite for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutBucketWebsite method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutBucketWebsiteRequest method. +// req, resp := client.PutBucketWebsiteRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsiteRequest(input *PutBucketWebsiteInput) (req *request.Request, output *PutBucketWebsiteOutput) { + op := &request.Operation{ + Name: opPutBucketWebsite, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}?website", + } + + if input == nil { + input = &PutBucketWebsiteInput{} + } + + output = &PutBucketWebsiteOutput{} + req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Remove(restxml.UnmarshalHandler) + req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) + return +} + +// PutBucketWebsite API operation for Amazon Simple Storage Service. +// +// Set the website configuration for a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutBucketWebsite for usage and error information. 
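+//
+// As an editor's illustration (not generated SDK text), a basic static-website
+// configuration could look like the sketch below, assuming an existing Amazon S3
+// client ("svc"); the bucket and document names are placeholders.
+//
+//    _, err := svc.PutBucketWebsite(&s3.PutBucketWebsiteInput{
+//        Bucket: aws.String("example-bucket"),
+//        WebsiteConfiguration: &s3.WebsiteConfiguration{
+//            IndexDocument: &s3.IndexDocument{Suffix: aws.String("index.html")},
+//            ErrorDocument: &s3.ErrorDocument{Key: aws.String("error.html")},
+//        },
+//    })
+//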
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite +func (c *S3) PutBucketWebsite(input *PutBucketWebsiteInput) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + return out, req.Send() +} + +// PutBucketWebsiteWithContext is the same as PutBucketWebsite with the addition of +// the ability to pass a context and additional request options. +// +// See PutBucketWebsite for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutBucketWebsiteWithContext(ctx aws.Context, input *PutBucketWebsiteInput, opts ...request.Option) (*PutBucketWebsiteOutput, error) { + req, out := c.PutBucketWebsiteRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObject = "PutObject" + +// PutObjectRequest generates a "aws/request.Request" representing the +// client's request for the PutObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutObject for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutObjectRequest method. +// req, resp := client.PutObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, output *PutObjectOutput) { + op := &request.Operation{ + Name: opPutObject, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &PutObjectInput{} + } + + output = &PutObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObject API operation for Amazon Simple Storage Service. +// +// Adds an object to a bucket. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObject for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject +func (c *S3) PutObject(input *PutObjectInput) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + return out, req.Send() +} + +// PutObjectWithContext is the same as PutObject with the addition of +// the ability to pass a context and additional request options. +// +// See PutObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectWithContext(ctx aws.Context, input *PutObjectInput, opts ...request.Option) (*PutObjectOutput, error) { + req, out := c.PutObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opPutObjectAcl = "PutObjectAcl" + +// PutObjectAclRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectAcl operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutObjectAcl for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObjectAcl method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutObjectAclRequest method. +// req, resp := client.PutObjectAclRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAclRequest(input *PutObjectAclInput) (req *request.Request, output *PutObjectAclOutput) { + op := &request.Operation{ + Name: opPutObjectAcl, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?acl", + } + + if input == nil { + input = &PutObjectAclInput{} + } + + output = &PutObjectAclOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObjectAcl API operation for Amazon Simple Storage Service. +// +// uses the acl subresource to set the access control list (ACL) permissions +// for an object that already exists in a bucket +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectAcl for usage and error information. +// +// Returned Error Codes: +// * ErrCodeNoSuchKey "NoSuchKey" +// The specified key does not exist. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl +func (c *S3) PutObjectAcl(input *PutObjectAclInput) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + return out, req.Send() +} + +// PutObjectAclWithContext is the same as PutObjectAcl with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectAcl for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectAclWithContext(ctx aws.Context, input *PutObjectAclInput, opts ...request.Option) (*PutObjectAclOutput, error) { + req, out := c.PutObjectAclRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
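+	// req now carries the caller's context (used for cancellation) and any extra
+	// request options; Send below builds, signs and executes the HTTP request.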
+ return out, req.Send() +} + +const opPutObjectTagging = "PutObjectTagging" + +// PutObjectTaggingRequest generates a "aws/request.Request" representing the +// client's request for the PutObjectTagging operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See PutObjectTagging for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the PutObjectTagging method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the PutObjectTaggingRequest method. +// req, resp := client.PutObjectTaggingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTaggingRequest(input *PutObjectTaggingInput) (req *request.Request, output *PutObjectTaggingOutput) { + op := &request.Operation{ + Name: opPutObjectTagging, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}?tagging", + } + + if input == nil { + input = &PutObjectTaggingInput{} + } + + output = &PutObjectTaggingOutput{} + req = c.newRequest(op, input, output) + return +} + +// PutObjectTagging API operation for Amazon Simple Storage Service. +// +// Sets the supplied tag-set to an object that already exists in a bucket +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation PutObjectTagging for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging +func (c *S3) PutObjectTagging(input *PutObjectTaggingInput) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + return out, req.Send() +} + +// PutObjectTaggingWithContext is the same as PutObjectTagging with the addition of +// the ability to pass a context and additional request options. +// +// See PutObjectTagging for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) PutObjectTaggingWithContext(ctx aws.Context, input *PutObjectTaggingInput, opts ...request.Option) (*PutObjectTaggingOutput, error) { + req, out := c.PutObjectTaggingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opRestoreObject = "RestoreObject" + +// RestoreObjectRequest generates a "aws/request.Request" representing the +// client's request for the RestoreObject operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See RestoreObject for usage and error information. 
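+//
+// As an editor's illustration (not generated SDK text), a direct RestoreObject call
+// could look like the sketch below, assuming an existing Amazon S3 client ("svc");
+// the bucket, key and day count are placeholders.
+//
+//    _, err := svc.RestoreObject(&s3.RestoreObjectInput{
+//        Bucket:         aws.String("example-bucket"),
+//        Key:            aws.String("archive/report.csv"),
+//        RestoreRequest: &s3.RestoreRequest{Days: aws.Int64(7)},
+//    })
+//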
+// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the RestoreObject method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the RestoreObjectRequest method. +// req, resp := client.RestoreObjectRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Request, output *RestoreObjectOutput) { + op := &request.Operation{ + Name: opRestoreObject, + HTTPMethod: "POST", + HTTPPath: "/{Bucket}/{Key+}?restore", + } + + if input == nil { + input = &RestoreObjectInput{} + } + + output = &RestoreObjectOutput{} + req = c.newRequest(op, input, output) + return +} + +// RestoreObject API operation for Amazon Simple Storage Service. +// +// Restores an archived copy of an object back into Amazon S3 +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation RestoreObject for usage and error information. +// +// Returned Error Codes: +// * ErrCodeObjectAlreadyInActiveTierError "ObjectAlreadyInActiveTierError" +// This operation is not allowed against this storage tier +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject +func (c *S3) RestoreObject(input *RestoreObjectInput) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + return out, req.Send() +} + +// RestoreObjectWithContext is the same as RestoreObject with the addition of +// the ability to pass a context and additional request options. +// +// See RestoreObject for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) RestoreObjectWithContext(ctx aws.Context, input *RestoreObjectInput, opts ...request.Option) (*RestoreObjectOutput, error) { + req, out := c.RestoreObjectRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPart = "UploadPart" + +// UploadPartRequest generates a "aws/request.Request" representing the +// client's request for the UploadPart operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UploadPart for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadPart method directly +// instead. 
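+//
+// As an editor's illustration (not generated SDK text), a direct UploadPart call
+// could look like the sketch below, assuming an existing Amazon S3 client ("svc"),
+// an uploadID previously returned by CreateMultipartUpload, and placeholder part
+// data (any io.ReadSeeker works as the Body).
+//
+//    _, err := svc.UploadPart(&s3.UploadPartInput{
+//        Bucket:     aws.String("example-bucket"),
+//        Key:        aws.String("big-object"),
+//        UploadId:   aws.String(uploadID),
+//        PartNumber: aws.Int64(1),
+//        Body:       bytes.NewReader(partData),
+//    })
+//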
+// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadPartRequest method. +// req, resp := client.UploadPartRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPartRequest(input *UploadPartInput) (req *request.Request, output *UploadPartOutput) { + op := &request.Operation{ + Name: opUploadPart, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartInput{} + } + + output = &UploadPartOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPart API operation for Amazon Simple Storage Service. +// +// Uploads a part in a multipart upload. +// +// Note: After you initiate multipart upload and upload one or more parts, you +// must either complete or abort multipart upload in order to stop getting charged +// for storage of the uploaded parts. Only after you either complete or abort +// multipart upload, Amazon S3 frees up the parts storage and stops charging +// you for the parts storage. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPart for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart +func (c *S3) UploadPart(input *UploadPartInput) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + return out, req.Send() +} + +// UploadPartWithContext is the same as UploadPart with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPart for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartWithContext(ctx aws.Context, input *UploadPartInput, opts ...request.Option) (*UploadPartOutput, error) { + req, out := c.UploadPartRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUploadPartCopy = "UploadPartCopy" + +// UploadPartCopyRequest generates a "aws/request.Request" representing the +// client's request for the UploadPartCopy operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See UploadPartCopy for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the UploadPartCopy method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the UploadPartCopyRequest method. 
+// req, resp := client.UploadPartCopyRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopyRequest(input *UploadPartCopyInput) (req *request.Request, output *UploadPartCopyOutput) { + op := &request.Operation{ + Name: opUploadPartCopy, + HTTPMethod: "PUT", + HTTPPath: "/{Bucket}/{Key+}", + } + + if input == nil { + input = &UploadPartCopyInput{} + } + + output = &UploadPartCopyOutput{} + req = c.newRequest(op, input, output) + return +} + +// UploadPartCopy API operation for Amazon Simple Storage Service. +// +// Uploads a part by copying data from an existing object as data source. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon Simple Storage Service's +// API operation UploadPartCopy for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy +func (c *S3) UploadPartCopy(input *UploadPartCopyInput) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + return out, req.Send() +} + +// UploadPartCopyWithContext is the same as UploadPartCopy with the addition of +// the ability to pass a context and additional request options. +// +// See UploadPartCopy for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) UploadPartCopyWithContext(ctx aws.Context, input *UploadPartCopyInput, opts ...request.Option) (*UploadPartCopyOutput, error) { + req, out := c.UploadPartCopyRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Specifies the days since the initiation of an Incomplete Multipart Upload +// that Lifecycle will wait before permanently removing all parts of the upload. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload +type AbortIncompleteMultipartUpload struct { + _ struct{} `type:"structure"` + + // Indicates the number of days that must pass since initiation for Lifecycle + // to abort an Incomplete Multipart Upload. + DaysAfterInitiation *int64 `type:"integer"` +} + +// String returns the string representation +func (s AbortIncompleteMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortIncompleteMultipartUpload) GoString() string { + return s.String() +} + +// SetDaysAfterInitiation sets the DaysAfterInitiation field's value. 
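+// As an editor's note (not generated SDK text), the Set* methods return the receiver,
+// so values can be built fluently; for example,
+// (&AbortIncompleteMultipartUpload{}).SetDaysAfterInitiation(7) is equivalent to
+// setting DaysAfterInitiation to aws.Int64(7) in a struct literal.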
+func (s *AbortIncompleteMultipartUpload) SetDaysAfterInitiation(v int64) *AbortIncompleteMultipartUpload { + s.DaysAfterInitiation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadRequest +type AbortMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s AbortMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AbortMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AbortMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AbortMultipartUploadInput) SetBucket(v string) *AbortMultipartUploadInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *AbortMultipartUploadInput) SetKey(v string) *AbortMultipartUploadInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *AbortMultipartUploadInput) SetRequestPayer(v string) *AbortMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *AbortMultipartUploadInput) SetUploadId(v string) *AbortMultipartUploadInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUploadOutput +type AbortMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s AbortMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AbortMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. 
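+//
+// As an editor's illustration (not generated SDK text), the input type above is
+// typically used like the sketch below, assuming an existing Amazon S3 client
+// ("svc") and an uploadID from CreateMultipartUpload; bucket and key are placeholders.
+//
+//    _, err := svc.AbortMultipartUpload(&s3.AbortMultipartUploadInput{
+//        Bucket:   aws.String("example-bucket"),
+//        Key:      aws.String("big-object"),
+//        UploadId: aws.String(uploadID),
+//    })
+//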
+func (s *AbortMultipartUploadOutput) SetRequestCharged(v string) *AbortMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration +type AccelerateConfiguration struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s AccelerateConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccelerateConfiguration) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *AccelerateConfiguration) SetStatus(v string) *AccelerateConfiguration { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy +type AccessControlPolicy struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s AccessControlPolicy) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AccessControlPolicy) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AccessControlPolicy) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AccessControlPolicy"} + if s.Grants != nil { + for i, v := range s.Grants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Grants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrants sets the Grants field's value. +func (s *AccessControlPolicy) SetGrants(v []*Grant) *AccessControlPolicy { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *AccessControlPolicy) SetOwner(v *Owner) *AccessControlPolicy { + s.Owner = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator +type AnalyticsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix to use when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags to use when evaluating an AND predicate. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s AnalyticsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. 
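+//
+// As an editor's illustration (not generated SDK text), an AND operator combining
+// a key prefix with one tag (analytics AND operators combine two or more predicates)
+// could be built like this; the prefix and tag values are placeholders.
+//
+//    and := &AnalyticsAndOperator{
+//        Prefix: aws.String("logs/"),
+//        Tags:   []*Tag{{Key: aws.String("team"), Value: aws.String("data")}},
+//    }
+//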
+func (s *AnalyticsAndOperator) SetPrefix(v string) *AnalyticsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *AnalyticsAndOperator) SetTags(v []*Tag) *AnalyticsAndOperator { + s.Tags = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration +type AnalyticsConfiguration struct { + _ struct{} `type:"structure"` + + // The filter used to describe a set of objects for analyses. A filter must + // have exactly one prefix, one tag, or one conjunction (AnalyticsAndOperator). + // If no filter is provided, all objects will be considered in any analysis. + Filter *AnalyticsFilter `type:"structure"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // If present, it indicates that data related to access patterns will be collected + // and made available to analyze the tradeoffs between different storage classes. + // + // StorageClassAnalysis is a required field + StorageClassAnalysis *StorageClassAnalysis `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.StorageClassAnalysis == nil { + invalidParams.Add(request.NewErrParamRequired("StorageClassAnalysis")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.StorageClassAnalysis != nil { + if err := s.StorageClassAnalysis.Validate(); err != nil { + invalidParams.AddNested("StorageClassAnalysis", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *AnalyticsConfiguration) SetFilter(v *AnalyticsFilter) *AnalyticsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *AnalyticsConfiguration) SetId(v string) *AnalyticsConfiguration { + s.Id = &v + return s +} + +// SetStorageClassAnalysis sets the StorageClassAnalysis field's value. +func (s *AnalyticsConfiguration) SetStorageClassAnalysis(v *StorageClassAnalysis) *AnalyticsConfiguration { + s.StorageClassAnalysis = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination +type AnalyticsExportDestination struct { + _ struct{} `type:"structure"` + + // A destination signifying output to an S3 bucket. + // + // S3BucketDestination is a required field + S3BucketDestination *AnalyticsS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s AnalyticsExportDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsExportDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
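+// As an editor's note (not generated SDK text), this validation is normally run by
+// the SDK before a request is sent; it can also be called directly to surface a
+// request.ErrInvalidParams early, e.g. (with "dest" a placeholder value of this type):
+//
+//    if err := dest.Validate(); err != nil {
+//        // err lists the missing or invalid required fields
+//    }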
+func (s *AnalyticsExportDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsExportDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *AnalyticsExportDestination) SetS3BucketDestination(v *AnalyticsS3BucketDestination) *AnalyticsExportDestination { + s.S3BucketDestination = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter +type AnalyticsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating an + // analytics filter. The operator must have at least two predicates. + And *AnalyticsAndOperator `type:"structure"` + + // The prefix to use when evaluating an analytics filter. + Prefix *string `type:"string"` + + // The tag to use when evaluating an analytics filter. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s AnalyticsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *AnalyticsFilter) SetAnd(v *AnalyticsAndOperator) *AnalyticsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsFilter) SetPrefix(v string) *AnalyticsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *AnalyticsFilter) SetTag(v *Tag) *AnalyticsFilter { + s.Tag = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination +type AnalyticsS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The Amazon resource name (ARN) of the bucket to which data is exported. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The account ID that owns the destination bucket. If no account ID is provided, + // the owner will not be validated prior to exporting data. + BucketAccountId *string `type:"string"` + + // The file format used when exporting data to Amazon S3. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"AnalyticsS3ExportFileFormat"` + + // The prefix to use when exporting data. The exported data begins with this + // prefix. 
+ Prefix *string `type:"string"` +} + +// String returns the string representation +func (s AnalyticsS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AnalyticsS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AnalyticsS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AnalyticsS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *AnalyticsS3BucketDestination) SetBucket(v string) *AnalyticsS3BucketDestination { + s.Bucket = &v + return s +} + +// SetBucketAccountId sets the BucketAccountId field's value. +func (s *AnalyticsS3BucketDestination) SetBucketAccountId(v string) *AnalyticsS3BucketDestination { + s.BucketAccountId = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *AnalyticsS3BucketDestination) SetFormat(v string) *AnalyticsS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDestination { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket +type Bucket struct { + _ struct{} `type:"structure"` + + // Date the bucket was created. + CreationDate *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The name of the bucket. + Name *string `type:"string"` +} + +// String returns the string representation +func (s Bucket) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Bucket) GoString() string { + return s.String() +} + +// SetCreationDate sets the CreationDate field's value. +func (s *Bucket) SetCreationDate(v time.Time) *Bucket { + s.CreationDate = &v + return s +} + +// SetName sets the Name field's value. +func (s *Bucket) SetName(v string) *Bucket { + s.Name = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration +type BucketLifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // Rules is a required field + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s BucketLifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. 
+func (s *BucketLifecycleConfiguration) SetRules(v []*LifecycleRule) *BucketLifecycleConfiguration { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus +type BucketLoggingStatus struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s BucketLoggingStatus) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BucketLoggingStatus) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BucketLoggingStatus) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BucketLoggingStatus"} + if s.LoggingEnabled != nil { + if err := s.LoggingEnabled.Validate(); err != nil { + invalidParams.AddNested("LoggingEnabled", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *BucketLoggingStatus) SetLoggingEnabled(v *LoggingEnabled) *BucketLoggingStatus { + s.LoggingEnabled = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration +type CORSConfiguration struct { + _ struct{} `type:"structure"` + + // CORSRules is a required field + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s CORSConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSConfiguration"} + if s.CORSRules == nil { + invalidParams.Add(request.NewErrParamRequired("CORSRules")) + } + if s.CORSRules != nil { + for i, v := range s.CORSRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "CORSRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetCORSRules sets the CORSRules field's value. +func (s *CORSConfiguration) SetCORSRules(v []*CORSRule) *CORSConfiguration { + s.CORSRules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule +type CORSRule struct { + _ struct{} `type:"structure"` + + // Specifies which headers are allowed in a pre-flight OPTIONS request. + AllowedHeaders []*string `locationName:"AllowedHeader" type:"list" flattened:"true"` + + // Identifies HTTP methods that the domain/origin specified in the rule is allowed + // to execute. + // + // AllowedMethods is a required field + AllowedMethods []*string `locationName:"AllowedMethod" type:"list" flattened:"true" required:"true"` + + // One or more origins you want customers to be able to access the bucket from. + // + // AllowedOrigins is a required field + AllowedOrigins []*string `locationName:"AllowedOrigin" type:"list" flattened:"true" required:"true"` + + // One or more headers in the response that you want customers to be able to + // access from their applications (for example, from a JavaScript XMLHttpRequest + // object). 
+ ExposeHeaders []*string `locationName:"ExposeHeader" type:"list" flattened:"true"` + + // The time in seconds that your browser is to cache the preflight response + // for the specified resource. + MaxAgeSeconds *int64 `type:"integer"` +} + +// String returns the string representation +func (s CORSRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CORSRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CORSRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CORSRule"} + if s.AllowedMethods == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedMethods")) + } + if s.AllowedOrigins == nil { + invalidParams.Add(request.NewErrParamRequired("AllowedOrigins")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAllowedHeaders sets the AllowedHeaders field's value. +func (s *CORSRule) SetAllowedHeaders(v []*string) *CORSRule { + s.AllowedHeaders = v + return s +} + +// SetAllowedMethods sets the AllowedMethods field's value. +func (s *CORSRule) SetAllowedMethods(v []*string) *CORSRule { + s.AllowedMethods = v + return s +} + +// SetAllowedOrigins sets the AllowedOrigins field's value. +func (s *CORSRule) SetAllowedOrigins(v []*string) *CORSRule { + s.AllowedOrigins = v + return s +} + +// SetExposeHeaders sets the ExposeHeaders field's value. +func (s *CORSRule) SetExposeHeaders(v []*string) *CORSRule { + s.ExposeHeaders = v + return s +} + +// SetMaxAgeSeconds sets the MaxAgeSeconds field's value. +func (s *CORSRule) SetMaxAgeSeconds(v int64) *CORSRule { + s.MaxAgeSeconds = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration +type CloudFunctionConfiguration struct { + _ struct{} `type:"structure"` + + CloudFunction *string `type:"string"` + + // Bucket event for which to send notifications. + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + InvocationRole *string `type:"string"` +} + +// String returns the string representation +func (s CloudFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CloudFunctionConfiguration) GoString() string { + return s.String() +} + +// SetCloudFunction sets the CloudFunction field's value. +func (s *CloudFunctionConfiguration) SetCloudFunction(v string) *CloudFunctionConfiguration { + s.CloudFunction = &v + return s +} + +// SetEvent sets the Event field's value. +func (s *CloudFunctionConfiguration) SetEvent(v string) *CloudFunctionConfiguration { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *CloudFunctionConfiguration) SetEvents(v []*string) *CloudFunctionConfiguration { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *CloudFunctionConfiguration) SetId(v string) *CloudFunctionConfiguration { + s.Id = &v + return s +} + +// SetInvocationRole sets the InvocationRole field's value. 
+func (s *CloudFunctionConfiguration) SetInvocationRole(v string) *CloudFunctionConfiguration { + s.InvocationRole = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix +type CommonPrefix struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s CommonPrefix) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CommonPrefix) GoString() string { + return s.String() +} + +// SetPrefix sets the Prefix field's value. +func (s *CommonPrefix) SetPrefix(v string) *CommonPrefix { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadRequest +type CompleteMultipartUploadInput struct { + _ struct{} `type:"structure" payload:"MultipartUpload"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s CompleteMultipartUploadInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CompleteMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CompleteMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadInput) SetBucket(v string) *CompleteMultipartUploadInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadInput) SetKey(v string) *CompleteMultipartUploadInput { + s.Key = &v + return s +} + +// SetMultipartUpload sets the MultipartUpload field's value. +func (s *CompleteMultipartUploadInput) SetMultipartUpload(v *CompletedMultipartUpload) *CompleteMultipartUploadInput { + s.MultipartUpload = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CompleteMultipartUploadInput) SetRequestPayer(v string) *CompleteMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
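+//
+// As an editor's illustration (not generated SDK text), completing a multipart
+// upload with the part ETags collected from UploadPart could look like the sketch
+// below, assuming an existing Amazon S3 client ("svc"), an uploadID from
+// CreateMultipartUpload and a placeholder ETag.
+//
+//    _, err := svc.CompleteMultipartUpload(&s3.CompleteMultipartUploadInput{
+//        Bucket:   aws.String("example-bucket"),
+//        Key:      aws.String("big-object"),
+//        UploadId: aws.String(uploadID),
+//        MultipartUpload: &s3.CompletedMultipartUpload{
+//            Parts: []*s3.CompletedPart{
+//                {ETag: aws.String(part1ETag), PartNumber: aws.Int64(1)},
+//            },
+//        },
+//    })
+//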
+func (s *CompleteMultipartUploadInput) SetUploadId(v string) *CompleteMultipartUploadInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUploadOutput +type CompleteMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + Bucket *string `type:"string"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + Key *string `min:"1" type:"string"` + + Location *string `type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CompleteMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompleteMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *CompleteMultipartUploadOutput) SetBucket(v string) *CompleteMultipartUploadOutput { + s.Bucket = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *CompleteMultipartUploadOutput) SetExpiration(v string) *CompleteMultipartUploadOutput { + s.Expiration = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CompleteMultipartUploadOutput) SetKey(v string) *CompleteMultipartUploadOutput { + s.Key = &v + return s +} + +// SetLocation sets the Location field's value. +func (s *CompleteMultipartUploadOutput) SetLocation(v string) *CompleteMultipartUploadOutput { + s.Location = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CompleteMultipartUploadOutput) SetRequestCharged(v string) *CompleteMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CompleteMultipartUploadOutput) SetSSEKMSKeyId(v string) *CompleteMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CompleteMultipartUploadOutput) SetServerSideEncryption(v string) *CompleteMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *CompleteMultipartUploadOutput) SetVersionId(v string) *CompleteMultipartUploadOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload +type CompletedMultipartUpload struct { + _ struct{} `type:"structure"` + + Parts []*CompletedPart `locationName:"Part" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s CompletedMultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedMultipartUpload) GoString() string { + return s.String() +} + +// SetParts sets the Parts field's value. +func (s *CompletedMultipartUpload) SetParts(v []*CompletedPart) *CompletedMultipartUpload { + s.Parts = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart +type CompletedPart struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Part number that identifies the part. This is a positive integer between + // 1 and 10,000. + PartNumber *int64 `type:"integer"` +} + +// String returns the string representation +func (s CompletedPart) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CompletedPart) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CompletedPart) SetETag(v string) *CompletedPart { + s.ETag = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *CompletedPart) SetPartNumber(v int64) *CompletedPart { + s.PartNumber = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition +type Condition struct { + _ struct{} `type:"structure"` + + // The HTTP error code when the redirect is applied. In the event of an error, + // if the error code equals this value, then the specified redirect is applied. + // Required when parent element Condition is specified and sibling KeyPrefixEquals + // is not specified. If both are specified, then both must be true for the redirect + // to be applied. + HttpErrorCodeReturnedEquals *string `type:"string"` + + // The object key name prefix when the redirect is applied. For example, to + // redirect requests for ExamplePage.html, the key prefix will be ExamplePage.html. + // To redirect request for all pages with the prefix docs/, the key prefix will + // be /docs, which identifies all objects in the docs/ folder. Required when + // the parent element Condition is specified and sibling HttpErrorCodeReturnedEquals + // is not specified. If both conditions are specified, both must be true for + // the redirect to be applied. + KeyPrefixEquals *string `type:"string"` +} + +// String returns the string representation +func (s Condition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Condition) GoString() string { + return s.String() +} + +// SetHttpErrorCodeReturnedEquals sets the HttpErrorCodeReturnedEquals field's value. +func (s *Condition) SetHttpErrorCodeReturnedEquals(v string) *Condition { + s.HttpErrorCodeReturnedEquals = &v + return s +} + +// SetKeyPrefixEquals sets the KeyPrefixEquals field's value. 
+func (s *Condition) SetKeyPrefixEquals(v string) *Condition { + s.KeyPrefixEquals = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectRequest +type CopyObjectInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // The date and time at which the object is no longer cacheable. 
+ Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+ // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+ GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+ // Allows grantee to read the object data and its metadata.
+ GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+ // Allows grantee to read the object ACL.
+ GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+ // Allows grantee to write the ACL for the applicable object.
+ GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+ // Key is a required field
+ Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+ // A map of metadata to store with the object in S3.
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Specifies whether the metadata is copied from the source object or replaced
+ // with metadata provided in the request.
+ MetadataDirective *string `location:"header" locationName:"x-amz-metadata-directive" type:"string" enum:"MetadataDirective"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the object destination object this value must be used in + // conjunction with the TaggingDirective. The tag-set must be encoded as URL + // Query parameters + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // Specifies whether the object tag-set are copied from the source object or + // replaced with tag-set provided in the request. + TaggingDirective *string `location:"header" locationName:"x-amz-tagging-directive" type:"string" enum:"TaggingDirective"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s CopyObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CopyObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CopyObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CopyObjectInput) SetACL(v string) *CopyObjectInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CopyObjectInput) SetBucket(v string) *CopyObjectInput { + s.Bucket = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CopyObjectInput) SetContentDisposition(v string) *CopyObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CopyObjectInput) SetContentEncoding(v string) *CopyObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CopyObjectInput) SetContentLanguage(v string) *CopyObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CopyObjectInput) SetContentType(v string) *CopyObjectInput { + s.ContentType = &v + return s +} + +// SetCopySource sets the CopySource field's value. +func (s *CopyObjectInput) SetCopySource(v string) *CopyObjectInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfMatch(v string) *CopyObjectInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. 
+func (s *CopyObjectInput) SetCopySourceIfModifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *CopyObjectInput) SetCopySourceIfNoneMatch(v string) *CopyObjectInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *CopyObjectInput) SetCopySourceIfUnmodifiedSince(v time.Time) *CopyObjectInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerAlgorithm(v string) *CopyObjectInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKey(v string) *CopyObjectInput { + s.CopySourceSSECustomerKey = &v + return s +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetCopySourceSSECustomerKeyMD5(v string) *CopyObjectInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CopyObjectInput) SetExpires(v time.Time) *CopyObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CopyObjectInput) SetGrantFullControl(v string) *CopyObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CopyObjectInput) SetGrantRead(v string) *CopyObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CopyObjectInput) SetGrantReadACP(v string) *CopyObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CopyObjectInput) SetGrantWriteACP(v string) *CopyObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CopyObjectInput) SetKey(v string) *CopyObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CopyObjectInput) SetMetadata(v map[string]*string) *CopyObjectInput { + s.Metadata = v + return s +} + +// SetMetadataDirective sets the MetadataDirective field's value. +func (s *CopyObjectInput) SetMetadataDirective(v string) *CopyObjectInput { + s.MetadataDirective = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CopyObjectInput) SetRequestPayer(v string) *CopyObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectInput) SetSSECustomerAlgorithm(v string) *CopyObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *CopyObjectInput) SetSSECustomerKey(v string) *CopyObjectInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectInput) SetSSECustomerKeyMD5(v string) *CopyObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectInput) SetSSEKMSKeyId(v string) *CopyObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. 
+func (s *CopyObjectInput) SetServerSideEncryption(v string) *CopyObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CopyObjectInput) SetStorageClass(v string) *CopyObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *CopyObjectInput) SetTagging(v string) *CopyObjectInput { + s.Tagging = &v + return s +} + +// SetTaggingDirective sets the TaggingDirective field's value. +func (s *CopyObjectInput) SetTaggingDirective(v string) *CopyObjectInput { + s.TaggingDirective = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CopyObjectInput) SetWebsiteRedirectLocation(v string) *CopyObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectOutput +type CopyObjectOutput struct { + _ struct{} `type:"structure" payload:"CopyObjectResult"` + + CopyObjectResult *CopyObjectResult `type:"structure"` + + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If the object expiration is configured, the response includes this header. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version ID of the newly created copy. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s CopyObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectOutput) GoString() string { + return s.String() +} + +// SetCopyObjectResult sets the CopyObjectResult field's value. +func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { + s.CopyObjectResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. +func (s *CopyObjectOutput) SetCopySourceVersionId(v string) *CopyObjectOutput { + s.CopySourceVersionId = &v + return s +} + +// SetExpiration sets the Expiration field's value. 
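CopyObjectInput performs a server-side copy: Bucket, Key, and CopySource ("sourcebucket/sourcekey", URL-encoded) are required, and MetadataDirective decides whether the source metadata is copied or replaced. A brief sketch with hypothetical bucket and key names:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	out, err := svc.CopyObject(&s3.CopyObjectInput{
		Bucket: aws.String("destination-bucket"),
		Key:    aws.String("path/to/copy.txt"),
		// "source-bucket/source-key"; keys with special characters must be URL-encoded.
		CopySource:           aws.String("source-bucket/path/to/source.txt"),
		MetadataDirective:    aws.String("REPLACE"), // replace rather than copy the source metadata
		Metadata:             map[string]*string{"origin": aws.String("copied-by-example")},
		ServerSideEncryption: aws.String("AES256"),
	})
	if err != nil {
		log.Fatal(err)
	}
	if out.CopyObjectResult != nil {
		fmt.Println("new ETag:", aws.StringValue(out.CopyObjectResult.ETag))
	}
}
```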
+func (s *CopyObjectOutput) SetExpiration(v string) *CopyObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CopyObjectOutput) SetRequestCharged(v string) *CopyObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CopyObjectOutput) SetSSECustomerAlgorithm(v string) *CopyObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CopyObjectOutput) SetSSECustomerKeyMD5(v string) *CopyObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CopyObjectOutput) SetSSEKMSKeyId(v string) *CopyObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CopyObjectOutput) SetServerSideEncryption(v string) *CopyObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *CopyObjectOutput) SetVersionId(v string) *CopyObjectOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult +type CopyObjectResult struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyObjectResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyObjectResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyObjectResult) SetETag(v string) *CopyObjectResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyObjectResult) SetLastModified(v time.Time) *CopyObjectResult { + s.LastModified = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult +type CopyPartResult struct { + _ struct{} `type:"structure"` + + // Entity tag of the object. + ETag *string `type:"string"` + + // Date and time at which the object was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s CopyPartResult) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CopyPartResult) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *CopyPartResult) SetETag(v string) *CopyPartResult { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *CopyPartResult) SetLastModified(v time.Time) *CopyPartResult { + s.LastModified = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration +type CreateBucketConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies the region where the bucket will be created. If you don't specify + // a region, the bucket will be created in US Standard. 
+ LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s CreateBucketConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketConfiguration) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. +func (s *CreateBucketConfiguration) SetLocationConstraint(v string) *CreateBucketConfiguration { + s.LocationConstraint = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketRequest +type CreateBucketInput struct { + _ struct{} `type:"structure" payload:"CreateBucketConfiguration"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s CreateBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *CreateBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateBucketInput) SetACL(v string) *CreateBucketInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateBucketInput) SetBucket(v string) *CreateBucketInput { + s.Bucket = &v + return s +} + +// SetCreateBucketConfiguration sets the CreateBucketConfiguration field's value. +func (s *CreateBucketInput) SetCreateBucketConfiguration(v *CreateBucketConfiguration) *CreateBucketInput { + s.CreateBucketConfiguration = v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateBucketInput) SetGrantFullControl(v string) *CreateBucketInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateBucketInput) SetGrantRead(v string) *CreateBucketInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. 
+func (s *CreateBucketInput) SetGrantReadACP(v string) *CreateBucketInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *CreateBucketInput) SetGrantWrite(v string) *CreateBucketInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateBucketInput) SetGrantWriteACP(v string) *CreateBucketInput { + s.GrantWriteACP = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketOutput +type CreateBucketOutput struct { + _ struct{} `type:"structure"` + + Location *string `location:"header" locationName:"Location" type:"string"` +} + +// String returns the string representation +func (s CreateBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateBucketOutput) GoString() string { + return s.String() +} + +// SetLocation sets the Location field's value. +func (s *CreateBucketOutput) SetLocation(v string) *CreateBucketOutput { + s.Location = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadRequest +type CreateMultipartUploadInput struct { + _ struct{} `type:"structure"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"` + + // Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to read the object data and its metadata. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the object ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to write the ACL for the applicable object. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // A map of metadata to store with the object in S3. 
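CreateBucketInput wraps the optional CreateBucketConfiguration payload; outside the default region the LocationConstraint should name the region the client is talking to. A short sketch, assuming a hypothetical bucket in eu-west-1:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession(&aws.Config{Region: aws.String("eu-west-1")})
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	out, err := svc.CreateBucket(&s3.CreateBucketInput{
		Bucket: aws.String("example-bucket"),
		ACL:    aws.String("private"),
		// Outside us-east-1 the location constraint should match the target region.
		CreateBucketConfiguration: &s3.CreateBucketConfiguration{
			LocationConstraint: aws.String("eu-west-1"),
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bucket created at:", aws.StringValue(out.Location))
}
```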
+ Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+ // Confirms that the requester knows that she or he will be charged for the
+ // request. Bucket owners need not specify this parameter in their requests.
+ // Documentation on downloading objects from requester pays buckets can be found
+ // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+ RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+ // Specifies the algorithm to use when encrypting the object (e.g., AES256).
+ SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+ // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+ // data. This value is used to store the object and then it is discarded; Amazon
+ // does not store the encryption key. The key must be appropriate for use with
+ // the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
+ // header.
+ SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"`
+
+ // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption
+ // key was transmitted without error.
+ SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"`
+
+ // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
+ // requests for an object protected by AWS KMS will fail if not made via SSL
+ // or using SigV4. Documentation on configuring any of the officially supported
+ // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
+ SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"`
+
+ // The Server-side encryption algorithm used when storing this object in S3
+ // (e.g., AES256, aws:kms).
+ ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
+
+ // The type of storage to use for the object. Defaults to 'STANDARD'.
+ StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
+
+ // If the bucket is configured as a website, redirects requests for this object
+ // to another object in the same bucket or to an external URL. Amazon S3 stores
+ // the value of this header in the object metadata.
+ WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"`
+}
+
+// String returns the string representation
+func (s CreateMultipartUploadInput) String() string {
+ return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s CreateMultipartUploadInput) GoString() string {
+ return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *CreateMultipartUploadInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "CreateMultipartUploadInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *CreateMultipartUploadInput) SetACL(v string) *CreateMultipartUploadInput { + s.ACL = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadInput) SetBucket(v string) *CreateMultipartUploadInput { + s.Bucket = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *CreateMultipartUploadInput) SetContentDisposition(v string) *CreateMultipartUploadInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *CreateMultipartUploadInput) SetContentEncoding(v string) *CreateMultipartUploadInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *CreateMultipartUploadInput) SetContentLanguage(v string) *CreateMultipartUploadInput { + s.ContentLanguage = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *CreateMultipartUploadInput) SetContentType(v string) *CreateMultipartUploadInput { + s.ContentType = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *CreateMultipartUploadInput) SetExpires(v time.Time) *CreateMultipartUploadInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *CreateMultipartUploadInput) SetGrantFullControl(v string) *CreateMultipartUploadInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *CreateMultipartUploadInput) SetGrantRead(v string) *CreateMultipartUploadInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *CreateMultipartUploadInput) SetGrantReadACP(v string) *CreateMultipartUploadInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *CreateMultipartUploadInput) SetGrantWriteACP(v string) *CreateMultipartUploadInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadInput) SetKey(v string) *CreateMultipartUploadInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *CreateMultipartUploadInput) SetMetadata(v map[string]*string) *CreateMultipartUploadInput { + s.Metadata = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *CreateMultipartUploadInput) SetRequestPayer(v string) *CreateMultipartUploadInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. 
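The generated Validate methods run these same checks client-side before a request is signed and sent, and they can also be invoked directly; the chained Set* helpers make that convenient. A small sketch, with hypothetical names, showing a missing required Key being reported as an invalid-parameter error:

```go
package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	// Builder-style setters return the input, so calls can be chained.
	in := (&s3.CreateMultipartUploadInput{}).
		SetBucket("example-bucket").
		SetContentType("application/octet-stream").
		SetServerSideEncryption("AES256")

	// Key is required but has not been set, so validation fails before any request is made.
	if err := in.Validate(); err != nil {
		fmt.Println("invalid input:", err)
	}

	in.SetKey("uploads/large-object.bin")
	if err := in.Validate(); err == nil {
		fmt.Println("input is valid, key:", aws.StringValue(in.Key))
	}
}
```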
+func (s *CreateMultipartUploadInput) SetSSECustomerKey(v string) *CreateMultipartUploadInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadInput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadInput) SetSSEKMSKeyId(v string) *CreateMultipartUploadInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadInput) SetServerSideEncryption(v string) *CreateMultipartUploadInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *CreateMultipartUploadInput) SetStorageClass(v string) *CreateMultipartUploadInput { + s.StorageClass = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *CreateMultipartUploadInput) SetWebsiteRedirectLocation(v string) *CreateMultipartUploadInput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUploadOutput +type CreateMultipartUploadOutput struct { + _ struct{} `type:"structure"` + + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `locationName:"Bucket" type:"string"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // ID for the initiated multipart upload. 
+ UploadId *string `type:"string"` +} + +// String returns the string representation +func (s CreateMultipartUploadOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateMultipartUploadOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *CreateMultipartUploadOutput) SetAbortDate(v time.Time) *CreateMultipartUploadOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *CreateMultipartUploadOutput) SetAbortRuleId(v string) *CreateMultipartUploadOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *CreateMultipartUploadOutput) SetBucket(v string) *CreateMultipartUploadOutput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { + s.Key = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *CreateMultipartUploadOutput) SetRequestCharged(v string) *CreateMultipartUploadOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerAlgorithm(v string) *CreateMultipartUploadOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *CreateMultipartUploadOutput) SetSSECustomerKeyMD5(v string) *CreateMultipartUploadOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *CreateMultipartUploadOutput) SetSSEKMSKeyId(v string) *CreateMultipartUploadOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *CreateMultipartUploadOutput) SetServerSideEncryption(v string) *CreateMultipartUploadOutput { + s.ServerSideEncryption = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *CreateMultipartUploadOutput) SetUploadId(v string) *CreateMultipartUploadOutput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete +type Delete struct { + _ struct{} `type:"structure"` + + // Objects is a required field + Objects []*ObjectIdentifier `locationName:"Object" type:"list" flattened:"true" required:"true"` + + // Element to enable quiet mode for the request. When you add this element, + // you must set its value to true. + Quiet *bool `type:"boolean"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Objects == nil { + invalidParams.Add(request.NewErrParamRequired("Objects")) + } + if s.Objects != nil { + for i, v := range s.Objects { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Objects", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetObjects sets the Objects field's value. 
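The Delete type carries the object list for a batch delete, and Quiet suppresses the per-key success entries in the response so that only failures come back. A minimal DeleteObjects sketch with a hypothetical bucket and keys:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	keys := []string{"logs/2017-05-01.gz", "logs/2017-05-02.gz"}
	objects := make([]*s3.ObjectIdentifier, 0, len(keys))
	for _, k := range keys {
		objects = append(objects, &s3.ObjectIdentifier{Key: aws.String(k)})
	}

	out, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"),
		Delete: &s3.Delete{
			Objects: objects,
			Quiet:   aws.Bool(true), // only errors are reported back
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range out.Errors {
		fmt.Printf("failed to delete %s: %s\n", aws.StringValue(e.Key), aws.StringValue(e.Message))
	}
}
```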
+func (s *Delete) SetObjects(v []*ObjectIdentifier) *Delete { + s.Objects = v + return s +} + +// SetQuiet sets the Quiet field's value. +func (s *Delete) SetQuiet(v bool) *Delete { + s.Quiet = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationRequest +type DeleteBucketAnalyticsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which an analytics configuration is deleted. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetBucket(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketAnalyticsConfigurationInput) SetId(v string) *DeleteBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfigurationOutput +type DeleteBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsRequest +type DeleteBucketCorsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketCorsInput) SetBucket(v string) *DeleteBucketCorsInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCorsOutput +type DeleteBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketCorsOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketRequest +type DeleteBucketInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInput) SetBucket(v string) *DeleteBucketInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationRequest +type DeleteBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketInventoryConfigurationInput) SetBucket(v string) *DeleteBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. 
+func (s *DeleteBucketInventoryConfigurationInput) SetId(v string) *DeleteBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfigurationOutput +type DeleteBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleRequest +type DeleteBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketLifecycleInput) SetBucket(v string) *DeleteBucketLifecycleInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycleOutput +type DeleteBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketLifecycleOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationRequest +type DeleteBucketMetricsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the metrics configuration to delete. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
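These DeleteBucket*Input types all follow the same pattern: a required Bucket (plus an Id for the analytics, inventory, and metrics configurations) and an empty output structure. A brief sketch that drops a bucket's lifecycle rules and then removes the emptied bucket itself, names hypothetical:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)
	bucket := aws.String("example-bucket")

	// Drop the lifecycle rules first; the output carries no fields.
	if _, err := svc.DeleteBucketLifecycle(&s3.DeleteBucketLifecycleInput{Bucket: bucket}); err != nil {
		log.Fatal(err)
	}

	// The bucket itself can only be deleted once it contains no objects.
	if _, err := svc.DeleteBucket(&s3.DeleteBucketInput{Bucket: bucket}); err != nil {
		log.Fatal(err)
	}
	log.Println("bucket removed")
}
```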
+func (s *DeleteBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetBucket(v string) *DeleteBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *DeleteBucketMetricsConfigurationInput) SetId(v string) *DeleteBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfigurationOutput +type DeleteBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOutput +type DeleteBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyRequest +type DeleteBucketPolicyInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketPolicyInput) SetBucket(v string) *DeleteBucketPolicyInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicyOutput +type DeleteBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketPolicyOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationRequest +type DeleteBucketReplicationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketReplicationInput) SetBucket(v string) *DeleteBucketReplicationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplicationOutput +type DeleteBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketReplicationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingRequest +type DeleteBucketTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *DeleteBucketTaggingInput) SetBucket(v string) *DeleteBucketTaggingInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTaggingOutput +type DeleteBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketTaggingOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteRequest +type DeleteBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteBucketWebsiteInput) SetBucket(v string) *DeleteBucketWebsiteInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsiteOutput +type DeleteBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DeleteBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBucketWebsiteOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry +type DeleteMarkerEntry struct { + _ struct{} `type:"structure"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeleteMarkerEntry) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteMarkerEntry) GoString() string { + return s.String() +} + +// SetIsLatest sets the IsLatest field's value. +func (s *DeleteMarkerEntry) SetIsLatest(v bool) *DeleteMarkerEntry { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteMarkerEntry) SetKey(v string) *DeleteMarkerEntry { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *DeleteMarkerEntry) SetLastModified(v time.Time) *DeleteMarkerEntry { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. 
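DeleteMarkerEntry only shows up when listing a versioned bucket; it records where a DELETE issued without a version ID left a delete marker. A sketch that prints the markers under a hypothetical prefix using ListObjectVersions:

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess, err := session.NewSession()
	if err != nil {
		log.Fatal(err)
	}
	svc := s3.New(sess)

	out, err := svc.ListObjectVersions(&s3.ListObjectVersionsInput{
		Bucket: aws.String("example-versioned-bucket"),
		Prefix: aws.String("reports/"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range out.DeleteMarkers {
		fmt.Printf("delete marker on %s (version %s, latest=%t, modified %s)\n",
			aws.StringValue(m.Key), aws.StringValue(m.VersionId),
			aws.BoolValue(m.IsLatest), aws.TimeValue(m.LastModified))
	}
}
```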
+func (s *DeleteMarkerEntry) SetOwner(v *Owner) *DeleteMarkerEntry { + s.Owner = v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteMarkerEntry) SetVersionId(v string) *DeleteMarkerEntry { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectRequest +type DeleteObjectInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectInput) SetBucket(v string) *DeleteObjectInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectInput) SetKey(v string) *DeleteObjectInput { + s.Key = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectInput) SetMFA(v string) *DeleteObjectInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectInput) SetRequestPayer(v string) *DeleteObjectInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectInput) SetVersionId(v string) *DeleteObjectInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectOutput +type DeleteObjectOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether the versioned object that was permanently deleted was (true) + // or was not (false) a delete marker. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Returns the version ID of the delete marker created as a result of the DELETE + // operation. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectOutput) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeleteObjectOutput) SetDeleteMarker(v bool) *DeleteObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectOutput) SetRequestCharged(v string) *DeleteObjectOutput { + s.RequestCharged = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectOutput) SetVersionId(v string) *DeleteObjectOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingRequest +type DeleteObjectTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // The versionId of the object that the tag-set will be removed from. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectTaggingInput) SetBucket(v string) *DeleteObjectTaggingInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeleteObjectTaggingInput) SetKey(v string) *DeleteObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingInput) SetVersionId(v string) *DeleteObjectTaggingInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTaggingOutput +type DeleteObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // The versionId of the object the tag-set was removed from. 
+ VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s DeleteObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetVersionId sets the VersionId field's value. +func (s *DeleteObjectTaggingOutput) SetVersionId(v string) *DeleteObjectTaggingOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsRequest +type DeleteObjectsInput struct { + _ struct{} `type:"structure" payload:"Delete"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Delete is a required field + Delete *Delete `locationName:"Delete" type:"structure" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. + MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s DeleteObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Delete == nil { + invalidParams.Add(request.NewErrParamRequired("Delete")) + } + if s.Delete != nil { + if err := s.Delete.Validate(); err != nil { + invalidParams.AddNested("Delete", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *DeleteObjectsInput) SetBucket(v string) *DeleteObjectsInput { + s.Bucket = &v + return s +} + +// SetDelete sets the Delete field's value. +func (s *DeleteObjectsInput) SetDelete(v *Delete) *DeleteObjectsInput { + s.Delete = v + return s +} + +// SetMFA sets the MFA field's value. +func (s *DeleteObjectsInput) SetMFA(v string) *DeleteObjectsInput { + s.MFA = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *DeleteObjectsInput) SetRequestPayer(v string) *DeleteObjectsInput { + s.RequestPayer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectsOutput +type DeleteObjectsOutput struct { + _ struct{} `type:"structure"` + + Deleted []*DeletedObject `type:"list" flattened:"true"` + + Errors []*Error `locationName:"Error" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s DeleteObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteObjectsOutput) GoString() string { + return s.String() +} + +// SetDeleted sets the Deleted field's value. +func (s *DeleteObjectsOutput) SetDeleted(v []*DeletedObject) *DeleteObjectsOutput { + s.Deleted = v + return s +} + +// SetErrors sets the Errors field's value. +func (s *DeleteObjectsOutput) SetErrors(v []*Error) *DeleteObjectsOutput { + s.Errors = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *DeleteObjectsOutput) SetRequestCharged(v string) *DeleteObjectsOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject +type DeletedObject struct { + _ struct{} `type:"structure"` + + DeleteMarker *bool `type:"boolean"` + + DeleteMarkerVersionId *string `type:"string"` + + Key *string `min:"1" type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s DeletedObject) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeletedObject) GoString() string { + return s.String() +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *DeletedObject) SetDeleteMarker(v bool) *DeletedObject { + s.DeleteMarker = &v + return s +} + +// SetDeleteMarkerVersionId sets the DeleteMarkerVersionId field's value. +func (s *DeletedObject) SetDeleteMarkerVersionId(v string) *DeletedObject { + s.DeleteMarkerVersionId = &v + return s +} + +// SetKey sets the Key field's value. +func (s *DeletedObject) SetKey(v string) *DeletedObject { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *DeletedObject) SetVersionId(v string) *DeletedObject { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination +type Destination struct { + _ struct{} `type:"structure"` + + // Amazon resource name (ARN) of the bucket where you want Amazon S3 to store + // replicas of the object identified by the rule. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` +} + +// String returns the string representation +func (s Destination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Destination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Destination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Destination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *Destination) SetBucket(v string) *Destination { + s.Bucket = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. 
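
The generated types in this file all follow one pattern: pointer-valued fields, chainable Set* helpers that return their receiver, and a Validate method derived from the required/min tags. Purely as an illustration (this sketch is not part of the vendored file; the session setup, region, bucket, and key are placeholder assumptions), a batch delete built from DeleteObjectsInput, Delete, and ObjectIdentifier might look like:

    package main

    import (
        "fmt"
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // Placeholder session and region; adjust for a real environment.
        sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("eu-west-1")}))
        svc := s3.New(sess)

        // The generated setters return their receiver, so the request can be chained.
        input := &s3.DeleteObjectsInput{}
        input.SetBucket("example-bucket").SetDelete(&s3.Delete{
            Objects: []*s3.ObjectIdentifier{{Key: aws.String("old/report.csv")}},
        })

        // The SDK runs Validate itself before sending; calling it here only shows
        // where a missing required field such as Bucket would be reported.
        if err := input.Validate(); err != nil {
            log.Fatal(err)
        }

        out, err := svc.DeleteObjects(input)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("deleted %d objects, %d errors\n", len(out.Deleted), len(out.Errors))
    }
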
+func (s *Destination) SetStorageClass(v string) *Destination { + s.StorageClass = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error +type Error struct { + _ struct{} `type:"structure"` + + Code *string `type:"string"` + + Key *string `min:"1" type:"string"` + + Message *string `type:"string"` + + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s Error) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Error) GoString() string { + return s.String() +} + +// SetCode sets the Code field's value. +func (s *Error) SetCode(v string) *Error { + s.Code = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Error) SetKey(v string) *Error { + s.Key = &v + return s +} + +// SetMessage sets the Message field's value. +func (s *Error) SetMessage(v string) *Error { + s.Message = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *Error) SetVersionId(v string) *Error { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument +type ErrorDocument struct { + _ struct{} `type:"structure"` + + // The object key name to use when a 4XX class error occurs. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ErrorDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ErrorDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ErrorDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ErrorDocument"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ErrorDocument) SetKey(v string) *ErrorDocument { + s.Key = &v + return s +} + +// Container for key value pair that defines the criteria for the filter rule. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule +type FilterRule struct { + _ struct{} `type:"structure"` + + // Object key name prefix or suffix identifying one or more objects to which + // the filtering rule applies. Maximum prefix length can be up to 1,024 characters. + // Overlapping prefixes and suffixes are not supported. For more information, + // go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + Name *string `type:"string" enum:"FilterRuleName"` + + Value *string `type:"string"` +} + +// String returns the string representation +func (s FilterRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FilterRule) GoString() string { + return s.String() +} + +// SetName sets the Name field's value. +func (s *FilterRule) SetName(v string) *FilterRule { + s.Name = &v + return s +} + +// SetValue sets the Value field's value. 
+func (s *FilterRule) SetValue(v string) *FilterRule { + s.Value = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationRequest +type GetBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure"` + + // Name of the bucket for which the accelerate configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAccelerateConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAccelerateConfigurationInput) SetBucket(v string) *GetBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfigurationOutput +type GetBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` + + // The accelerate configuration of the bucket. + Status *string `type:"string" enum:"BucketAccelerateStatus"` +} + +// String returns the string representation +func (s GetBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// SetStatus sets the Status field's value. +func (s *GetBucketAccelerateConfigurationOutput) SetStatus(v string) *GetBucketAccelerateConfigurationOutput { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclRequest +type GetBucketAclInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAclInput) SetBucket(v string) *GetBucketAclInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAclOutput +type GetBucketAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. 
+ Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetBucketAclOutput) SetGrants(v []*Grant) *GetBucketAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetBucketAclOutput) SetOwner(v *Owner) *GetBucketAclOutput { + s.Owner = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationRequest +type GetBucketAnalyticsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which an analytics configuration is retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketAnalyticsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetBucket(v string) *GetBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *GetBucketAnalyticsConfigurationInput) SetId(v string) *GetBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfigurationOutput +type GetBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + AnalyticsConfiguration *AnalyticsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. 
+func (s *GetBucketAnalyticsConfigurationOutput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *GetBucketAnalyticsConfigurationOutput { + s.AnalyticsConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsRequest +type GetBucketCorsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketCorsInput) SetBucket(v string) *GetBucketCorsInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCorsOutput +type GetBucketCorsOutput struct { + _ struct{} `type:"structure"` + + CORSRules []*CORSRule `locationName:"CORSRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketCorsOutput) GoString() string { + return s.String() +} + +// SetCORSRules sets the CORSRules field's value. +func (s *GetBucketCorsOutput) SetCORSRules(v []*CORSRule) *GetBucketCorsOutput { + s.CORSRules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationRequest +type GetBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketInventoryConfigurationInput) SetBucket(v string) *GetBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. 
+func (s *GetBucketInventoryConfigurationInput) SetId(v string) *GetBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfigurationOutput +type GetBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // Specifies the inventory configuration. + InventoryConfiguration *InventoryConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. +func (s *GetBucketInventoryConfigurationOutput) SetInventoryConfiguration(v *InventoryConfiguration) *GetBucketInventoryConfigurationOutput { + s.InventoryConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationRequest +type GetBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleConfigurationInput) SetBucket(v string) *GetBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfigurationOutput +type GetBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` + + Rules []*LifecycleRule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. 
+func (s *GetBucketLifecycleConfigurationOutput) SetRules(v []*LifecycleRule) *GetBucketLifecycleConfigurationOutput { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleRequest +type GetBucketLifecycleInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLifecycleInput) SetBucket(v string) *GetBucketLifecycleInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleOutput +type GetBucketLifecycleOutput struct { + _ struct{} `type:"structure"` + + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s GetBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLifecycleOutput) GoString() string { + return s.String() +} + +// SetRules sets the Rules field's value. +func (s *GetBucketLifecycleOutput) SetRules(v []*Rule) *GetBucketLifecycleOutput { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationRequest +type GetBucketLocationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLocationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLocationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLocationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLocationInput) SetBucket(v string) *GetBucketLocationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocationOutput +type GetBucketLocationOutput struct { + _ struct{} `type:"structure"` + + LocationConstraint *string `type:"string" enum:"BucketLocationConstraint"` +} + +// String returns the string representation +func (s GetBucketLocationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLocationOutput) GoString() string { + return s.String() +} + +// SetLocationConstraint sets the LocationConstraint field's value. 
+func (s *GetBucketLocationOutput) SetLocationConstraint(v string) *GetBucketLocationOutput { + s.LocationConstraint = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingRequest +type GetBucketLoggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketLoggingInput) SetBucket(v string) *GetBucketLoggingInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLoggingOutput +type GetBucketLoggingOutput struct { + _ struct{} `type:"structure"` + + LoggingEnabled *LoggingEnabled `type:"structure"` +} + +// String returns the string representation +func (s GetBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketLoggingOutput) GoString() string { + return s.String() +} + +// SetLoggingEnabled sets the LoggingEnabled field's value. +func (s *GetBucketLoggingOutput) SetLoggingEnabled(v *LoggingEnabled) *GetBucketLoggingOutput { + s.LoggingEnabled = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationRequest +type GetBucketMetricsConfigurationInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the metrics configuration to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketMetricsConfigurationInput) SetBucket(v string) *GetBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. 
+func (s *GetBucketMetricsConfigurationInput) SetId(v string) *GetBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfigurationOutput +type GetBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // Specifies the metrics configuration. + MetricsConfiguration *MetricsConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *GetBucketMetricsConfigurationOutput) SetMetricsConfiguration(v *MetricsConfiguration) *GetBucketMetricsConfigurationOutput { + s.MetricsConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfigurationRequest +type GetBucketNotificationConfigurationRequest struct { + _ struct{} `type:"structure"` + + // Name of the bucket to get the notification configuration for. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketNotificationConfigurationRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketNotificationConfigurationRequest) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketNotificationConfigurationRequest) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketNotificationConfigurationRequest"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketNotificationConfigurationRequest) SetBucket(v string) *GetBucketNotificationConfigurationRequest { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyRequest +type GetBucketPolicyInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketPolicyInput) SetBucket(v string) *GetBucketPolicyInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyOutput +type GetBucketPolicyOutput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // The bucket policy as a JSON document. + Policy *string `type:"string"` +} + +// String returns the string representation +func (s GetBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketPolicyOutput) GoString() string { + return s.String() +} + +// SetPolicy sets the Policy field's value. +func (s *GetBucketPolicyOutput) SetPolicy(v string) *GetBucketPolicyOutput { + s.Policy = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationRequest +type GetBucketReplicationInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketReplicationInput) SetBucket(v string) *GetBucketReplicationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplicationOutput +type GetBucketReplicationOutput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + ReplicationConfiguration *ReplicationConfiguration `type:"structure"` +} + +// String returns the string representation +func (s GetBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketReplicationOutput) GoString() string { + return s.String() +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *GetBucketReplicationOutput) SetReplicationConfiguration(v *ReplicationConfiguration) *GetBucketReplicationOutput { + s.ReplicationConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentRequest +type GetBucketRequestPaymentInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
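
As a side note on the Validate methods repeated throughout this file: they can also be called directly to catch missing required parameters before any request is built. A minimal, illustrative sketch (nothing is sent, so no bucket or credentials are needed):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // A GetBucketPolicyInput whose required Bucket field is deliberately left unset.
        input := &s3.GetBucketPolicyInput{}

        // Validate reports the missing parameter locally; the SDK performs the same
        // check before it signs and sends the request.
        if err := input.Validate(); err != nil {
            fmt.Println(err) // describes the missing Bucket parameter
        }
    }
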
+func (s *GetBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketRequestPaymentInput) SetBucket(v string) *GetBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPaymentOutput +type GetBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + Payer *string `type:"string" enum:"Payer"` +} + +// String returns the string representation +func (s GetBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// SetPayer sets the Payer field's value. +func (s *GetBucketRequestPaymentOutput) SetPayer(v string) *GetBucketRequestPaymentOutput { + s.Payer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingRequest +type GetBucketTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketTaggingInput) SetBucket(v string) *GetBucketTaggingInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTaggingOutput +type GetBucketTaggingOutput struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s GetBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. 
+func (s *GetBucketTaggingOutput) SetTagSet(v []*Tag) *GetBucketTaggingOutput { + s.TagSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningRequest +type GetBucketVersioningInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetBucketVersioningInput) SetBucket(v string) *GetBucketVersioningInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioningOutput +type GetBucketVersioningOutput struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADeleteStatus"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s GetBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketVersioningOutput) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. +func (s *GetBucketVersioningOutput) SetMFADelete(v string) *GetBucketVersioningOutput { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *GetBucketVersioningOutput) SetStatus(v string) *GetBucketVersioningOutput { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteRequest +type GetBucketWebsiteInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s GetBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetBucketWebsiteInput) SetBucket(v string) *GetBucketWebsiteInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsiteOutput +type GetBucketWebsiteOutput struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s GetBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetBucketWebsiteOutput) GoString() string { + return s.String() +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *GetBucketWebsiteOutput) SetErrorDocument(v *ErrorDocument) *GetBucketWebsiteOutput { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *GetBucketWebsiteOutput) SetIndexDocument(v *IndexDocument) *GetBucketWebsiteOutput { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *GetBucketWebsiteOutput) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *GetBucketWebsiteOutput { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. +func (s *GetBucketWebsiteOutput) SetRoutingRules(v []*RoutingRule) *GetBucketWebsiteOutput { + s.RoutingRules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclRequest +type GetObjectAclInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
+func (s *GetObjectAclInput) SetBucket(v string) *GetObjectAclInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectAclInput) SetKey(v string) *GetObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectAclInput) SetRequestPayer(v string) *GetObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectAclInput) SetVersionId(v string) *GetObjectAclInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAclOutput +type GetObjectAclOutput struct { + _ struct{} `type:"structure"` + + // A list of grants. + Grants []*Grant `locationName:"AccessControlList" locationNameList:"Grant" type:"list"` + + Owner *Owner `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectAclOutput) GoString() string { + return s.String() +} + +// SetGrants sets the Grants field's value. +func (s *GetObjectAclOutput) SetGrants(v []*Grant) *GetObjectAclOutput { + s.Grants = v + return s +} + +// SetOwner sets the Owner field's value. +func (s *GetObjectAclOutput) SetOwner(v *Owner) *GetObjectAclOutput { + s.Owner = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectAclOutput) SetRequestCharged(v string) *GetObjectAclOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRequest +type GetObjectInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of the object being read. This is a positive integer between + // 1 and 10,000. Effectively performs a 'ranged' GET request for the part specified. + // Useful for downloading just a part of an object. + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` + + // Downloads the specified range bytes of an object. 
For more information about + // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Sets the Cache-Control header of the response. + ResponseCacheControl *string `location:"querystring" locationName:"response-cache-control" type:"string"` + + // Sets the Content-Disposition header of the response + ResponseContentDisposition *string `location:"querystring" locationName:"response-content-disposition" type:"string"` + + // Sets the Content-Encoding header of the response. + ResponseContentEncoding *string `location:"querystring" locationName:"response-content-encoding" type:"string"` + + // Sets the Content-Language header of the response. + ResponseContentLanguage *string `location:"querystring" locationName:"response-content-language" type:"string"` + + // Sets the Content-Type header of the response. + ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` + + // Sets the Expires header of the response. + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"iso8601"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *GetObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectInput) SetBucket(v string) *GetObjectInput { + s.Bucket = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *GetObjectInput) SetIfMatch(v string) *GetObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *GetObjectInput) SetIfModifiedSince(v time.Time) *GetObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *GetObjectInput) SetIfNoneMatch(v string) *GetObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. +func (s *GetObjectInput) SetIfUnmodifiedSince(v time.Time) *GetObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectInput) SetKey(v string) *GetObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *GetObjectInput) SetPartNumber(v int64) *GetObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *GetObjectInput) SetRange(v string) *GetObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectInput) SetRequestPayer(v string) *GetObjectInput { + s.RequestPayer = &v + return s +} + +// SetResponseCacheControl sets the ResponseCacheControl field's value. +func (s *GetObjectInput) SetResponseCacheControl(v string) *GetObjectInput { + s.ResponseCacheControl = &v + return s +} + +// SetResponseContentDisposition sets the ResponseContentDisposition field's value. +func (s *GetObjectInput) SetResponseContentDisposition(v string) *GetObjectInput { + s.ResponseContentDisposition = &v + return s +} + +// SetResponseContentEncoding sets the ResponseContentEncoding field's value. +func (s *GetObjectInput) SetResponseContentEncoding(v string) *GetObjectInput { + s.ResponseContentEncoding = &v + return s +} + +// SetResponseContentLanguage sets the ResponseContentLanguage field's value. +func (s *GetObjectInput) SetResponseContentLanguage(v string) *GetObjectInput { + s.ResponseContentLanguage = &v + return s +} + +// SetResponseContentType sets the ResponseContentType field's value. +func (s *GetObjectInput) SetResponseContentType(v string) *GetObjectInput { + s.ResponseContentType = &v + return s +} + +// SetResponseExpires sets the ResponseExpires field's value. +func (s *GetObjectInput) SetResponseExpires(v time.Time) *GetObjectInput { + s.ResponseExpires = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectInput) SetSSECustomerAlgorithm(v string) *GetObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *GetObjectInput) SetSSECustomerKey(v string) *GetObjectInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. 
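+// As an illustrative sketch only (not part of the generated file), each of these
+// setters returns its receiver, so a GetObject request can be assembled by
+// chaining; the *S3 client svc is an assumed variable:
+//
+//     input := &GetObjectInput{}
+//     input.SetBucket("example-bucket").SetKey("path/to/object").SetRange("bytes=0-1023")
+//     out, err := svc.GetObject(input)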
+func (s *GetObjectInput) SetSSECustomerKeyMD5(v string) *GetObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectInput) SetVersionId(v string) *GetObjectInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectOutput +type GetObjectOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Object data. + Body io.ReadCloser `type:"blob"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // The portion of the object returned in the response. + ContentRange *string `location:"header" locationName:"Content-Range" type:"string"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. + Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // The count of parts this object has. 
+ PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The number of tags, if any, on the object. + TagCount *int64 `location:"header" locationName:"x-amz-tagging-count" type:"integer"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s GetObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. +func (s *GetObjectOutput) SetAcceptRanges(v string) *GetObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetBody sets the Body field's value. +func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { + s.Body = v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *GetObjectOutput) SetContentDisposition(v string) *GetObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. 
+func (s *GetObjectOutput) SetContentEncoding(v string) *GetObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *GetObjectOutput) SetContentLanguage(v string) *GetObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *GetObjectOutput) SetContentLength(v int64) *GetObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentRange sets the ContentRange field's value. +func (s *GetObjectOutput) SetContentRange(v string) *GetObjectOutput { + s.ContentRange = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *GetObjectOutput) SetContentType(v string) *GetObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *GetObjectOutput) SetDeleteMarker(v bool) *GetObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *GetObjectOutput) SetETag(v string) *GetObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *GetObjectOutput) SetExpiration(v string) *GetObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *GetObjectOutput) SetExpires(v string) *GetObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *GetObjectOutput) SetLastModified(v time.Time) *GetObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *GetObjectOutput) SetMetadata(v map[string]*string) *GetObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *GetObjectOutput) SetMissingMeta(v int64) *GetObjectOutput { + s.MissingMeta = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *GetObjectOutput) SetPartsCount(v int64) *GetObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *GetObjectOutput) SetReplicationStatus(v string) *GetObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectOutput) SetRequestCharged(v string) *GetObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *GetObjectOutput) SetRestore(v string) *GetObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *GetObjectOutput) SetSSECustomerAlgorithm(v string) *GetObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *GetObjectOutput) SetSSECustomerKeyMD5(v string) *GetObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *GetObjectOutput) SetSSEKMSKeyId(v string) *GetObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *GetObjectOutput) SetServerSideEncryption(v string) *GetObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *GetObjectOutput) SetStorageClass(v string) *GetObjectOutput { + s.StorageClass = &v + return s +} + +// SetTagCount sets the TagCount field's value. 
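+// Illustrative sketch only: out is an assumed *GetObjectOutput returned by
+// GetObject. Its Body stream should be drained and closed by the caller, here
+// using the standard io/ioutil package:
+//
+//     defer out.Body.Close()
+//     data, err := ioutil.ReadAll(out.Body)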
+func (s *GetObjectOutput) SetTagCount(v int64) *GetObjectOutput { + s.TagCount = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectOutput) SetVersionId(v string) *GetObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *GetObjectOutput) SetWebsiteRedirectLocation(v string) *GetObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingRequest +type GetObjectTaggingInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTaggingInput) SetBucket(v string) *GetObjectTaggingInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTaggingInput) SetKey(v string) *GetObjectTaggingInput { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *GetObjectTaggingInput) SetVersionId(v string) *GetObjectTaggingInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTaggingOutput +type GetObjectTaggingOutput struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` + + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s GetObjectTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTaggingOutput) GoString() string { + return s.String() +} + +// SetTagSet sets the TagSet field's value. +func (s *GetObjectTaggingOutput) SetTagSet(v []*Tag) *GetObjectTaggingOutput { + s.TagSet = v + return s +} + +// SetVersionId sets the VersionId field's value. 
+func (s *GetObjectTaggingOutput) SetVersionId(v string) *GetObjectTaggingOutput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentRequest +type GetObjectTorrentInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s GetObjectTorrentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetObjectTorrentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetObjectTorrentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *GetObjectTorrentInput) SetBucket(v string) *GetObjectTorrentInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *GetObjectTorrentInput) SetKey(v string) *GetObjectTorrentInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *GetObjectTorrentInput) SetRequestPayer(v string) *GetObjectTorrentInput { + s.RequestPayer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrentOutput +type GetObjectTorrentOutput struct { + _ struct{} `type:"structure" payload:"Body"` + + Body io.ReadCloser `type:"blob"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s GetObjectTorrentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetObjectTorrentOutput) GoString() string { + return s.String() +} + +// SetBody sets the Body field's value. +func (s *GetObjectTorrentOutput) SetBody(v io.ReadCloser) *GetObjectTorrentOutput { + s.Body = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *GetObjectTorrentOutput) SetRequestCharged(v string) *GetObjectTorrentOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters +type GlacierJobParameters struct { + _ struct{} `type:"structure"` + + // Glacier retrieval tier at which the restore will be processed. 
+ // + // Tier is a required field + Tier *string `type:"string" required:"true" enum:"Tier"` +} + +// String returns the string representation +func (s GlacierJobParameters) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GlacierJobParameters) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GlacierJobParameters) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GlacierJobParameters"} + if s.Tier == nil { + invalidParams.Add(request.NewErrParamRequired("Tier")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTier sets the Tier field's value. +func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters { + s.Tier = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant +type Grant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Specifies the permission given to the grantee. + Permission *string `type:"string" enum:"Permission"` +} + +// String returns the string representation +func (s Grant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *Grant) SetGrantee(v *Grantee) *Grant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. +func (s *Grant) SetPermission(v string) *Grant { + s.Permission = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee +type Grantee struct { + _ struct{} `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"` + + // Screen name of the grantee. + DisplayName *string `type:"string"` + + // Email address of the grantee. + EmailAddress *string `type:"string"` + + // The canonical user ID of the grantee. + ID *string `type:"string"` + + // Type of grantee + // + // Type is a required field + Type *string `locationName:"xsi:type" type:"string" xmlAttribute:"true" required:"true" enum:"Type"` + + // URI of the grantee group. + URI *string `type:"string"` +} + +// String returns the string representation +func (s Grantee) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Grantee) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Grantee) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Grantee"} + if s.Type == nil { + invalidParams.Add(request.NewErrParamRequired("Type")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Grantee) SetDisplayName(v string) *Grantee { + s.DisplayName = &v + return s +} + +// SetEmailAddress sets the EmailAddress field's value. 
+func (s *Grantee) SetEmailAddress(v string) *Grantee { + s.EmailAddress = &v + return s +} + +// SetID sets the ID field's value. +func (s *Grantee) SetID(v string) *Grantee { + s.ID = &v + return s +} + +// SetType sets the Type field's value. +func (s *Grantee) SetType(v string) *Grantee { + s.Type = &v + return s +} + +// SetURI sets the URI field's value. +func (s *Grantee) SetURI(v string) *Grantee { + s.URI = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketRequest +type HeadBucketInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s HeadBucketInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadBucketInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadBucketInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *HeadBucketInput) SetBucket(v string) *HeadBucketInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucketOutput +type HeadBucketOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s HeadBucketOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadBucketOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectRequest +type HeadObjectInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Return the object only if its entity tag (ETag) is the same as the one specified, + // otherwise return a 412 (precondition failed). + IfMatch *string `location:"header" locationName:"If-Match" type:"string"` + + // Return the object only if it has been modified since the specified time, + // otherwise return a 304 (not modified). + IfModifiedSince *time.Time `location:"header" locationName:"If-Modified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Return the object only if its entity tag (ETag) is different from the one + // specified, otherwise return a 304 (not modified). + IfNoneMatch *string `location:"header" locationName:"If-None-Match" type:"string"` + + // Return the object only if it has not been modified since the specified time, + // otherwise return a 412 (precondition failed). + IfUnmodifiedSince *time.Time `location:"header" locationName:"If-Unmodified-Since" type:"timestamp" timestampFormat:"rfc822"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of the object being read. This is a positive integer between + // 1 and 10,000. Effectively performs a 'ranged' HEAD request for the part specified. + // Useful querying about the size of the part and the number of parts in this + // object. 
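+ // Illustrative sketch only (svc is an assumed *S3 client): inspecting the
+ // first part of a multipart object,
+ //
+ //     out, err := svc.HeadObject((&HeadObjectInput{}).
+ //         SetBucket("example-bucket").SetKey("path/to/object").SetPartNumber(1))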
+ PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer"` + + // Downloads the specified range bytes of an object. For more information about + // the HTTP Range header, go to http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35. + Range *string `location:"header" locationName:"Range" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s HeadObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *HeadObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "HeadObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *HeadObjectInput) SetBucket(v string) *HeadObjectInput { + s.Bucket = &v + return s +} + +// SetIfMatch sets the IfMatch field's value. +func (s *HeadObjectInput) SetIfMatch(v string) *HeadObjectInput { + s.IfMatch = &v + return s +} + +// SetIfModifiedSince sets the IfModifiedSince field's value. +func (s *HeadObjectInput) SetIfModifiedSince(v time.Time) *HeadObjectInput { + s.IfModifiedSince = &v + return s +} + +// SetIfNoneMatch sets the IfNoneMatch field's value. +func (s *HeadObjectInput) SetIfNoneMatch(v string) *HeadObjectInput { + s.IfNoneMatch = &v + return s +} + +// SetIfUnmodifiedSince sets the IfUnmodifiedSince field's value. 
+func (s *HeadObjectInput) SetIfUnmodifiedSince(v time.Time) *HeadObjectInput { + s.IfUnmodifiedSince = &v + return s +} + +// SetKey sets the Key field's value. +func (s *HeadObjectInput) SetKey(v string) *HeadObjectInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *HeadObjectInput) SetPartNumber(v int64) *HeadObjectInput { + s.PartNumber = &v + return s +} + +// SetRange sets the Range field's value. +func (s *HeadObjectInput) SetRange(v string) *HeadObjectInput { + s.Range = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *HeadObjectInput) SetRequestPayer(v string) *HeadObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectInput) SetSSECustomerAlgorithm(v string) *HeadObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *HeadObjectInput) SetSSECustomerKey(v string) *HeadObjectInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectInput) SetSSECustomerKeyMD5(v string) *HeadObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectInput) SetVersionId(v string) *HeadObjectInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObjectOutput +type HeadObjectOutput struct { + _ struct{} `type:"structure"` + + AcceptRanges *string `location:"header" locationName:"accept-ranges" type:"string"` + + // Specifies caching behavior along the request/reply chain. + CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` + + // Specifies presentational information for the object. + ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"` + + // Specifies what content encodings have been applied to the object and thus + // what decoding mechanisms must be applied to obtain the media-type referenced + // by the Content-Type header field. + ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"` + + // The language the content is in. + ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"` + + // Size of the body in bytes. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // A standard MIME type describing the format of the object data. + ContentType *string `location:"header" locationName:"Content-Type" type:"string"` + + // Specifies whether the object retrieved was (true) or was not (false) a Delete + // Marker. If false, this response header does not appear in the response. + DeleteMarker *bool `location:"header" locationName:"x-amz-delete-marker" type:"boolean"` + + // An ETag is an opaque identifier assigned by a web server to a specific version + // of a resource found at a URL + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured (see PUT Bucket lifecycle), the response + // includes this header. It includes the expiry-date and rule-id key value pairs + // providing object expiration information. The value of the rule-id is URL + // encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // The date and time at which the object is no longer cacheable. 
+ Expires *string `location:"header" locationName:"Expires" type:"string"` + + // Last modified date of the object + LastModified *time.Time `location:"header" locationName:"Last-Modified" type:"timestamp" timestampFormat:"rfc822"` + + // A map of metadata to store with the object in S3. + Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"` + + // This is set to the number of metadata entries not returned in x-amz-meta + // headers. This can happen if you create metadata using an API like SOAP that + // supports more flexible metadata than the REST API. For example, using SOAP, + // you can create metadata whose values are not legal HTTP headers. + MissingMeta *int64 `location:"header" locationName:"x-amz-missing-meta" type:"integer"` + + // The count of parts this object has. + PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` + + ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // Provides information about object restoration operation and expiration time + // of the restored object copy. + Restore *string `location:"header" locationName:"x-amz-restore" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s HeadObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s HeadObjectOutput) GoString() string { + return s.String() +} + +// SetAcceptRanges sets the AcceptRanges field's value. 
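+// Illustrative sketch only (out is an assumed *HeadObjectOutput): response
+// fields are pointers, so the helpers in the aws package dereference them safely:
+//
+//     size := aws.Int64Value(out.ContentLength)
+//     etag := aws.StringValue(out.ETag)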
+func (s *HeadObjectOutput) SetAcceptRanges(v string) *HeadObjectOutput { + s.AcceptRanges = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. +func (s *HeadObjectOutput) SetContentDisposition(v string) *HeadObjectOutput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *HeadObjectOutput) SetContentEncoding(v string) *HeadObjectOutput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *HeadObjectOutput) SetContentLanguage(v string) *HeadObjectOutput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *HeadObjectOutput) SetContentLength(v int64) *HeadObjectOutput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *HeadObjectOutput) SetContentType(v string) *HeadObjectOutput { + s.ContentType = &v + return s +} + +// SetDeleteMarker sets the DeleteMarker field's value. +func (s *HeadObjectOutput) SetDeleteMarker(v bool) *HeadObjectOutput { + s.DeleteMarker = &v + return s +} + +// SetETag sets the ETag field's value. +func (s *HeadObjectOutput) SetETag(v string) *HeadObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *HeadObjectOutput) SetExpiration(v string) *HeadObjectOutput { + s.Expiration = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *HeadObjectOutput) SetExpires(v string) *HeadObjectOutput { + s.Expires = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *HeadObjectOutput) SetLastModified(v time.Time) *HeadObjectOutput { + s.LastModified = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *HeadObjectOutput) SetMetadata(v map[string]*string) *HeadObjectOutput { + s.Metadata = v + return s +} + +// SetMissingMeta sets the MissingMeta field's value. +func (s *HeadObjectOutput) SetMissingMeta(v int64) *HeadObjectOutput { + s.MissingMeta = &v + return s +} + +// SetPartsCount sets the PartsCount field's value. +func (s *HeadObjectOutput) SetPartsCount(v int64) *HeadObjectOutput { + s.PartsCount = &v + return s +} + +// SetReplicationStatus sets the ReplicationStatus field's value. +func (s *HeadObjectOutput) SetReplicationStatus(v string) *HeadObjectOutput { + s.ReplicationStatus = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *HeadObjectOutput) SetRequestCharged(v string) *HeadObjectOutput { + s.RequestCharged = &v + return s +} + +// SetRestore sets the Restore field's value. +func (s *HeadObjectOutput) SetRestore(v string) *HeadObjectOutput { + s.Restore = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *HeadObjectOutput) SetSSECustomerAlgorithm(v string) *HeadObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *HeadObjectOutput) SetSSECustomerKeyMD5(v string) *HeadObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. 
+func (s *HeadObjectOutput) SetSSEKMSKeyId(v string) *HeadObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *HeadObjectOutput) SetServerSideEncryption(v string) *HeadObjectOutput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *HeadObjectOutput) SetStorageClass(v string) *HeadObjectOutput { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *HeadObjectOutput) SetVersionId(v string) *HeadObjectOutput { + s.VersionId = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. +func (s *HeadObjectOutput) SetWebsiteRedirectLocation(v string) *HeadObjectOutput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument +type IndexDocument struct { + _ struct{} `type:"structure"` + + // A suffix that is appended to a request that is for a directory on the website + // endpoint (e.g. if the suffix is index.html and you make a request to samplebucket/images/ + // the data that is returned will be for the object with the key name images/index.html) + // The suffix must not be empty and must not include a slash character. + // + // Suffix is a required field + Suffix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s IndexDocument) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s IndexDocument) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *IndexDocument) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "IndexDocument"} + if s.Suffix == nil { + invalidParams.Add(request.NewErrParamRequired("Suffix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetSuffix sets the Suffix field's value. +func (s *IndexDocument) SetSuffix(v string) *IndexDocument { + s.Suffix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator +type Initiator struct { + _ struct{} `type:"structure"` + + // Name of the Principal. + DisplayName *string `type:"string"` + + // If the principal is an AWS account, it provides the Canonical User ID. If + // the principal is an IAM User, it provides a user ARN value. + ID *string `type:"string"` +} + +// String returns the string representation +func (s Initiator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Initiator) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Initiator) SetDisplayName(v string) *Initiator { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Initiator) SetID(v string) *Initiator { + s.ID = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration +type InventoryConfiguration struct { + _ struct{} `type:"structure"` + + // Contains information about where to publish the inventory results. + // + // Destination is a required field + Destination *InventoryDestination `type:"structure" required:"true"` + + // Specifies an inventory filter. The inventory only includes objects that meet + // the filter's criteria. 
+ Filter *InventoryFilter `type:"structure"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` + + // Specifies which object version(s) to included in the inventory results. + // + // IncludedObjectVersions is a required field + IncludedObjectVersions *string `type:"string" required:"true" enum:"InventoryIncludedObjectVersions"` + + // Specifies whether the inventory is enabled or disabled. + // + // IsEnabled is a required field + IsEnabled *bool `type:"boolean" required:"true"` + + // Contains the optional fields that are included in the inventory results. + OptionalFields []*string `locationNameList:"Field" type:"list"` + + // Specifies the schedule for generating inventory results. + // + // Schedule is a required field + Schedule *InventorySchedule `type:"structure" required:"true"` +} + +// String returns the string representation +func (s InventoryConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryConfiguration"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.IncludedObjectVersions == nil { + invalidParams.Add(request.NewErrParamRequired("IncludedObjectVersions")) + } + if s.IsEnabled == nil { + invalidParams.Add(request.NewErrParamRequired("IsEnabled")) + } + if s.Schedule == nil { + invalidParams.Add(request.NewErrParamRequired("Schedule")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + if s.Schedule != nil { + if err := s.Schedule.Validate(); err != nil { + invalidParams.AddNested("Schedule", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *InventoryConfiguration) SetDestination(v *InventoryDestination) *InventoryConfiguration { + s.Destination = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *InventoryConfiguration) SetFilter(v *InventoryFilter) *InventoryConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *InventoryConfiguration) SetId(v string) *InventoryConfiguration { + s.Id = &v + return s +} + +// SetIncludedObjectVersions sets the IncludedObjectVersions field's value. +func (s *InventoryConfiguration) SetIncludedObjectVersions(v string) *InventoryConfiguration { + s.IncludedObjectVersions = &v + return s +} + +// SetIsEnabled sets the IsEnabled field's value. +func (s *InventoryConfiguration) SetIsEnabled(v bool) *InventoryConfiguration { + s.IsEnabled = &v + return s +} + +// SetOptionalFields sets the OptionalFields field's value. +func (s *InventoryConfiguration) SetOptionalFields(v []*string) *InventoryConfiguration { + s.OptionalFields = v + return s +} + +// SetSchedule sets the Schedule field's value. 
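+// Illustrative sketch only (the ID, bucket ARN, and enum strings below are
+// example values, not defaults): a minimal configuration assembled with the
+// setters and checked with Validate,
+//
+//     cfg := (&InventoryConfiguration{}).
+//         SetId("report-1").
+//         SetIsEnabled(true).
+//         SetIncludedObjectVersions("All").
+//         SetSchedule((&InventorySchedule{}).SetFrequency("Daily")).
+//         SetDestination((&InventoryDestination{}).SetS3BucketDestination(
+//             (&InventoryS3BucketDestination{}).
+//                 SetBucket("arn:aws:s3:::example-destination-bucket").
+//                 SetFormat("CSV")))
+//     err := cfg.Validate()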
+func (s *InventoryConfiguration) SetSchedule(v *InventorySchedule) *InventoryConfiguration { + s.Schedule = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination +type InventoryDestination struct { + _ struct{} `type:"structure"` + + // Contains the bucket name, file format, bucket owner (optional), and prefix + // (optional) where inventory results are published. + // + // S3BucketDestination is a required field + S3BucketDestination *InventoryS3BucketDestination `type:"structure" required:"true"` +} + +// String returns the string representation +func (s InventoryDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryDestination"} + if s.S3BucketDestination == nil { + invalidParams.Add(request.NewErrParamRequired("S3BucketDestination")) + } + if s.S3BucketDestination != nil { + if err := s.S3BucketDestination.Validate(); err != nil { + invalidParams.AddNested("S3BucketDestination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetS3BucketDestination sets the S3BucketDestination field's value. +func (s *InventoryDestination) SetS3BucketDestination(v *InventoryS3BucketDestination) *InventoryDestination { + s.S3BucketDestination = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter +type InventoryFilter struct { + _ struct{} `type:"structure"` + + // The prefix that an object must have to be included in the inventory results. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s InventoryFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryFilter"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryFilter) SetPrefix(v string) *InventoryFilter { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination +type InventoryS3BucketDestination struct { + _ struct{} `type:"structure"` + + // The ID of the account that owns the destination bucket. + AccountId *string `type:"string"` + + // The Amazon resource name (ARN) of the bucket where inventory results will + // be published. + // + // Bucket is a required field + Bucket *string `type:"string" required:"true"` + + // Specifies the output format of the inventory results. + // + // Format is a required field + Format *string `type:"string" required:"true" enum:"InventoryFormat"` + + // The prefix that is prepended to all inventory results. 
+ Prefix *string `type:"string"` +} + +// String returns the string representation +func (s InventoryS3BucketDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventoryS3BucketDestination) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventoryS3BucketDestination) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventoryS3BucketDestination"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Format == nil { + invalidParams.Add(request.NewErrParamRequired("Format")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccountId sets the AccountId field's value. +func (s *InventoryS3BucketDestination) SetAccountId(v string) *InventoryS3BucketDestination { + s.AccountId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *InventoryS3BucketDestination) SetBucket(v string) *InventoryS3BucketDestination { + s.Bucket = &v + return s +} + +// SetFormat sets the Format field's value. +func (s *InventoryS3BucketDestination) SetFormat(v string) *InventoryS3BucketDestination { + s.Format = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *InventoryS3BucketDestination) SetPrefix(v string) *InventoryS3BucketDestination { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule +type InventorySchedule struct { + _ struct{} `type:"structure"` + + // Specifies how frequently inventory results are produced. + // + // Frequency is a required field + Frequency *string `type:"string" required:"true" enum:"InventoryFrequency"` +} + +// String returns the string representation +func (s InventorySchedule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s InventorySchedule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *InventorySchedule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "InventorySchedule"} + if s.Frequency == nil { + invalidParams.Add(request.NewErrParamRequired("Frequency")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFrequency sets the Frequency field's value. +func (s *InventorySchedule) SetFrequency(v string) *InventorySchedule { + s.Frequency = &v + return s +} + +// Container for object key name prefix and suffix filtering rules. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter +type KeyFilter struct { + _ struct{} `type:"structure"` + + // A list of containers for key value pair that defines the criteria for the + // filter rule. + FilterRules []*FilterRule `locationName:"FilterRule" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s KeyFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KeyFilter) GoString() string { + return s.String() +} + +// SetFilterRules sets the FilterRules field's value. +func (s *KeyFilter) SetFilterRules(v []*FilterRule) *KeyFilter { + s.FilterRules = v + return s +} + +// Container for specifying the AWS Lambda notification configuration. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration +type LambdaFunctionConfiguration struct { + _ struct{} `type:"structure"` + + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Lambda cloud function ARN that Amazon S3 can invoke when it detects events + // of the specified type. + // + // LambdaFunctionArn is a required field + LambdaFunctionArn *string `locationName:"CloudFunction" type:"string" required:"true"` +} + +// String returns the string representation +func (s LambdaFunctionConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LambdaFunctionConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LambdaFunctionConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LambdaFunctionConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.LambdaFunctionArn == nil { + invalidParams.Add(request.NewErrParamRequired("LambdaFunctionArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *LambdaFunctionConfiguration) SetEvents(v []*string) *LambdaFunctionConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LambdaFunctionConfiguration) SetFilter(v *NotificationConfigurationFilter) *LambdaFunctionConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *LambdaFunctionConfiguration) SetId(v string) *LambdaFunctionConfiguration { + s.Id = &v + return s +} + +// SetLambdaFunctionArn sets the LambdaFunctionArn field's value. +func (s *LambdaFunctionConfiguration) SetLambdaFunctionArn(v string) *LambdaFunctionConfiguration { + s.LambdaFunctionArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration +type LifecycleConfiguration struct { + _ struct{} `type:"structure"` + + // Rules is a required field + Rules []*Rule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s LifecycleConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
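+// Illustrative sketch only: Validate aggregates every missing required field
+// into a single error, e.g. an empty configuration reports the missing Rules:
+//
+//     cfg := &LifecycleConfiguration{}
+//     err := cfg.Validate() // non-nil: "Rules" is required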
+func (s *LifecycleConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleConfiguration"} + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRules sets the Rules field's value. +func (s *LifecycleConfiguration) SetRules(v []*Rule) *LifecycleConfiguration { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration +type LifecycleExpiration struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // Indicates whether Amazon S3 will remove a delete marker with no noncurrent + // versions. If set to true, the delete marker will be expired; if set to false + // the policy takes no action. This cannot be specified with Days or Date in + // a Lifecycle Expiration Policy. + ExpiredObjectDeleteMarker *bool `type:"boolean"` +} + +// String returns the string representation +func (s LifecycleExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleExpiration) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *LifecycleExpiration) SetDate(v time.Time) *LifecycleExpiration { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *LifecycleExpiration) SetDays(v int64) *LifecycleExpiration { + s.Days = &v + return s +} + +// SetExpiredObjectDeleteMarker sets the ExpiredObjectDeleteMarker field's value. +func (s *LifecycleExpiration) SetExpiredObjectDeleteMarker(v bool) *LifecycleExpiration { + s.ExpiredObjectDeleteMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule +type LifecycleRule struct { + _ struct{} `type:"structure"` + + // Specifies the days since the initiation of an Incomplete Multipart Upload + // that Lifecycle will wait before permanently removing all parts of the upload. + AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"` + + Expiration *LifecycleExpiration `type:"structure"` + + // The Filter is used to identify objects that a Lifecycle Rule applies to. + // A Filter must have exactly one of Prefix, Tag, or And specified. + Filter *LifecycleRuleFilter `type:"structure"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Specifies when noncurrent object versions expire. Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. 
+ NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + NoncurrentVersionTransitions []*NoncurrentVersionTransition `locationName:"NoncurrentVersionTransition" type:"list" flattened:"true"` + + // Prefix identifying one or more objects to which the rule applies. This is + // deprecated; use Filter instead. + Prefix *string `deprecated:"true" type:"string"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transitions []*Transition `locationName:"Transition" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRule"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *LifecycleRule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *LifecycleRule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *LifecycleRule) SetExpiration(v *LifecycleExpiration) *LifecycleRule { + s.Expiration = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *LifecycleRule) SetFilter(v *LifecycleRuleFilter) *LifecycleRule { + s.Filter = v + return s +} + +// SetID sets the ID field's value. +func (s *LifecycleRule) SetID(v string) *LifecycleRule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *LifecycleRule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *LifecycleRule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransitions sets the NoncurrentVersionTransitions field's value. +func (s *LifecycleRule) SetNoncurrentVersionTransitions(v []*NoncurrentVersionTransition) *LifecycleRule { + s.NoncurrentVersionTransitions = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRule) SetPrefix(v string) *LifecycleRule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *LifecycleRule) SetStatus(v string) *LifecycleRule { + s.Status = &v + return s +} + +// SetTransitions sets the Transitions field's value. +func (s *LifecycleRule) SetTransitions(v []*Transition) *LifecycleRule { + s.Transitions = v + return s +} + +// This is used in a Lifecycle Rule Filter to apply a logical AND to two or +// more predicates. The Lifecycle Rule will apply to any object matching all +// of the predicates configured inside the And operator. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator +type LifecycleRuleAndOperator struct { + _ struct{} `type:"structure"` + + Prefix *string `type:"string"` + + // All of these tags must exist in the object's tag set in order for the rule + // to apply. + Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s LifecycleRuleAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRuleAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleAndOperator) SetPrefix(v string) *LifecycleRuleAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *LifecycleRuleAndOperator) SetTags(v []*Tag) *LifecycleRuleAndOperator { + s.Tags = v + return s +} + +// The Filter is used to identify objects that a Lifecycle Rule applies to. +// A Filter must have exactly one of Prefix, Tag, or And specified. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter +type LifecycleRuleFilter struct { + _ struct{} `type:"structure"` + + // This is used in a Lifecycle Rule Filter to apply a logical AND to two or + // more predicates. The Lifecycle Rule will apply to any object matching all + // of the predicates configured inside the And operator. + And *LifecycleRuleAndOperator `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + Prefix *string `type:"string"` + + // This tag must exist in the object's tag set in order for the rule to apply. + Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s LifecycleRuleFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LifecycleRuleFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LifecycleRuleFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LifecycleRuleFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *LifecycleRuleFilter) SetAnd(v *LifecycleRuleAndOperator) *LifecycleRuleFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *LifecycleRuleFilter) SetPrefix(v string) *LifecycleRuleFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. 
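
// Editorial illustration, not part of this patch or the vendored upstream
// file: the LifecycleRule, LifecycleRuleFilter and LifecycleExpiration types
// above are assembled by a caller and applied with
// PutBucketLifecycleConfiguration. A minimal sketch from a consuming package,
// assuming a configured client `svc` (*s3.S3); the bucket name, rule ID and
// prefix are placeholders. Note that the Filter carries exactly one of
// Prefix, Tag or And, as documented above.
//
//	rule := (&s3.LifecycleRule{}).
//		SetID("expire-logs").
//		SetStatus("Enabled").
//		SetFilter((&s3.LifecycleRuleFilter{}).SetPrefix("logs/")).
//		SetExpiration((&s3.LifecycleExpiration{}).SetDays(30))
//
//	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
//		Bucket:                 aws.String("example-bucket"),
//		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{Rules: []*s3.LifecycleRule{rule}},
//	})
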
+func (s *LifecycleRuleFilter) SetTag(v *Tag) *LifecycleRuleFilter { + s.Tag = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsRequest +type ListBucketAnalyticsConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket from which analytics configurations are retrieved. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ContinuationToken that represents a placeholder from where this request + // should begin. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketAnalyticsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketAnalyticsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetBucket(v string) *ListBucketAnalyticsConfigurationsInput { + s.Bucket = &v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsInput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurationsOutput +type ListBucketAnalyticsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The list of analytics configurations for a bucket. + AnalyticsConfigurationList []*AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"list" flattened:"true"` + + // The ContinuationToken that represents where this request began. + ContinuationToken *string `type:"string"` + + // Indicates whether the returned list of analytics configurations is complete. + // A value of true indicates that the list is not complete and the NextContinuationToken + // will be provided for a subsequent request. + IsTruncated *bool `type:"boolean"` + + // NextContinuationToken is sent when isTruncated is true, which indicates that + // there are more analytics configurations to list. The next request must include + // this NextContinuationToken. The token is obfuscated and is not a usable value. + NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketAnalyticsConfigurationsOutput) GoString() string { + return s.String() +} + +// SetAnalyticsConfigurationList sets the AnalyticsConfigurationList field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetAnalyticsConfigurationList(v []*AnalyticsConfiguration) *ListBucketAnalyticsConfigurationsOutput { + s.AnalyticsConfigurationList = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. 
+func (s *ListBucketAnalyticsConfigurationsOutput) SetContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketAnalyticsConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketAnalyticsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketAnalyticsConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsRequest +type ListBucketInventoryConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the inventory configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker used to continue an inventory configuration listing that has been + // truncated. Use the NextContinuationToken from a previously truncated list + // response to continue the listing. The continuation token is an opaque value + // that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketInventoryConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketInventoryConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketInventoryConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketInventoryConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketInventoryConfigurationsInput) SetBucket(v string) *ListBucketInventoryConfigurationsInput { + s.Bucket = &v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsInput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurationsOutput +type ListBucketInventoryConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // If sent in the request, the marker that is used as a starting point for this + // inventory configuration list response. + ContinuationToken *string `type:"string"` + + // The list of inventory configurations for a bucket. + InventoryConfigurationList []*InventoryConfiguration `locationName:"InventoryConfiguration" type:"list" flattened:"true"` + + // Indicates whether the returned list of inventory configurations is truncated + // in this response. A value of true indicates that the list is truncated. + IsTruncated *bool `type:"boolean"` + + // The marker used to continue this inventory configuration listing. Use the + // NextContinuationToken from this response to continue the listing in a subsequent + // request. The continuation token is an opaque value that Amazon S3 understands. 
+ NextContinuationToken *string `type:"string"` +} + +// String returns the string representation +func (s ListBucketInventoryConfigurationsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketInventoryConfigurationsOutput) GoString() string { + return s.String() +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.ContinuationToken = &v + return s +} + +// SetInventoryConfigurationList sets the InventoryConfigurationList field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetInventoryConfigurationList(v []*InventoryConfiguration) *ListBucketInventoryConfigurationsOutput { + s.InventoryConfigurationList = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetIsTruncated(v bool) *ListBucketInventoryConfigurationsOutput { + s.IsTruncated = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListBucketInventoryConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketInventoryConfigurationsOutput { + s.NextContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsRequest +type ListBucketMetricsConfigurationsInput struct { + _ struct{} `type:"structure"` + + // The name of the bucket containing the metrics configurations to retrieve. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The marker that is used to continue a metrics configuration listing that + // has been truncated. Use the NextContinuationToken from a previously truncated + // list response to continue the listing. The continuation token is an opaque + // value that Amazon S3 understands. + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` +} + +// String returns the string representation +func (s ListBucketMetricsConfigurationsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListBucketMetricsConfigurationsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListBucketMetricsConfigurationsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListBucketMetricsConfigurationsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListBucketMetricsConfigurationsInput) SetBucket(v string) *ListBucketMetricsConfigurationsInput { + s.Bucket = &v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListBucketMetricsConfigurationsInput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsInput { + s.ContinuationToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurationsOutput +type ListBucketMetricsConfigurationsOutput struct { + _ struct{} `type:"structure"` + + // The marker that is used as a starting point for this metrics configuration + // list response. 
+	// This value is present if it was sent in the request.
+	ContinuationToken *string `type:"string"`
+
+	// Indicates whether the returned list of metrics configurations is complete.
+	// A value of true indicates that the list is not complete and the NextContinuationToken
+	// will be provided for a subsequent request.
+	IsTruncated *bool `type:"boolean"`
+
+	// The list of metrics configurations for a bucket.
+	MetricsConfigurationList []*MetricsConfiguration `locationName:"MetricsConfiguration" type:"list" flattened:"true"`
+
+	// The marker used to continue a metrics configuration listing that has been
+	// truncated. Use the NextContinuationToken from a previously truncated list
+	// response to continue the listing. The continuation token is an opaque value
+	// that Amazon S3 understands.
+	NextContinuationToken *string `type:"string"`
+}
+
+// String returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketMetricsConfigurationsOutput) GoString() string {
+	return s.String()
+}
+
+// SetContinuationToken sets the ContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+	s.ContinuationToken = &v
+	return s
+}
+
+// SetIsTruncated sets the IsTruncated field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetIsTruncated(v bool) *ListBucketMetricsConfigurationsOutput {
+	s.IsTruncated = &v
+	return s
+}
+
+// SetMetricsConfigurationList sets the MetricsConfigurationList field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetMetricsConfigurationList(v []*MetricsConfiguration) *ListBucketMetricsConfigurationsOutput {
+	s.MetricsConfigurationList = v
+	return s
+}
+
+// SetNextContinuationToken sets the NextContinuationToken field's value.
+func (s *ListBucketMetricsConfigurationsOutput) SetNextContinuationToken(v string) *ListBucketMetricsConfigurationsOutput {
+	s.NextContinuationToken = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsInput
+type ListBucketsInput struct {
+	_ struct{} `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListBucketsInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketsInput) GoString() string {
+	return s.String()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketsOutput
+type ListBucketsOutput struct {
+	_ struct{} `type:"structure"`
+
+	Buckets []*Bucket `locationNameList:"Bucket" type:"list"`
+
+	Owner *Owner `type:"structure"`
+}
+
+// String returns the string representation
+func (s ListBucketsOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s ListBucketsOutput) GoString() string {
+	return s.String()
+}
+
+// SetBuckets sets the Buckets field's value.
+func (s *ListBucketsOutput) SetBuckets(v []*Bucket) *ListBucketsOutput {
+	s.Buckets = v
+	return s
+}
+
+// SetOwner sets the Owner field's value.
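
// Editorial illustration, not part of this patch or the vendored upstream
// file: the IsTruncated/NextContinuationToken pair documented above drives
// pagination of bucket metrics configurations. A minimal sketch, assuming a
// configured client `svc` (*s3.S3) and a placeholder bucket name:
//
//	var token *string
//	for {
//		out, err := svc.ListBucketMetricsConfigurations(&s3.ListBucketMetricsConfigurationsInput{
//			Bucket:            aws.String("example-bucket"),
//			ContinuationToken: token,
//		})
//		if err != nil {
//			break // handle the error in real code
//		}
//		// use out.MetricsConfigurationList here
//		if !aws.BoolValue(out.IsTruncated) {
//			break
//		}
//		token = out.NextContinuationToken
//	}
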
+func (s *ListBucketsOutput) SetOwner(v *Owner) *ListBucketsOutput { + s.Owner = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsRequest +type ListMultipartUploadsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Together with upload-id-marker, this parameter specifies the multipart upload + // after which listing should begin. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of multipart uploads, from 1 to 1,000, to return + // in the response body. 1,000 is the maximum number of uploads that can be + // returned in a response. + MaxUploads *int64 `location:"querystring" locationName:"max-uploads" type:"integer"` + + // Lists in-progress uploads only for those keys that begin with the specified + // prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Together with key-marker, specifies the multipart upload after which listing + // should begin. If key-marker is not specified, the upload-id-marker parameter + // is ignored. + UploadIdMarker *string `location:"querystring" locationName:"upload-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListMultipartUploadsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListMultipartUploadsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListMultipartUploadsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsInput) SetBucket(v string) *ListMultipartUploadsInput { + s.Bucket = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsInput) SetDelimiter(v string) *ListMultipartUploadsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsInput) SetEncodingType(v string) *ListMultipartUploadsInput { + s.EncodingType = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListMultipartUploadsInput) SetKeyMarker(v string) *ListMultipartUploadsInput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. 
+func (s *ListMultipartUploadsInput) SetMaxUploads(v int64) *ListMultipartUploadsInput { + s.MaxUploads = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsInput) SetPrefix(v string) *ListMultipartUploadsInput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsInput) SetUploadIdMarker(v string) *ListMultipartUploadsInput { + s.UploadIdMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploadsOutput +type ListMultipartUploadsOutput struct { + _ struct{} `type:"structure"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // Indicates whether the returned list of multipart uploads is truncated. A + // value of true indicates that the list was truncated. The list can be truncated + // if the number of multipart uploads exceeds the limit allowed or specified + // by max uploads. + IsTruncated *bool `type:"boolean"` + + // The key at or after which the listing began. + KeyMarker *string `type:"string"` + + // Maximum number of multipart uploads that could have been included in the + // response. + MaxUploads *int64 `type:"integer"` + + // When a list is truncated, this element specifies the value that should be + // used for the key-marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // When a list is truncated, this element specifies the value that should be + // used for the upload-id-marker request parameter in a subsequent request. + NextUploadIdMarker *string `type:"string"` + + // When a prefix is provided in the request, this field contains the specified + // prefix. The result contains only keys starting with the specified prefix. + Prefix *string `type:"string"` + + // Upload ID after which listing began. + UploadIdMarker *string `type:"string"` + + Uploads []*MultipartUpload `locationName:"Upload" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListMultipartUploadsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListMultipartUploadsOutput) GoString() string { + return s.String() +} + +// SetBucket sets the Bucket field's value. +func (s *ListMultipartUploadsOutput) SetBucket(v string) *ListMultipartUploadsOutput { + s.Bucket = &v + return s +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListMultipartUploadsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListMultipartUploadsOutput { + s.CommonPrefixes = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListMultipartUploadsOutput) SetDelimiter(v string) *ListMultipartUploadsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListMultipartUploadsOutput) SetEncodingType(v string) *ListMultipartUploadsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListMultipartUploadsOutput) SetIsTruncated(v bool) *ListMultipartUploadsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. 
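
// Editorial illustration, not part of this patch or the vendored upstream
// file: in-progress multipart uploads described by the types above are most
// conveniently enumerated with the generated ListMultipartUploadsPages
// helper, which follows KeyMarker/UploadIdMarker automatically. A minimal
// sketch, assuming a configured client `svc` (*s3.S3) and a placeholder
// bucket name:
//
//	err := svc.ListMultipartUploadsPages(
//		&s3.ListMultipartUploadsInput{Bucket: aws.String("example-bucket")},
//		func(page *s3.ListMultipartUploadsOutput, lastPage bool) bool {
//			for _, up := range page.Uploads {
//				fmt.Println(aws.StringValue(up.Key), aws.StringValue(up.UploadId))
//			}
//			return true // keep paging while IsTruncated is true
//		})
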
+func (s *ListMultipartUploadsOutput) SetKeyMarker(v string) *ListMultipartUploadsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxUploads sets the MaxUploads field's value. +func (s *ListMultipartUploadsOutput) SetMaxUploads(v int64) *ListMultipartUploadsOutput { + s.MaxUploads = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextKeyMarker(v string) *ListMultipartUploadsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextUploadIdMarker sets the NextUploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetNextUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.NextUploadIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListMultipartUploadsOutput) SetPrefix(v string) *ListMultipartUploadsOutput { + s.Prefix = &v + return s +} + +// SetUploadIdMarker sets the UploadIdMarker field's value. +func (s *ListMultipartUploadsOutput) SetUploadIdMarker(v string) *ListMultipartUploadsOutput { + s.UploadIdMarker = &v + return s +} + +// SetUploads sets the Uploads field's value. +func (s *ListMultipartUploadsOutput) SetUploads(v []*MultipartUpload) *ListMultipartUploadsOutput { + s.Uploads = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsRequest +type ListObjectVersionsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + KeyMarker *string `location:"querystring" locationName:"key-marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Specifies the object version you want to start listing from. + VersionIdMarker *string `location:"querystring" locationName:"version-id-marker" type:"string"` +} + +// String returns the string representation +func (s ListObjectVersionsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *ListObjectVersionsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectVersionsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectVersionsInput) SetBucket(v string) *ListObjectVersionsInput { + s.Bucket = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsInput) SetDelimiter(v string) *ListObjectVersionsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsInput) SetEncodingType(v string) *ListObjectVersionsInput { + s.EncodingType = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsInput) SetKeyMarker(v string) *ListObjectVersionsInput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsInput) SetMaxKeys(v int64) *ListObjectVersionsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsInput) SetPrefix(v string) *ListObjectVersionsInput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsInput) SetVersionIdMarker(v string) *ListObjectVersionsInput { + s.VersionIdMarker = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersionsOutput +type ListObjectVersionsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + DeleteMarkers []*DeleteMarkerEntry `locationName:"DeleteMarker" type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. If your results were truncated, you can + // make a follow-up paginated request using the NextKeyMarker and NextVersionIdMarker + // response parameters as a starting place in another request to return the + // rest of the results. + IsTruncated *bool `type:"boolean"` + + // Marks the last Key returned in a truncated response. + KeyMarker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // Use this value for the key marker request parameter in a subsequent request. + NextKeyMarker *string `type:"string"` + + // Use this value for the next version id marker parameter in a subsequent request. + NextVersionIdMarker *string `type:"string"` + + Prefix *string `type:"string"` + + VersionIdMarker *string `type:"string"` + + Versions []*ObjectVersion `locationName:"Version" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s ListObjectVersionsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectVersionsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectVersionsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectVersionsOutput { + s.CommonPrefixes = v + return s +} + +// SetDeleteMarkers sets the DeleteMarkers field's value. 
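
// Editorial illustration, not part of this patch or the vendored upstream
// file: object versions and delete markers returned by the types above can
// be walked with the generated ListObjectVersionsPages helper, which follows
// NextKeyMarker/NextVersionIdMarker for truncated responses. A minimal
// sketch, assuming a configured client `svc` (*s3.S3); bucket and prefix are
// placeholders.
//
//	err := svc.ListObjectVersionsPages(
//		&s3.ListObjectVersionsInput{
//			Bucket: aws.String("example-bucket"),
//			Prefix: aws.String("reports/"),
//		},
//		func(page *s3.ListObjectVersionsOutput, lastPage bool) bool {
//			for _, v := range page.Versions {
//				fmt.Println(aws.StringValue(v.Key), aws.StringValue(v.VersionId))
//			}
//			return true
//		})
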
+func (s *ListObjectVersionsOutput) SetDeleteMarkers(v []*DeleteMarkerEntry) *ListObjectVersionsOutput { + s.DeleteMarkers = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectVersionsOutput) SetDelimiter(v string) *ListObjectVersionsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectVersionsOutput) SetEncodingType(v string) *ListObjectVersionsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectVersionsOutput) SetIsTruncated(v bool) *ListObjectVersionsOutput { + s.IsTruncated = &v + return s +} + +// SetKeyMarker sets the KeyMarker field's value. +func (s *ListObjectVersionsOutput) SetKeyMarker(v string) *ListObjectVersionsOutput { + s.KeyMarker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectVersionsOutput) SetMaxKeys(v int64) *ListObjectVersionsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectVersionsOutput) SetName(v string) *ListObjectVersionsOutput { + s.Name = &v + return s +} + +// SetNextKeyMarker sets the NextKeyMarker field's value. +func (s *ListObjectVersionsOutput) SetNextKeyMarker(v string) *ListObjectVersionsOutput { + s.NextKeyMarker = &v + return s +} + +// SetNextVersionIdMarker sets the NextVersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetNextVersionIdMarker(v string) *ListObjectVersionsOutput { + s.NextVersionIdMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectVersionsOutput) SetPrefix(v string) *ListObjectVersionsOutput { + s.Prefix = &v + return s +} + +// SetVersionIdMarker sets the VersionIdMarker field's value. +func (s *ListObjectVersionsOutput) SetVersionIdMarker(v string) *ListObjectVersionsOutput { + s.VersionIdMarker = &v + return s +} + +// SetVersions sets the Versions field's value. +func (s *ListObjectVersionsOutput) SetVersions(v []*ObjectVersion) *ListObjectVersionsOutput { + s.Versions = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsRequest +type ListObjectsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Requests Amazon S3 to encode the object keys in the response and specifies + // the encoding method to use. An object key may contain any Unicode character; + // however, XML 1.0 parser cannot parse some characters, such as characters + // with an ASCII value from 0 to 10. For characters that are not supported in + // XML 1.0, you can add this parameter to request that Amazon S3 encode the + // keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // Specifies the key to start with when listing objects in a bucket. + Marker *string `location:"querystring" locationName:"marker" type:"string"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. 
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request. Bucket owners need not specify this parameter in their + // requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` +} + +// String returns the string representation +func (s ListObjectsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsInput) SetBucket(v string) *ListObjectsInput { + s.Bucket = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsInput) SetDelimiter(v string) *ListObjectsInput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsInput) SetEncodingType(v string) *ListObjectsInput { + s.EncodingType = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsInput) SetMarker(v string) *ListObjectsInput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsInput) SetMaxKeys(v int64) *ListObjectsInput { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsInput) SetPrefix(v string) *ListObjectsInput { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsInput) SetRequestPayer(v string) *ListObjectsInput { + s.RequestPayer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsOutput +type ListObjectsOutput struct { + _ struct{} `type:"structure"` + + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + Contents []*Object `type:"list" flattened:"true"` + + Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + Marker *string `type:"string"` + + MaxKeys *int64 `type:"integer"` + + Name *string `type:"string"` + + // When response is truncated (the IsTruncated element value in the response + // is true), you can use the key name in this field as marker in the subsequent + // request to get next set of objects. Amazon S3 lists objects in alphabetical + // order Note: This element is returned only if you have delimiter request parameter + // specified. If response does not include the NextMaker and it is truncated, + // you can use the value of the last Key in the response as the marker in the + // subsequent request to get the next set of object keys. 
+ NextMarker *string `type:"string"` + + Prefix *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsOutput) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsOutput) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsOutput { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsOutput) SetContents(v []*Object) *ListObjectsOutput { + s.Contents = v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsOutput) SetDelimiter(v string) *ListObjectsOutput { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsOutput) SetEncodingType(v string) *ListObjectsOutput { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsOutput) SetIsTruncated(v bool) *ListObjectsOutput { + s.IsTruncated = &v + return s +} + +// SetMarker sets the Marker field's value. +func (s *ListObjectsOutput) SetMarker(v string) *ListObjectsOutput { + s.Marker = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsOutput) SetMaxKeys(v int64) *ListObjectsOutput { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsOutput) SetName(v string) *ListObjectsOutput { + s.Name = &v + return s +} + +// SetNextMarker sets the NextMarker field's value. +func (s *ListObjectsOutput) SetNextMarker(v string) *ListObjectsOutput { + s.NextMarker = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsOutput) SetPrefix(v string) *ListObjectsOutput { + s.Prefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Request +type ListObjectsV2Input struct { + _ struct{} `type:"structure"` + + // Name of the bucket to list. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // ContinuationToken indicates Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key + ContinuationToken *string `location:"querystring" locationName:"continuation-token" type:"string"` + + // A delimiter is a character you use to group keys. + Delimiter *string `location:"querystring" locationName:"delimiter" type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `location:"querystring" locationName:"encoding-type" type:"string" enum:"EncodingType"` + + // The owner field is not present in listV2 by default, if you want to return + // owner field with each key in the result then set the fetch owner field to + // true + FetchOwner *bool `location:"querystring" locationName:"fetch-owner" type:"boolean"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `location:"querystring" locationName:"max-keys" type:"integer"` + + // Limits the response to keys that begin with the specified prefix. 
+ Prefix *string `location:"querystring" locationName:"prefix" type:"string"` + + // Confirms that the requester knows that she or he will be charged for the + // list objects request in V2 style. Bucket owners need not specify this parameter + // in their requests. + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket + StartAfter *string `location:"querystring" locationName:"start-after" type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Input) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Input) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListObjectsV2Input) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListObjectsV2Input"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListObjectsV2Input) SetBucket(v string) *ListObjectsV2Input { + s.Bucket = &v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Input) SetContinuationToken(v string) *ListObjectsV2Input { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Input) SetDelimiter(v string) *ListObjectsV2Input { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Input) SetEncodingType(v string) *ListObjectsV2Input { + s.EncodingType = &v + return s +} + +// SetFetchOwner sets the FetchOwner field's value. +func (s *ListObjectsV2Input) SetFetchOwner(v bool) *ListObjectsV2Input { + s.FetchOwner = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Input) SetMaxKeys(v int64) *ListObjectsV2Input { + s.MaxKeys = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ListObjectsV2Input) SetPrefix(v string) *ListObjectsV2Input { + s.Prefix = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListObjectsV2Input) SetRequestPayer(v string) *ListObjectsV2Input { + s.RequestPayer = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Input) SetStartAfter(v string) *ListObjectsV2Input { + s.StartAfter = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2Output +type ListObjectsV2Output struct { + _ struct{} `type:"structure"` + + // CommonPrefixes contains all (if there are any) keys between Prefix and the + // next occurrence of the string specified by delimiter + CommonPrefixes []*CommonPrefix `type:"list" flattened:"true"` + + // Metadata about each object returned. + Contents []*Object `type:"list" flattened:"true"` + + // ContinuationToken indicates Amazon S3 that the list is being continued on + // this bucket with a token. ContinuationToken is obfuscated and is not a real + // key + ContinuationToken *string `type:"string"` + + // A delimiter is a character you use to group keys. 
+ Delimiter *string `type:"string"` + + // Encoding type used by Amazon S3 to encode object keys in the response. + EncodingType *string `type:"string" enum:"EncodingType"` + + // A flag that indicates whether or not Amazon S3 returned all of the results + // that satisfied the search criteria. + IsTruncated *bool `type:"boolean"` + + // KeyCount is the number of keys returned with this request. KeyCount will + // always be less than equals to MaxKeys field. Say you ask for 50 keys, your + // result will include less than equals 50 keys + KeyCount *int64 `type:"integer"` + + // Sets the maximum number of keys returned in the response. The response might + // contain fewer keys but will never contain more. + MaxKeys *int64 `type:"integer"` + + // Name of the bucket to list. + Name *string `type:"string"` + + // NextContinuationToken is sent when isTruncated is true which means there + // are more keys in the bucket that can be listed. The next list requests to + // Amazon S3 can be continued with this NextContinuationToken. NextContinuationToken + // is obfuscated and is not a real key + NextContinuationToken *string `type:"string"` + + // Limits the response to keys that begin with the specified prefix. + Prefix *string `type:"string"` + + // StartAfter is where you want Amazon S3 to start listing from. Amazon S3 starts + // listing after this specified key. StartAfter can be any key in the bucket + StartAfter *string `type:"string"` +} + +// String returns the string representation +func (s ListObjectsV2Output) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListObjectsV2Output) GoString() string { + return s.String() +} + +// SetCommonPrefixes sets the CommonPrefixes field's value. +func (s *ListObjectsV2Output) SetCommonPrefixes(v []*CommonPrefix) *ListObjectsV2Output { + s.CommonPrefixes = v + return s +} + +// SetContents sets the Contents field's value. +func (s *ListObjectsV2Output) SetContents(v []*Object) *ListObjectsV2Output { + s.Contents = v + return s +} + +// SetContinuationToken sets the ContinuationToken field's value. +func (s *ListObjectsV2Output) SetContinuationToken(v string) *ListObjectsV2Output { + s.ContinuationToken = &v + return s +} + +// SetDelimiter sets the Delimiter field's value. +func (s *ListObjectsV2Output) SetDelimiter(v string) *ListObjectsV2Output { + s.Delimiter = &v + return s +} + +// SetEncodingType sets the EncodingType field's value. +func (s *ListObjectsV2Output) SetEncodingType(v string) *ListObjectsV2Output { + s.EncodingType = &v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListObjectsV2Output) SetIsTruncated(v bool) *ListObjectsV2Output { + s.IsTruncated = &v + return s +} + +// SetKeyCount sets the KeyCount field's value. +func (s *ListObjectsV2Output) SetKeyCount(v int64) *ListObjectsV2Output { + s.KeyCount = &v + return s +} + +// SetMaxKeys sets the MaxKeys field's value. +func (s *ListObjectsV2Output) SetMaxKeys(v int64) *ListObjectsV2Output { + s.MaxKeys = &v + return s +} + +// SetName sets the Name field's value. +func (s *ListObjectsV2Output) SetName(v string) *ListObjectsV2Output { + s.Name = &v + return s +} + +// SetNextContinuationToken sets the NextContinuationToken field's value. +func (s *ListObjectsV2Output) SetNextContinuationToken(v string) *ListObjectsV2Output { + s.NextContinuationToken = &v + return s +} + +// SetPrefix sets the Prefix field's value. 
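
// Editorial illustration, not part of this patch or the vendored upstream
// file: the ContinuationToken/NextContinuationToken fields documented above
// are how a caller pages through ListObjectsV2 results. A minimal sketch,
// assuming a configured client `svc` (*s3.S3) and a placeholder bucket name:
//
//	in := &s3.ListObjectsV2Input{
//		Bucket:  aws.String("example-bucket"),
//		MaxKeys: aws.Int64(1000),
//	}
//	for {
//		out, err := svc.ListObjectsV2(in)
//		if err != nil {
//			break // handle the error in real code
//		}
//		for _, obj := range out.Contents {
//			fmt.Println(aws.StringValue(obj.Key))
//		}
//		if !aws.BoolValue(out.IsTruncated) {
//			break
//		}
//		in.ContinuationToken = out.NextContinuationToken
//	}
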
+func (s *ListObjectsV2Output) SetPrefix(v string) *ListObjectsV2Output { + s.Prefix = &v + return s +} + +// SetStartAfter sets the StartAfter field's value. +func (s *ListObjectsV2Output) SetStartAfter(v string) *ListObjectsV2Output { + s.StartAfter = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsRequest +type ListPartsInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Sets the maximum number of parts to return. + MaxParts *int64 `location:"querystring" locationName:"max-parts" type:"integer"` + + // Specifies the part after which listing should begin. Only parts with higher + // part numbers will be listed. + PartNumberMarker *int64 `location:"querystring" locationName:"part-number-marker" type:"integer"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Upload ID identifying the multipart upload whose parts are being listed. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s ListPartsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ListPartsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ListPartsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsInput) SetBucket(v string) *ListPartsInput { + s.Bucket = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsInput) SetKey(v string) *ListPartsInput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsInput) SetMaxParts(v int64) *ListPartsInput { + s.MaxParts = &v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsInput) SetPartNumberMarker(v int64) *ListPartsInput { + s.PartNumberMarker = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *ListPartsInput) SetRequestPayer(v string) *ListPartsInput { + s.RequestPayer = &v + return s +} + +// SetUploadId sets the UploadId field's value. 
+func (s *ListPartsInput) SetUploadId(v string) *ListPartsInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListPartsOutput +type ListPartsOutput struct { + _ struct{} `type:"structure"` + + // Date when multipart upload will become eligible for abort operation by lifecycle. + AbortDate *time.Time `location:"header" locationName:"x-amz-abort-date" type:"timestamp" timestampFormat:"rfc822"` + + // Id of the lifecycle rule that makes a multipart upload eligible for abort + // operation. + AbortRuleId *string `location:"header" locationName:"x-amz-abort-rule-id" type:"string"` + + // Name of the bucket to which the multipart upload was initiated. + Bucket *string `type:"string"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Indicates whether the returned list of parts is truncated. + IsTruncated *bool `type:"boolean"` + + // Object key for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + // Maximum number of parts that were allowed in the response. + MaxParts *int64 `type:"integer"` + + // When a list is truncated, this element specifies the last part in the list, + // as well as the value to use for the part-number-marker request parameter + // in a subsequent request. + NextPartNumberMarker *int64 `type:"integer"` + + Owner *Owner `type:"structure"` + + // Part number after which listing begins. + PartNumberMarker *int64 `type:"integer"` + + Parts []*Part `locationName:"Part" type:"list" flattened:"true"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID identifying the multipart upload whose parts are being listed. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s ListPartsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ListPartsOutput) GoString() string { + return s.String() +} + +// SetAbortDate sets the AbortDate field's value. +func (s *ListPartsOutput) SetAbortDate(v time.Time) *ListPartsOutput { + s.AbortDate = &v + return s +} + +// SetAbortRuleId sets the AbortRuleId field's value. +func (s *ListPartsOutput) SetAbortRuleId(v string) *ListPartsOutput { + s.AbortRuleId = &v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *ListPartsOutput) SetBucket(v string) *ListPartsOutput { + s.Bucket = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *ListPartsOutput) SetInitiator(v *Initiator) *ListPartsOutput { + s.Initiator = v + return s +} + +// SetIsTruncated sets the IsTruncated field's value. +func (s *ListPartsOutput) SetIsTruncated(v bool) *ListPartsOutput { + s.IsTruncated = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ListPartsOutput) SetKey(v string) *ListPartsOutput { + s.Key = &v + return s +} + +// SetMaxParts sets the MaxParts field's value. +func (s *ListPartsOutput) SetMaxParts(v int64) *ListPartsOutput { + s.MaxParts = &v + return s +} + +// SetNextPartNumberMarker sets the NextPartNumberMarker field's value. 
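
The ListPartsInput/ListPartsOutput pair above is driven the same way. A small sketch, assuming svc is an *s3.S3 client and uploadID came from a prior CreateMultipartUpload call; bucket and key names are placeholders.

package example

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// listUploadedParts prints the parts already uploaded for one multipart upload.
func listUploadedParts(svc *s3.S3, uploadID string) error {
	out, err := svc.ListParts(&s3.ListPartsInput{
		Bucket:   aws.String("example-bucket"), // placeholder
		Key:      aws.String("example-key"),    // placeholder
		UploadId: aws.String(uploadID),
	})
	if err != nil {
		return err
	}
	for _, p := range out.Parts {
		fmt.Printf("part %d: %d bytes, etag %s\n",
			aws.Int64Value(p.PartNumber), aws.Int64Value(p.Size), aws.StringValue(p.ETag))
	}
	// If out.IsTruncated is set, continue from out.NextPartNumberMarker via the
	// PartNumberMarker field on the next request.
	return nil
}
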
+func (s *ListPartsOutput) SetNextPartNumberMarker(v int64) *ListPartsOutput { + s.NextPartNumberMarker = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ListPartsOutput) SetOwner(v *Owner) *ListPartsOutput { + s.Owner = v + return s +} + +// SetPartNumberMarker sets the PartNumberMarker field's value. +func (s *ListPartsOutput) SetPartNumberMarker(v int64) *ListPartsOutput { + s.PartNumberMarker = &v + return s +} + +// SetParts sets the Parts field's value. +func (s *ListPartsOutput) SetParts(v []*Part) *ListPartsOutput { + s.Parts = v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *ListPartsOutput) SetRequestCharged(v string) *ListPartsOutput { + s.RequestCharged = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ListPartsOutput) SetStorageClass(v string) *ListPartsOutput { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *ListPartsOutput) SetUploadId(v string) *ListPartsOutput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled +type LoggingEnabled struct { + _ struct{} `type:"structure"` + + // Specifies the bucket where you want Amazon S3 to store server access logs. + // You can have your logs delivered to any bucket that you own, including the + // same bucket that is being logged. You can also configure multiple buckets + // to deliver their logs to the same target bucket. In this case you should + // choose a different TargetPrefix for each source bucket so that the delivered + // log files can be distinguished by key. + TargetBucket *string `type:"string"` + + TargetGrants []*TargetGrant `locationNameList:"Grant" type:"list"` + + // This element lets you specify a prefix for the keys that the log files will + // be stored under. + TargetPrefix *string `type:"string"` +} + +// String returns the string representation +func (s LoggingEnabled) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s LoggingEnabled) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *LoggingEnabled) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "LoggingEnabled"} + if s.TargetGrants != nil { + for i, v := range s.TargetGrants { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TargetGrants", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTargetBucket sets the TargetBucket field's value. +func (s *LoggingEnabled) SetTargetBucket(v string) *LoggingEnabled { + s.TargetBucket = &v + return s +} + +// SetTargetGrants sets the TargetGrants field's value. +func (s *LoggingEnabled) SetTargetGrants(v []*TargetGrant) *LoggingEnabled { + s.TargetGrants = v + return s +} + +// SetTargetPrefix sets the TargetPrefix field's value. +func (s *LoggingEnabled) SetTargetPrefix(v string) *LoggingEnabled { + s.TargetPrefix = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator +type MetricsAndOperator struct { + _ struct{} `type:"structure"` + + // The prefix used when evaluating an AND predicate. + Prefix *string `type:"string"` + + // The list of tags used when evaluating an AND predicate. 
+ Tags []*Tag `locationName:"Tag" locationNameList:"Tag" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s MetricsAndOperator) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsAndOperator) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsAndOperator) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsAndOperator"} + if s.Tags != nil { + for i, v := range s.Tags { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Tags", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsAndOperator) SetPrefix(v string) *MetricsAndOperator { + s.Prefix = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *MetricsAndOperator) SetTags(v []*Tag) *MetricsAndOperator { + s.Tags = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration +type MetricsConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies a metrics configuration filter. The metrics configuration will + // only include objects that meet the filter's criteria. A filter must be a + // prefix, a tag, or a conjunction (MetricsAndOperator). + Filter *MetricsFilter `type:"structure"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s MetricsConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsConfiguration"} + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.Filter != nil { + if err := s.Filter.Validate(); err != nil { + invalidParams.AddNested("Filter", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetFilter sets the Filter field's value. +func (s *MetricsConfiguration) SetFilter(v *MetricsFilter) *MetricsConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *MetricsConfiguration) SetId(v string) *MetricsConfiguration { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter +type MetricsFilter struct { + _ struct{} `type:"structure"` + + // A conjunction (logical AND) of predicates, which is used in evaluating a + // metrics filter. The operator must have at least two predicates, and an object + // must match all of the predicates in order for the filter to apply. + And *MetricsAndOperator `type:"structure"` + + // The prefix used when evaluating a metrics filter. + Prefix *string `type:"string"` + + // The tag used when evaluating a metrics filter. 
+ Tag *Tag `type:"structure"` +} + +// String returns the string representation +func (s MetricsFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MetricsFilter) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *MetricsFilter) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "MetricsFilter"} + if s.And != nil { + if err := s.And.Validate(); err != nil { + invalidParams.AddNested("And", err.(request.ErrInvalidParams)) + } + } + if s.Tag != nil { + if err := s.Tag.Validate(); err != nil { + invalidParams.AddNested("Tag", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnd sets the And field's value. +func (s *MetricsFilter) SetAnd(v *MetricsAndOperator) *MetricsFilter { + s.And = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *MetricsFilter) SetPrefix(v string) *MetricsFilter { + s.Prefix = &v + return s +} + +// SetTag sets the Tag field's value. +func (s *MetricsFilter) SetTag(v *Tag) *MetricsFilter { + s.Tag = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload +type MultipartUpload struct { + _ struct{} `type:"structure"` + + // Date and time at which the multipart upload was initiated. + Initiated *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Identifies who initiated the multipart upload. + Initiator *Initiator `type:"structure"` + + // Key of the object for which the multipart upload was initiated. + Key *string `min:"1" type:"string"` + + Owner *Owner `type:"structure"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"StorageClass"` + + // Upload ID that identifies the multipart upload. + UploadId *string `type:"string"` +} + +// String returns the string representation +func (s MultipartUpload) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s MultipartUpload) GoString() string { + return s.String() +} + +// SetInitiated sets the Initiated field's value. +func (s *MultipartUpload) SetInitiated(v time.Time) *MultipartUpload { + s.Initiated = &v + return s +} + +// SetInitiator sets the Initiator field's value. +func (s *MultipartUpload) SetInitiator(v *Initiator) *MultipartUpload { + s.Initiator = v + return s +} + +// SetKey sets the Key field's value. +func (s *MultipartUpload) SetKey(v string) *MultipartUpload { + s.Key = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *MultipartUpload) SetOwner(v *Owner) *MultipartUpload { + s.Owner = v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *MultipartUpload) SetStorageClass(v string) *MultipartUpload { + s.StorageClass = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *MultipartUpload) SetUploadId(v string) *MultipartUpload { + s.UploadId = &v + return s +} + +// Specifies when noncurrent object versions expire. Upon expiration, Amazon +// S3 permanently deletes the noncurrent object versions. You set this lifecycle +// configuration action on a bucket that has versioning enabled (or suspended) +// to request that Amazon S3 delete noncurrent object versions at a specific +// period in the object's lifetime. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration +type NoncurrentVersionExpiration struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + NoncurrentDays *int64 `type:"integer"` +} + +// String returns the string representation +func (s NoncurrentVersionExpiration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionExpiration) GoString() string { + return s.String() +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionExpiration) SetNoncurrentDays(v int64) *NoncurrentVersionExpiration { + s.NoncurrentDays = &v + return s +} + +// Container for the transition rule that describes when noncurrent objects +// transition to the STANDARD_IA or GLACIER storage class. If your bucket is +// versioning-enabled (or versioning is suspended), you can set this action +// to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA +// or GLACIER storage class at a specific period in the object's lifetime. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition +type NoncurrentVersionTransition struct { + _ struct{} `type:"structure"` + + // Specifies the number of days an object is noncurrent before Amazon S3 can + // perform the associated action. For information about the noncurrent days + // calculations, see How Amazon S3 Calculates When an Object Became Noncurrent + // (http://docs.aws.amazon.com/AmazonS3/latest/dev/s3-access-control.html) + NoncurrentDays *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s NoncurrentVersionTransition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NoncurrentVersionTransition) GoString() string { + return s.String() +} + +// SetNoncurrentDays sets the NoncurrentDays field's value. +func (s *NoncurrentVersionTransition) SetNoncurrentDays(v int64) *NoncurrentVersionTransition { + s.NoncurrentDays = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *NoncurrentVersionTransition) SetStorageClass(v string) *NoncurrentVersionTransition { + s.StorageClass = &v + return s +} + +// Container for specifying the notification configuration of the bucket. If +// this element is empty, notifications are turned off on the bucket. 
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration +type NotificationConfiguration struct { + _ struct{} `type:"structure"` + + LambdaFunctionConfigurations []*LambdaFunctionConfiguration `locationName:"CloudFunctionConfiguration" type:"list" flattened:"true"` + + QueueConfigurations []*QueueConfiguration `locationName:"QueueConfiguration" type:"list" flattened:"true"` + + TopicConfigurations []*TopicConfiguration `locationName:"TopicConfiguration" type:"list" flattened:"true"` +} + +// String returns the string representation +func (s NotificationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *NotificationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "NotificationConfiguration"} + if s.LambdaFunctionConfigurations != nil { + for i, v := range s.LambdaFunctionConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LambdaFunctionConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.QueueConfigurations != nil { + for i, v := range s.QueueConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "QueueConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + if s.TopicConfigurations != nil { + for i, v := range s.TopicConfigurations { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TopicConfigurations", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetLambdaFunctionConfigurations sets the LambdaFunctionConfigurations field's value. +func (s *NotificationConfiguration) SetLambdaFunctionConfigurations(v []*LambdaFunctionConfiguration) *NotificationConfiguration { + s.LambdaFunctionConfigurations = v + return s +} + +// SetQueueConfigurations sets the QueueConfigurations field's value. +func (s *NotificationConfiguration) SetQueueConfigurations(v []*QueueConfiguration) *NotificationConfiguration { + s.QueueConfigurations = v + return s +} + +// SetTopicConfigurations sets the TopicConfigurations field's value. +func (s *NotificationConfiguration) SetTopicConfigurations(v []*TopicConfiguration) *NotificationConfiguration { + s.TopicConfigurations = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationDeprecated +type NotificationConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + CloudFunctionConfiguration *CloudFunctionConfiguration `type:"structure"` + + QueueConfiguration *QueueConfigurationDeprecated `type:"structure"` + + TopicConfiguration *TopicConfigurationDeprecated `type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetCloudFunctionConfiguration sets the CloudFunctionConfiguration field's value. 
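
A sketch of wiring the NotificationConfiguration container above into PutBucketNotificationConfiguration, assuming svc is an *s3.S3 client; the bucket name and SNS topic ARN are placeholders.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// notifyOnCreate sends s3:ObjectCreated:* events for the bucket to an SNS topic.
func notifyOnCreate(svc *s3.S3) error {
	_, err := svc.PutBucketNotificationConfiguration(&s3.PutBucketNotificationConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		NotificationConfiguration: &s3.NotificationConfiguration{
			TopicConfigurations: []*s3.TopicConfiguration{
				{
					Events:   []*string{aws.String("s3:ObjectCreated:*")},
					TopicArn: aws.String("arn:aws:sns:us-east-1:123456789012:example-topic"), // placeholder ARN
				},
			},
		},
	})
	return err
}
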
+func (s *NotificationConfigurationDeprecated) SetCloudFunctionConfiguration(v *CloudFunctionConfiguration) *NotificationConfigurationDeprecated { + s.CloudFunctionConfiguration = v + return s +} + +// SetQueueConfiguration sets the QueueConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetQueueConfiguration(v *QueueConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.QueueConfiguration = v + return s +} + +// SetTopicConfiguration sets the TopicConfiguration field's value. +func (s *NotificationConfigurationDeprecated) SetTopicConfiguration(v *TopicConfigurationDeprecated) *NotificationConfigurationDeprecated { + s.TopicConfiguration = v + return s +} + +// Container for object key name filtering rules. For information about key +// name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter +type NotificationConfigurationFilter struct { + _ struct{} `type:"structure"` + + // Container for object key name prefix and suffix filtering rules. + Key *KeyFilter `locationName:"S3Key" type:"structure"` +} + +// String returns the string representation +func (s NotificationConfigurationFilter) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s NotificationConfigurationFilter) GoString() string { + return s.String() +} + +// SetKey sets the Key field's value. +func (s *NotificationConfigurationFilter) SetKey(v *KeyFilter) *NotificationConfigurationFilter { + s.Key = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object +type Object struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + Key *string `min:"1" type:"string"` + + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectStorageClass"` +} + +// String returns the string representation +func (s Object) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Object) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Object) SetETag(v string) *Object { + s.ETag = &v + return s +} + +// SetKey sets the Key field's value. +func (s *Object) SetKey(v string) *Object { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Object) SetLastModified(v time.Time) *Object { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *Object) SetOwner(v *Owner) *Object { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *Object) SetSize(v int64) *Object { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Object) SetStorageClass(v string) *Object { + s.StorageClass = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier +type ObjectIdentifier struct { + _ struct{} `type:"structure"` + + // Key name of the object to delete. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // VersionId for the specific version of the object to delete. 
+ VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectIdentifier) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectIdentifier) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ObjectIdentifier) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ObjectIdentifier"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *ObjectIdentifier) SetKey(v string) *ObjectIdentifier { + s.Key = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *ObjectIdentifier) SetVersionId(v string) *ObjectIdentifier { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion +type ObjectVersion struct { + _ struct{} `type:"structure"` + + ETag *string `type:"string"` + + // Specifies whether the object is (true) or is not (false) the latest version + // of an object. + IsLatest *bool `type:"boolean"` + + // The object key. + Key *string `min:"1" type:"string"` + + // Date and time the object was last modified. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + Owner *Owner `type:"structure"` + + // Size in bytes of the object. + Size *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"ObjectVersionStorageClass"` + + // Version ID of an object. + VersionId *string `type:"string"` +} + +// String returns the string representation +func (s ObjectVersion) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ObjectVersion) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *ObjectVersion) SetETag(v string) *ObjectVersion { + s.ETag = &v + return s +} + +// SetIsLatest sets the IsLatest field's value. +func (s *ObjectVersion) SetIsLatest(v bool) *ObjectVersion { + s.IsLatest = &v + return s +} + +// SetKey sets the Key field's value. +func (s *ObjectVersion) SetKey(v string) *ObjectVersion { + s.Key = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *ObjectVersion) SetLastModified(v time.Time) *ObjectVersion { + s.LastModified = &v + return s +} + +// SetOwner sets the Owner field's value. +func (s *ObjectVersion) SetOwner(v *Owner) *ObjectVersion { + s.Owner = v + return s +} + +// SetSize sets the Size field's value. +func (s *ObjectVersion) SetSize(v int64) *ObjectVersion { + s.Size = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *ObjectVersion) SetStorageClass(v string) *ObjectVersion { + s.StorageClass = &v + return s +} + +// SetVersionId sets the VersionId field's value. 
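
ObjectIdentifier above is the element a batch delete is built from. A sketch using DeleteObjects (defined elsewhere in this file), with placeholder bucket, keys, and version ID, assuming svc is an *s3.S3 client.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// deleteOldReports removes a fixed set of keys, one of them a specific version.
func deleteOldReports(svc *s3.S3) error {
	_, err := svc.DeleteObjects(&s3.DeleteObjectsInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Delete: &s3.Delete{
			Objects: []*s3.ObjectIdentifier{
				{Key: aws.String("old/report-1.csv")},
				{Key: aws.String("old/report-2.csv"), VersionId: aws.String("EXAMPLEVERSIONID")}, // placeholder version
			},
			Quiet: aws.Bool(true),
		},
	})
	return err
}
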
+func (s *ObjectVersion) SetVersionId(v string) *ObjectVersion { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner +type Owner struct { + _ struct{} `type:"structure"` + + DisplayName *string `type:"string"` + + ID *string `type:"string"` +} + +// String returns the string representation +func (s Owner) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Owner) GoString() string { + return s.String() +} + +// SetDisplayName sets the DisplayName field's value. +func (s *Owner) SetDisplayName(v string) *Owner { + s.DisplayName = &v + return s +} + +// SetID sets the ID field's value. +func (s *Owner) SetID(v string) *Owner { + s.ID = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part +type Part struct { + _ struct{} `type:"structure"` + + // Entity tag returned when the part was uploaded. + ETag *string `type:"string"` + + // Date and time at which the part was uploaded. + LastModified *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Part number identifying the part. This is a positive integer between 1 and + // 10,000. + PartNumber *int64 `type:"integer"` + + // Size of the uploaded part data. + Size *int64 `type:"integer"` +} + +// String returns the string representation +func (s Part) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Part) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *Part) SetETag(v string) *Part { + s.ETag = &v + return s +} + +// SetLastModified sets the LastModified field's value. +func (s *Part) SetLastModified(v time.Time) *Part { + s.LastModified = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *Part) SetPartNumber(v int64) *Part { + s.PartNumber = &v + return s +} + +// SetSize sets the Size field's value. +func (s *Part) SetSize(v int64) *Part { + s.Size = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationRequest +type PutBucketAccelerateConfigurationInput struct { + _ struct{} `type:"structure" payload:"AccelerateConfiguration"` + + // Specifies the Accelerate Configuration you want to set for the bucket. + // + // AccelerateConfiguration is a required field + AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"` + + // Name of the bucket for which the accelerate configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketAccelerateConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAccelerateConfigurationInput"} + if s.AccelerateConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AccelerateConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAccelerateConfiguration sets the AccelerateConfiguration field's value. +func (s *PutBucketAccelerateConfigurationInput) SetAccelerateConfiguration(v *AccelerateConfiguration) *PutBucketAccelerateConfigurationInput { + s.AccelerateConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAccelerateConfigurationInput) SetBucket(v string) *PutBucketAccelerateConfigurationInput { + s.Bucket = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfigurationOutput +type PutBucketAccelerateConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAccelerateConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAccelerateConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclRequest +type PutBucketAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the bucket. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` +} + +// String returns the string representation +func (s PutBucketAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutBucketAclInput) SetACL(v string) *PutBucketAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutBucketAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutBucketAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAclInput) SetBucket(v string) *PutBucketAclInput { + s.Bucket = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutBucketAclInput) SetGrantFullControl(v string) *PutBucketAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutBucketAclInput) SetGrantRead(v string) *PutBucketAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutBucketAclInput) SetGrantReadACP(v string) *PutBucketAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutBucketAclInput) SetGrantWrite(v string) *PutBucketAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutBucketAclInput) SetGrantWriteACP(v string) *PutBucketAclInput { + s.GrantWriteACP = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAclOutput +type PutBucketAclOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAclOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationRequest +type PutBucketAnalyticsConfigurationInput struct { + _ struct{} `type:"structure" payload:"AnalyticsConfiguration"` + + // The configuration and any analyses for the analytics filter. + // + // AnalyticsConfiguration is a required field + AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true"` + + // The name of the bucket to which an analytics configuration is stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The identifier used to represent an analytics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketAnalyticsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAnalyticsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutBucketAnalyticsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketAnalyticsConfigurationInput"} + if s.AnalyticsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("AnalyticsConfiguration")) + } + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.AnalyticsConfiguration != nil { + if err := s.AnalyticsConfiguration.Validate(); err != nil { + invalidParams.AddNested("AnalyticsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAnalyticsConfiguration sets the AnalyticsConfiguration field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetAnalyticsConfiguration(v *AnalyticsConfiguration) *PutBucketAnalyticsConfigurationInput { + s.AnalyticsConfiguration = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetBucket(v string) *PutBucketAnalyticsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketAnalyticsConfigurationInput) SetId(v string) *PutBucketAnalyticsConfigurationInput { + s.Id = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfigurationOutput +type PutBucketAnalyticsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketAnalyticsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketAnalyticsConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsRequest +type PutBucketCorsInput struct { + _ struct{} `type:"structure" payload:"CORSConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // CORSConfiguration is a required field + CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketCorsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketCorsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketCorsInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CORSConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("CORSConfiguration")) + } + if s.CORSConfiguration != nil { + if err := s.CORSConfiguration.Validate(); err != nil { + invalidParams.AddNested("CORSConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketCorsInput) SetBucket(v string) *PutBucketCorsInput { + s.Bucket = &v + return s +} + +// SetCORSConfiguration sets the CORSConfiguration field's value. 
+func (s *PutBucketCorsInput) SetCORSConfiguration(v *CORSConfiguration) *PutBucketCorsInput { + s.CORSConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCorsOutput +type PutBucketCorsOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketCorsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketCorsOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationRequest +type PutBucketInventoryConfigurationInput struct { + _ struct{} `type:"structure" payload:"InventoryConfiguration"` + + // The name of the bucket where the inventory configuration will be stored. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the inventory configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the inventory configuration. + // + // InventoryConfiguration is a required field + InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketInventoryConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketInventoryConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.InventoryConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("InventoryConfiguration")) + } + if s.InventoryConfiguration != nil { + if err := s.InventoryConfiguration.Validate(); err != nil { + invalidParams.AddNested("InventoryConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketInventoryConfigurationInput) SetBucket(v string) *PutBucketInventoryConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketInventoryConfigurationInput) SetId(v string) *PutBucketInventoryConfigurationInput { + s.Id = &v + return s +} + +// SetInventoryConfiguration sets the InventoryConfiguration field's value. 
+func (s *PutBucketInventoryConfigurationInput) SetInventoryConfiguration(v *InventoryConfiguration) *PutBucketInventoryConfigurationInput { + s.InventoryConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfigurationOutput +type PutBucketInventoryConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketInventoryConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketInventoryConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationRequest +type PutBucketLifecycleConfigurationInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLifecycleConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleConfigurationInput) SetBucket(v string) *PutBucketLifecycleConfigurationInput { + s.Bucket = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
+func (s *PutBucketLifecycleConfigurationInput) SetLifecycleConfiguration(v *BucketLifecycleConfiguration) *PutBucketLifecycleConfigurationInput { + s.LifecycleConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfigurationOutput +type PutBucketLifecycleConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleRequest +type PutBucketLifecycleInput struct { + _ struct{} `type:"structure" payload:"LifecycleConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLifecycleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLifecycleInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.LifecycleConfiguration != nil { + if err := s.LifecycleConfiguration.Validate(); err != nil { + invalidParams.AddNested("LifecycleConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLifecycleInput) SetBucket(v string) *PutBucketLifecycleInput { + s.Bucket = &v + return s +} + +// SetLifecycleConfiguration sets the LifecycleConfiguration field's value. 
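
PutBucketLifecycleConfigurationInput (in the preceding chunk) is the newer way to apply lifecycle rules, including the NoncurrentVersionExpiration action documented earlier. A sketch with placeholder bucket, rule ID, and prefix, assuming svc is an *s3.S3 client.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// expireNoncurrentVersions deletes noncurrent object versions 30 days after they
// become noncurrent, for keys under the given prefix.
func expireNoncurrentVersions(svc *s3.S3) error {
	_, err := svc.PutBucketLifecycleConfiguration(&s3.PutBucketLifecycleConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		LifecycleConfiguration: &s3.BucketLifecycleConfiguration{
			Rules: []*s3.LifecycleRule{
				{
					ID:     aws.String("expire-noncurrent"), // placeholder rule id
					Prefix: aws.String("logs/"),             // placeholder prefix
					Status: aws.String("Enabled"),
					NoncurrentVersionExpiration: &s3.NoncurrentVersionExpiration{
						NoncurrentDays: aws.Int64(30),
					},
				},
			},
		},
	})
	return err
}
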
+func (s *PutBucketLifecycleInput) SetLifecycleConfiguration(v *LifecycleConfiguration) *PutBucketLifecycleInput { + s.LifecycleConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleOutput +type PutBucketLifecycleOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLifecycleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLifecycleOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingRequest +type PutBucketLoggingInput struct { + _ struct{} `type:"structure" payload:"BucketLoggingStatus"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // BucketLoggingStatus is a required field + BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketLoggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketLoggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketLoggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.BucketLoggingStatus == nil { + invalidParams.Add(request.NewErrParamRequired("BucketLoggingStatus")) + } + if s.BucketLoggingStatus != nil { + if err := s.BucketLoggingStatus.Validate(); err != nil { + invalidParams.AddNested("BucketLoggingStatus", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketLoggingInput) SetBucket(v string) *PutBucketLoggingInput { + s.Bucket = &v + return s +} + +// SetBucketLoggingStatus sets the BucketLoggingStatus field's value. +func (s *PutBucketLoggingInput) SetBucketLoggingStatus(v *BucketLoggingStatus) *PutBucketLoggingInput { + s.BucketLoggingStatus = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLoggingOutput +type PutBucketLoggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketLoggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketLoggingOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationRequest +type PutBucketMetricsConfigurationInput struct { + _ struct{} `type:"structure" payload:"MetricsConfiguration"` + + // The name of the bucket for which the metrics configuration is set. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The ID used to identify the metrics configuration. + // + // Id is a required field + Id *string `location:"querystring" locationName:"id" type:"string" required:"true"` + + // Specifies the metrics configuration. 
+ // + // MetricsConfiguration is a required field + MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketMetricsConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketMetricsConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketMetricsConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketMetricsConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Id == nil { + invalidParams.Add(request.NewErrParamRequired("Id")) + } + if s.MetricsConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("MetricsConfiguration")) + } + if s.MetricsConfiguration != nil { + if err := s.MetricsConfiguration.Validate(); err != nil { + invalidParams.AddNested("MetricsConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketMetricsConfigurationInput) SetBucket(v string) *PutBucketMetricsConfigurationInput { + s.Bucket = &v + return s +} + +// SetId sets the Id field's value. +func (s *PutBucketMetricsConfigurationInput) SetId(v string) *PutBucketMetricsConfigurationInput { + s.Id = &v + return s +} + +// SetMetricsConfiguration sets the MetricsConfiguration field's value. +func (s *PutBucketMetricsConfigurationInput) SetMetricsConfiguration(v *MetricsConfiguration) *PutBucketMetricsConfigurationInput { + s.MetricsConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfigurationOutput +type PutBucketMetricsConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketMetricsConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketMetricsConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationRequest +type PutBucketNotificationConfigurationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for specifying the notification configuration of the bucket. If + // this element is empty, notifications are turned off on the bucket. + // + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
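
The MetricsConfiguration/MetricsFilter types from earlier plug into PutBucketMetricsConfigurationInput above. A sketch that enables request metrics for one prefix, with placeholder bucket and configuration ID, assuming svc is an *s3.S3 client.

package example

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

// enableDocsMetrics publishes CloudWatch request metrics for keys under documents/.
func enableDocsMetrics(svc *s3.S3) error {
	_, err := svc.PutBucketMetricsConfiguration(&s3.PutBucketMetricsConfigurationInput{
		Bucket: aws.String("example-bucket"), // placeholder
		Id:     aws.String("documents-only"), // placeholder configuration id
		MetricsConfiguration: &s3.MetricsConfiguration{
			Id: aws.String("documents-only"),
			Filter: &s3.MetricsFilter{
				Prefix: aws.String("documents/"),
			},
		},
	})
	return err
}
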
+func (s *PutBucketNotificationConfigurationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationConfigurationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + if s.NotificationConfiguration != nil { + if err := s.NotificationConfiguration.Validate(); err != nil { + invalidParams.AddNested("NotificationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationConfigurationInput) SetBucket(v string) *PutBucketNotificationConfigurationInput { + s.Bucket = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. +func (s *PutBucketNotificationConfigurationInput) SetNotificationConfiguration(v *NotificationConfiguration) *PutBucketNotificationConfigurationInput { + s.NotificationConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfigurationOutput +type PutBucketNotificationConfigurationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationConfigurationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationConfigurationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationRequest +type PutBucketNotificationInput struct { + _ struct{} `type:"structure" payload:"NotificationConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // NotificationConfiguration is a required field + NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketNotificationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketNotificationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketNotificationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.NotificationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("NotificationConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketNotificationInput) SetBucket(v string) *PutBucketNotificationInput { + s.Bucket = &v + return s +} + +// SetNotificationConfiguration sets the NotificationConfiguration field's value. 
+func (s *PutBucketNotificationInput) SetNotificationConfiguration(v *NotificationConfigurationDeprecated) *PutBucketNotificationInput { + s.NotificationConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationOutput +type PutBucketNotificationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketNotificationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketNotificationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyRequest +type PutBucketPolicyInput struct { + _ struct{} `type:"structure" payload:"Policy"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The bucket policy as a JSON document. + // + // Policy is a required field + Policy *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s PutBucketPolicyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketPolicyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketPolicyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Policy == nil { + invalidParams.Add(request.NewErrParamRequired("Policy")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketPolicyInput) SetBucket(v string) *PutBucketPolicyInput { + s.Bucket = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *PutBucketPolicyInput) SetPolicy(v string) *PutBucketPolicyInput { + s.Policy = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicyOutput +type PutBucketPolicyOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketPolicyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketPolicyOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationRequest +type PutBucketReplicationInput struct { + _ struct{} `type:"structure" payload:"ReplicationConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Container for replication rules. You can add as many as 1,000 rules. Total + // replication configuration size can be up to 2 MB. + // + // ReplicationConfiguration is a required field + ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketReplicationInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
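+// Example (illustrative sketch, not generated code): the Put*Input types above
+// all follow the same pattern from a consumer of this package: build the input
+// with the chained setters, then call Validate to catch missing required fields
+// before sending the request. The bucket name and policy document below are
+// placeholder values.
+//
+//	import "github.com/aws/aws-sdk-go/service/s3"
+//
+//	func buildPolicyInput() (*s3.PutBucketPolicyInput, error) {
+//		input := (&s3.PutBucketPolicyInput{}).
+//			SetBucket("example-bucket").
+//			SetPolicy(`{"Version":"2012-10-17","Statement":[]}`)
+//		if err := input.Validate(); err != nil {
+//			// err is a request.ErrInvalidParams naming any missing required fields.
+//			return nil, err
+//		}
+//		return input, nil
+//	}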
+func (s *PutBucketReplicationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketReplicationInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.ReplicationConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("ReplicationConfiguration")) + } + if s.ReplicationConfiguration != nil { + if err := s.ReplicationConfiguration.Validate(); err != nil { + invalidParams.AddNested("ReplicationConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketReplicationInput) SetBucket(v string) *PutBucketReplicationInput { + s.Bucket = &v + return s +} + +// SetReplicationConfiguration sets the ReplicationConfiguration field's value. +func (s *PutBucketReplicationInput) SetReplicationConfiguration(v *ReplicationConfiguration) *PutBucketReplicationInput { + s.ReplicationConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplicationOutput +type PutBucketReplicationOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketReplicationOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketReplicationOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentRequest +type PutBucketRequestPaymentInput struct { + _ struct{} `type:"structure" payload:"RequestPaymentConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // RequestPaymentConfiguration is a required field + RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketRequestPaymentInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketRequestPaymentInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.RequestPaymentConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("RequestPaymentConfiguration")) + } + if s.RequestPaymentConfiguration != nil { + if err := s.RequestPaymentConfiguration.Validate(); err != nil { + invalidParams.AddNested("RequestPaymentConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketRequestPaymentInput) SetBucket(v string) *PutBucketRequestPaymentInput { + s.Bucket = &v + return s +} + +// SetRequestPaymentConfiguration sets the RequestPaymentConfiguration field's value. 
+func (s *PutBucketRequestPaymentInput) SetRequestPaymentConfiguration(v *RequestPaymentConfiguration) *PutBucketRequestPaymentInput { + s.RequestPaymentConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPaymentOutput +type PutBucketRequestPaymentOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketRequestPaymentOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketRequestPaymentOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingRequest +type PutBucketTaggingInput struct { + _ struct{} `type:"structure" payload:"Tagging"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Tagging is a required field + Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketTaggingInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketTaggingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketTaggingInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Tagging == nil { + invalidParams.Add(request.NewErrParamRequired("Tagging")) + } + if s.Tagging != nil { + if err := s.Tagging.Validate(); err != nil { + invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketTaggingInput) SetBucket(v string) *PutBucketTaggingInput { + s.Bucket = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutBucketTaggingInput) SetTagging(v *Tagging) *PutBucketTaggingInput { + s.Tagging = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTaggingOutput +type PutBucketTaggingOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketTaggingOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketTaggingOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningRequest +type PutBucketVersioningInput struct { + _ struct{} `type:"structure" payload:"VersioningConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The concatenation of the authentication device's serial number, a space, + // and the value that is displayed on your authentication device. 
+ MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"` + + // VersioningConfiguration is a required field + VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketVersioningInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketVersioningInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketVersioningInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.VersioningConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("VersioningConfiguration")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *PutBucketVersioningInput) SetBucket(v string) *PutBucketVersioningInput { + s.Bucket = &v + return s +} + +// SetMFA sets the MFA field's value. +func (s *PutBucketVersioningInput) SetMFA(v string) *PutBucketVersioningInput { + s.MFA = &v + return s +} + +// SetVersioningConfiguration sets the VersioningConfiguration field's value. +func (s *PutBucketVersioningInput) SetVersioningConfiguration(v *VersioningConfiguration) *PutBucketVersioningInput { + s.VersioningConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioningOutput +type PutBucketVersioningOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketVersioningOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketVersioningOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteRequest +type PutBucketWebsiteInput struct { + _ struct{} `type:"structure" payload:"WebsiteConfiguration"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // WebsiteConfiguration is a required field + WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"` +} + +// String returns the string representation +func (s PutBucketWebsiteInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutBucketWebsiteInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutBucketWebsiteInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.WebsiteConfiguration == nil { + invalidParams.Add(request.NewErrParamRequired("WebsiteConfiguration")) + } + if s.WebsiteConfiguration != nil { + if err := s.WebsiteConfiguration.Validate(); err != nil { + invalidParams.AddNested("WebsiteConfiguration", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. 
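+// Example (illustrative sketch, not generated code): PutBucketVersioningInput
+// carries the x-amz-mfa header as a single string in the documented
+// "device-serial<space>token" form. The VersioningConfiguration value is
+// assumed to expose the Status field defined elsewhere in this file; the
+// bucket, device serial, and token below are placeholders.
+//
+//	import (
+//		"github.com/aws/aws-sdk-go/aws"
+//		"github.com/aws/aws-sdk-go/service/s3"
+//	)
+//
+//	func enableVersioning() (*s3.PutBucketVersioningInput, error) {
+//		input := (&s3.PutBucketVersioningInput{}).
+//			SetBucket("example-bucket").
+//			SetMFA("arn:aws:iam::123456789012:mfa/example 123456").
+//			SetVersioningConfiguration(&s3.VersioningConfiguration{
+//				Status: aws.String("Enabled"),
+//			})
+//		// Only Bucket and VersioningConfiguration are required by Validate.
+//		return input, input.Validate()
+//	}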
+func (s *PutBucketWebsiteInput) SetBucket(v string) *PutBucketWebsiteInput { + s.Bucket = &v + return s +} + +// SetWebsiteConfiguration sets the WebsiteConfiguration field's value. +func (s *PutBucketWebsiteInput) SetWebsiteConfiguration(v *WebsiteConfiguration) *PutBucketWebsiteInput { + s.WebsiteConfiguration = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsiteOutput +type PutBucketWebsiteOutput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s PutBucketWebsiteOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutBucketWebsiteOutput) GoString() string { + return s.String() +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclRequest +type PutObjectAclInput struct { + _ struct{} `type:"structure" payload:"AccessControlPolicy"` + + // The canned ACL to apply to the object. + ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"` + + AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Allows grantee the read, write, read ACP, and write ACP permissions on the + // bucket. + GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"` + + // Allows grantee to list the objects in the bucket. + GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"` + + // Allows grantee to read the bucket ACL. + GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"` + + // Allows grantee to create, overwrite, and delete any object in the bucket. + GrantWrite *string `location:"header" locationName:"x-amz-grant-write" type:"string"` + + // Allows grantee to write the ACL for the applicable bucket. + GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // VersionId used to reference a specific version of the object. + VersionId *string `location:"querystring" locationName:"versionId" type:"string"` +} + +// String returns the string representation +func (s PutObjectAclInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *PutObjectAclInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectAclInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.AccessControlPolicy != nil { + if err := s.AccessControlPolicy.Validate(); err != nil { + invalidParams.AddNested("AccessControlPolicy", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectAclInput) SetACL(v string) *PutObjectAclInput { + s.ACL = &v + return s +} + +// SetAccessControlPolicy sets the AccessControlPolicy field's value. +func (s *PutObjectAclInput) SetAccessControlPolicy(v *AccessControlPolicy) *PutObjectAclInput { + s.AccessControlPolicy = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectAclInput) SetBucket(v string) *PutObjectAclInput { + s.Bucket = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectAclInput) SetGrantFullControl(v string) *PutObjectAclInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectAclInput) SetGrantRead(v string) *PutObjectAclInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectAclInput) SetGrantReadACP(v string) *PutObjectAclInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWrite sets the GrantWrite field's value. +func (s *PutObjectAclInput) SetGrantWrite(v string) *PutObjectAclInput { + s.GrantWrite = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectAclInput) SetGrantWriteACP(v string) *PutObjectAclInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectAclInput) SetKey(v string) *PutObjectAclInput { + s.Key = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectAclInput) SetRequestPayer(v string) *PutObjectAclInput { + s.RequestPayer = &v + return s +} + +// SetVersionId sets the VersionId field's value. +func (s *PutObjectAclInput) SetVersionId(v string) *PutObjectAclInput { + s.VersionId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAclOutput +type PutObjectAclOutput struct { + _ struct{} `type:"structure"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` +} + +// String returns the string representation +func (s PutObjectAclOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectAclOutput) GoString() string { + return s.String() +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectAclOutput) SetRequestCharged(v string) *PutObjectAclOutput { + s.RequestCharged = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRequest +type PutObjectInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // The canned ACL to apply to the object. 
+	ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
+
+	// Object data.
+	Body io.ReadSeeker `type:"blob"`
+
+	// Name of the bucket to which the PUT operation was initiated.
+	//
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Specifies caching behavior along the request/reply chain.
+	CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"`
+
+	// Specifies presentational information for the object.
+	ContentDisposition *string `location:"header" locationName:"Content-Disposition" type:"string"`
+
+	// Specifies what content encodings have been applied to the object and thus
+	// what decoding mechanisms must be applied to obtain the media-type referenced
+	// by the Content-Type header field.
+	ContentEncoding *string `location:"header" locationName:"Content-Encoding" type:"string"`
+
+	// The language the content is in.
+	ContentLanguage *string `location:"header" locationName:"Content-Language" type:"string"`
+
+	// Size of the body in bytes. This parameter is useful when the size of the
+	// body cannot be determined automatically.
+	ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"`
+
+	// A standard MIME type describing the format of the object data.
+	ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
+
+	// The date and time at which the object is no longer cacheable.
+	Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp" timestampFormat:"rfc822"`
+
+	// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
+	GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
+
+	// Allows grantee to read the object data and its metadata.
+	GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
+
+	// Allows grantee to read the object ACL.
+	GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
+
+	// Allows grantee to write the ACL for the applicable object.
+	GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
+
+	// Object key for which the PUT operation was initiated.
+	//
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// A map of metadata to store with the object in S3.
+	Metadata map[string]*string `location:"headers" locationName:"x-amz-meta-" type:"map"`
+
+	// Confirms that the requester knows that she or he will be charged for the
+	// request. Bucket owners need not specify this parameter in their requests.
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	// Specifies the algorithm to use when encrypting the object (e.g., AES256).
+	SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"`
+
+	// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
+	// data. This value is used to store the object and then it is discarded; Amazon
+	// does not store the encryption key.
The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Specifies the AWS KMS key ID to use for object encryption. All GET and PUT + // requests for an object protected by AWS KMS will fail if not made via SSL + // or using SigV4. Documentation on configuring any of the officially supported + // AWS SDKs and CLI can be found at http://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // The type of storage to use for the object. Defaults to 'STANDARD'. + StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"` + + // The tag-set for the object. The tag-set must be encoded as URL Query parameters + Tagging *string `location:"header" locationName:"x-amz-tagging" type:"string"` + + // If the bucket is configured as a website, redirects requests for this object + // to another object in the same bucket or to an external URL. Amazon S3 stores + // the value of this header in the object metadata. + WebsiteRedirectLocation *string `location:"header" locationName:"x-amz-website-redirect-location" type:"string"` +} + +// String returns the string representation +func (s PutObjectInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *PutObjectInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "PutObjectInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetACL sets the ACL field's value. +func (s *PutObjectInput) SetACL(v string) *PutObjectInput { + s.ACL = &v + return s +} + +// SetBody sets the Body field's value. +func (s *PutObjectInput) SetBody(v io.ReadSeeker) *PutObjectInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *PutObjectInput) SetBucket(v string) *PutObjectInput { + s.Bucket = &v + return s +} + +// SetCacheControl sets the CacheControl field's value. +func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { + s.CacheControl = &v + return s +} + +// SetContentDisposition sets the ContentDisposition field's value. 
+func (s *PutObjectInput) SetContentDisposition(v string) *PutObjectInput { + s.ContentDisposition = &v + return s +} + +// SetContentEncoding sets the ContentEncoding field's value. +func (s *PutObjectInput) SetContentEncoding(v string) *PutObjectInput { + s.ContentEncoding = &v + return s +} + +// SetContentLanguage sets the ContentLanguage field's value. +func (s *PutObjectInput) SetContentLanguage(v string) *PutObjectInput { + s.ContentLanguage = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *PutObjectInput) SetContentLength(v int64) *PutObjectInput { + s.ContentLength = &v + return s +} + +// SetContentType sets the ContentType field's value. +func (s *PutObjectInput) SetContentType(v string) *PutObjectInput { + s.ContentType = &v + return s +} + +// SetExpires sets the Expires field's value. +func (s *PutObjectInput) SetExpires(v time.Time) *PutObjectInput { + s.Expires = &v + return s +} + +// SetGrantFullControl sets the GrantFullControl field's value. +func (s *PutObjectInput) SetGrantFullControl(v string) *PutObjectInput { + s.GrantFullControl = &v + return s +} + +// SetGrantRead sets the GrantRead field's value. +func (s *PutObjectInput) SetGrantRead(v string) *PutObjectInput { + s.GrantRead = &v + return s +} + +// SetGrantReadACP sets the GrantReadACP field's value. +func (s *PutObjectInput) SetGrantReadACP(v string) *PutObjectInput { + s.GrantReadACP = &v + return s +} + +// SetGrantWriteACP sets the GrantWriteACP field's value. +func (s *PutObjectInput) SetGrantWriteACP(v string) *PutObjectInput { + s.GrantWriteACP = &v + return s +} + +// SetKey sets the Key field's value. +func (s *PutObjectInput) SetKey(v string) *PutObjectInput { + s.Key = &v + return s +} + +// SetMetadata sets the Metadata field's value. +func (s *PutObjectInput) SetMetadata(v map[string]*string) *PutObjectInput { + s.Metadata = v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *PutObjectInput) SetRequestPayer(v string) *PutObjectInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectInput) SetSSECustomerAlgorithm(v string) *PutObjectInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *PutObjectInput) SetSSECustomerKey(v string) *PutObjectInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectInput) SetSSECustomerKeyMD5(v string) *PutObjectInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectInput) SetSSEKMSKeyId(v string) *PutObjectInput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *PutObjectInput) SetServerSideEncryption(v string) *PutObjectInput { + s.ServerSideEncryption = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *PutObjectInput) SetStorageClass(v string) *PutObjectInput { + s.StorageClass = &v + return s +} + +// SetTagging sets the Tagging field's value. +func (s *PutObjectInput) SetTagging(v string) *PutObjectInput { + s.Tagging = &v + return s +} + +// SetWebsiteRedirectLocation sets the WebsiteRedirectLocation field's value. 
+func (s *PutObjectInput) SetWebsiteRedirectLocation(v string) *PutObjectInput { + s.WebsiteRedirectLocation = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectOutput +type PutObjectOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If the object expiration is configured, this will contain the expiration + // date (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded. + Expiration *string `location:"header" locationName:"x-amz-expiration" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` + + // Version of the object. + VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"` +} + +// String returns the string representation +func (s PutObjectOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PutObjectOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { + s.ETag = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *PutObjectOutput) SetExpiration(v string) *PutObjectOutput { + s.Expiration = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *PutObjectOutput) SetRequestCharged(v string) *PutObjectOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *PutObjectOutput) SetSSECustomerAlgorithm(v string) *PutObjectOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *PutObjectOutput) SetSSECustomerKeyMD5(v string) *PutObjectOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *PutObjectOutput) SetSSEKMSKeyId(v string) *PutObjectOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. 
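+// Example (illustrative sketch, not generated code): a minimal upload built
+// with the PutObjectInput setters above. It assumes the usual session and S3
+// client constructors and the PutObject operation defined earlier in this
+// file; the bucket, key, and body are placeholders.
+//
+//	import (
+//		"strings"
+//
+//		"github.com/aws/aws-sdk-go/aws/session"
+//		"github.com/aws/aws-sdk-go/service/s3"
+//	)
+//
+//	func upload() error {
+//		svc := s3.New(session.New())
+//		input := (&s3.PutObjectInput{}).
+//			SetBucket("example-bucket").
+//			SetKey("docs/hello.txt").
+//			SetBody(strings.NewReader("hello world")).
+//			SetContentType("text/plain").
+//			SetServerSideEncryption("AES256")
+//		if err := input.Validate(); err != nil {
+//			return err
+//		}
+//		_, err := svc.PutObject(input)
+//		return err
+//	}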
+func (s *PutObjectOutput) SetServerSideEncryption(v string) *PutObjectOutput {
+	s.ServerSideEncryption = &v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectOutput) SetVersionId(v string) *PutObjectOutput {
+	s.VersionId = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingRequest
+type PutObjectTaggingInput struct {
+	_ struct{} `type:"structure" payload:"Tagging"`
+
+	// Bucket is a required field
+	Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
+
+	// Key is a required field
+	Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`
+
+	// Tagging is a required field
+	Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
+
+	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *PutObjectTaggingInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "PutObjectTaggingInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+	if s.Tagging == nil {
+		invalidParams.Add(request.NewErrParamRequired("Tagging"))
+	}
+	if s.Tagging != nil {
+		if err := s.Tagging.Validate(); err != nil {
+			invalidParams.AddNested("Tagging", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *PutObjectTaggingInput) SetBucket(v string) *PutObjectTaggingInput {
+	s.Bucket = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *PutObjectTaggingInput) SetKey(v string) *PutObjectTaggingInput {
+	s.Key = &v
+	return s
+}
+
+// SetTagging sets the Tagging field's value.
+func (s *PutObjectTaggingInput) SetTagging(v *Tagging) *PutObjectTaggingInput {
+	s.Tagging = v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingInput) SetVersionId(v string) *PutObjectTaggingInput {
+	s.VersionId = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTaggingOutput
+type PutObjectTaggingOutput struct {
+	_ struct{} `type:"structure"`
+
+	VersionId *string `location:"header" locationName:"x-amz-version-id" type:"string"`
+}
+
+// String returns the string representation
+func (s PutObjectTaggingOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s PutObjectTaggingOutput) GoString() string {
+	return s.String()
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *PutObjectTaggingOutput) SetVersionId(v string) *PutObjectTaggingOutput {
+	s.VersionId = &v
+	return s
+}
+
+// Container for specifying a configuration when you want Amazon S3 to publish
+// events to an Amazon Simple Queue Service (Amazon SQS) queue.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration +type QueueConfiguration struct { + _ struct{} `type:"structure"` + + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SQS queue ARN to which Amazon S3 will publish a message when it detects + // events of specified type. + // + // QueueArn is a required field + QueueArn *string `locationName:"Queue" type:"string" required:"true"` +} + +// String returns the string representation +func (s QueueConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *QueueConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "QueueConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.QueueArn == nil { + invalidParams.Add(request.NewErrParamRequired("QueueArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *QueueConfiguration) SetEvents(v []*string) *QueueConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *QueueConfiguration) SetFilter(v *NotificationConfigurationFilter) *QueueConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *QueueConfiguration) SetId(v string) *QueueConfiguration { + s.Id = &v + return s +} + +// SetQueueArn sets the QueueArn field's value. +func (s *QueueConfiguration) SetQueueArn(v string) *QueueConfiguration { + s.QueueArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated +type QueueConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + Queue *string `type:"string"` +} + +// String returns the string representation +func (s QueueConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s QueueConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. +func (s *QueueConfigurationDeprecated) SetEvent(v string) *QueueConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *QueueConfigurationDeprecated) SetEvents(v []*string) *QueueConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. 
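+// Example (illustrative sketch, not generated code): publishing bucket events
+// to an SQS queue combines the QueueConfiguration above with the
+// PutBucketNotificationConfigurationInput defined earlier. The
+// NotificationConfiguration is assumed to hold a QueueConfigurations slice, as
+// it does elsewhere in this file; the queue ARN, event name, and bucket are
+// placeholders.
+//
+//	import (
+//		"github.com/aws/aws-sdk-go/aws"
+//		"github.com/aws/aws-sdk-go/service/s3"
+//	)
+//
+//	func queueNotificationInput() (*s3.PutBucketNotificationConfigurationInput, error) {
+//		queueCfg := (&s3.QueueConfiguration{}).
+//			SetQueueArn("arn:aws:sqs:eu-west-1:123456789012:example-queue").
+//			SetEvents([]*string{aws.String("s3:ObjectCreated:*")})
+//		if err := queueCfg.Validate(); err != nil {
+//			return nil, err
+//		}
+//		input := (&s3.PutBucketNotificationConfigurationInput{}).
+//			SetBucket("example-bucket").
+//			SetNotificationConfiguration(&s3.NotificationConfiguration{
+//				QueueConfigurations: []*s3.QueueConfiguration{queueCfg},
+//			})
+//		return input, input.Validate()
+//	}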
+func (s *QueueConfigurationDeprecated) SetId(v string) *QueueConfigurationDeprecated {
+	s.Id = &v
+	return s
+}
+
+// SetQueue sets the Queue field's value.
+func (s *QueueConfigurationDeprecated) SetQueue(v string) *QueueConfigurationDeprecated {
+	s.Queue = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect
+type Redirect struct {
+	_ struct{} `type:"structure"`
+
+	// The host name to use in the redirect request.
+	HostName *string `type:"string"`
+
+	// The HTTP redirect code to use on the response. Not required if one of the
+	// siblings is present.
+	HttpRedirectCode *string `type:"string"`
+
+	// Protocol to use (http, https) when redirecting requests. The default is the
+	// protocol that is used in the original request.
+	Protocol *string `type:"string" enum:"Protocol"`
+
+	// The object key prefix to use in the redirect request. For example, to redirect
+	// requests for all pages with prefix docs/ (objects in the docs/ folder) to
+	// documents/, you can set a condition block with KeyPrefixEquals set to docs/
+	// and in the Redirect set ReplaceKeyPrefixWith to /documents. Not required
+	// if one of the siblings is present. Can be present only if ReplaceKeyWith
+	// is not provided.
+	ReplaceKeyPrefixWith *string `type:"string"`
+
+	// The specific object key to use in the redirect request. For example, redirect
+	// request to error.html. Not required if one of the siblings is present. Can
+	// be present only if ReplaceKeyPrefixWith is not provided.
+	ReplaceKeyWith *string `type:"string"`
+}
+
+// String returns the string representation
+func (s Redirect) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s Redirect) GoString() string {
+	return s.String()
+}
+
+// SetHostName sets the HostName field's value.
+func (s *Redirect) SetHostName(v string) *Redirect {
+	s.HostName = &v
+	return s
+}
+
+// SetHttpRedirectCode sets the HttpRedirectCode field's value.
+func (s *Redirect) SetHttpRedirectCode(v string) *Redirect {
+	s.HttpRedirectCode = &v
+	return s
+}
+
+// SetProtocol sets the Protocol field's value.
+func (s *Redirect) SetProtocol(v string) *Redirect {
+	s.Protocol = &v
+	return s
+}
+
+// SetReplaceKeyPrefixWith sets the ReplaceKeyPrefixWith field's value.
+func (s *Redirect) SetReplaceKeyPrefixWith(v string) *Redirect {
+	s.ReplaceKeyPrefixWith = &v
+	return s
+}
+
+// SetReplaceKeyWith sets the ReplaceKeyWith field's value.
+func (s *Redirect) SetReplaceKeyWith(v string) *Redirect {
+	s.ReplaceKeyWith = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo
+type RedirectAllRequestsTo struct {
+	_ struct{} `type:"structure"`
+
+	// Name of the host where requests will be redirected.
+	//
+	// HostName is a required field
+	HostName *string `type:"string" required:"true"`
+
+	// Protocol to use (http, https) when redirecting requests. The default is the
+	// protocol that is used in the original request.
+	Protocol *string `type:"string" enum:"Protocol"`
+}
+
+// String returns the string representation
+func (s RedirectAllRequestsTo) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RedirectAllRequestsTo) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RedirectAllRequestsTo) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RedirectAllRequestsTo"} + if s.HostName == nil { + invalidParams.Add(request.NewErrParamRequired("HostName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetHostName sets the HostName field's value. +func (s *RedirectAllRequestsTo) SetHostName(v string) *RedirectAllRequestsTo { + s.HostName = &v + return s +} + +// SetProtocol sets the Protocol field's value. +func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { + s.Protocol = &v + return s +} + +// Container for replication rules. You can add as many as 1,000 rules. Total +// replication configuration size can be up to 2 MB. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration +type ReplicationConfiguration struct { + _ struct{} `type:"structure"` + + // Amazon Resource Name (ARN) of an IAM role for Amazon S3 to assume when replicating + // the objects. + // + // Role is a required field + Role *string `type:"string" required:"true"` + + // Container for information about a particular replication rule. Replication + // configuration must have at least one rule and can contain up to 1,000 rules. + // + // Rules is a required field + Rules []*ReplicationRule `locationName:"Rule" type:"list" flattened:"true" required:"true"` +} + +// String returns the string representation +func (s ReplicationConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationConfiguration"} + if s.Role == nil { + invalidParams.Add(request.NewErrParamRequired("Role")) + } + if s.Rules == nil { + invalidParams.Add(request.NewErrParamRequired("Rules")) + } + if s.Rules != nil { + for i, v := range s.Rules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Rules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRole sets the Role field's value. +func (s *ReplicationConfiguration) SetRole(v string) *ReplicationConfiguration { + s.Role = &v + return s +} + +// SetRules sets the Rules field's value. +func (s *ReplicationConfiguration) SetRules(v []*ReplicationRule) *ReplicationConfiguration { + s.Rules = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule +type ReplicationRule struct { + _ struct{} `type:"structure"` + + // Destination is a required field + Destination *Destination `type:"structure" required:"true"` + + // Unique identifier for the rule. The value cannot be longer than 255 characters. + ID *string `type:"string"` + + // Object keyname prefix identifying one or more objects to which the rule applies. + // Maximum prefix length can be up to 1,024 characters. Overlapping prefixes + // are not supported. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // The rule is ignored if status is not Enabled. 
+ // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicationRuleStatus"` +} + +// String returns the string representation +func (s ReplicationRule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicationRule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicationRule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicationRule"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *ReplicationRule) SetDestination(v *Destination) *ReplicationRule { + s.Destination = v + return s +} + +// SetID sets the ID field's value. +func (s *ReplicationRule) SetID(v string) *ReplicationRule { + s.ID = &v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *ReplicationRule) SetPrefix(v string) *ReplicationRule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *ReplicationRule) SetStatus(v string) *ReplicationRule { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration +type RequestPaymentConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies who pays for the download and request fees. + // + // Payer is a required field + Payer *string `type:"string" required:"true" enum:"Payer"` +} + +// String returns the string representation +func (s RequestPaymentConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RequestPaymentConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *RequestPaymentConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "RequestPaymentConfiguration"} + if s.Payer == nil { + invalidParams.Add(request.NewErrParamRequired("Payer")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetPayer sets the Payer field's value. +func (s *RequestPaymentConfiguration) SetPayer(v string) *RequestPaymentConfiguration { + s.Payer = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectRequest +type RestoreObjectInput struct { + _ struct{} `type:"structure" payload:"RestoreRequest"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. 
+	// Documentation on downloading objects from requester pays buckets can be found
+	// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
+	RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`
+
+	RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"`
+
+	VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
+}
+
+// String returns the string representation
+func (s RestoreObjectInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreObjectInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RestoreObjectInput"}
+	if s.Bucket == nil {
+		invalidParams.Add(request.NewErrParamRequired("Bucket"))
+	}
+	if s.Key == nil {
+		invalidParams.Add(request.NewErrParamRequired("Key"))
+	}
+	if s.Key != nil && len(*s.Key) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Key", 1))
+	}
+	if s.RestoreRequest != nil {
+		if err := s.RestoreRequest.Validate(); err != nil {
+			invalidParams.AddNested("RestoreRequest", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetBucket sets the Bucket field's value.
+func (s *RestoreObjectInput) SetBucket(v string) *RestoreObjectInput {
+	s.Bucket = &v
+	return s
+}
+
+// SetKey sets the Key field's value.
+func (s *RestoreObjectInput) SetKey(v string) *RestoreObjectInput {
+	s.Key = &v
+	return s
+}
+
+// SetRequestPayer sets the RequestPayer field's value.
+func (s *RestoreObjectInput) SetRequestPayer(v string) *RestoreObjectInput {
+	s.RequestPayer = &v
+	return s
+}
+
+// SetRestoreRequest sets the RestoreRequest field's value.
+func (s *RestoreObjectInput) SetRestoreRequest(v *RestoreRequest) *RestoreObjectInput {
+	s.RestoreRequest = v
+	return s
+}
+
+// SetVersionId sets the VersionId field's value.
+func (s *RestoreObjectInput) SetVersionId(v string) *RestoreObjectInput {
+	s.VersionId = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObjectOutput
+type RestoreObjectOutput struct {
+	_ struct{} `type:"structure"`
+
+	// If present, indicates that the requester was successfully charged for the
+	// request.
+	RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"`
+}
+
+// String returns the string representation
+func (s RestoreObjectOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreObjectOutput) GoString() string {
+	return s.String()
+}
+
+// SetRequestCharged sets the RequestCharged field's value.
+func (s *RestoreObjectOutput) SetRequestCharged(v string) *RestoreObjectOutput {
+	s.RequestCharged = &v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest
+type RestoreRequest struct {
+	_ struct{} `type:"structure"`
+
+	// Lifetime of the active copy in days
+	//
+	// Days is a required field
+	Days *int64 `type:"integer" required:"true"`
+
+	// Glacier related parameters pertaining to this job.
+	GlacierJobParameters *GlacierJobParameters `type:"structure"`
+}
+
+// String returns the string representation
+func (s RestoreRequest) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RestoreRequest) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RestoreRequest) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RestoreRequest"}
+	if s.Days == nil {
+		invalidParams.Add(request.NewErrParamRequired("Days"))
+	}
+	if s.GlacierJobParameters != nil {
+		if err := s.GlacierJobParameters.Validate(); err != nil {
+			invalidParams.AddNested("GlacierJobParameters", err.(request.ErrInvalidParams))
+		}
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDays sets the Days field's value.
+func (s *RestoreRequest) SetDays(v int64) *RestoreRequest {
+	s.Days = &v
+	return s
+}
+
+// SetGlacierJobParameters sets the GlacierJobParameters field's value.
+func (s *RestoreRequest) SetGlacierJobParameters(v *GlacierJobParameters) *RestoreRequest {
+	s.GlacierJobParameters = v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule
+type RoutingRule struct {
+	_ struct{} `type:"structure"`
+
+	// A container for describing a condition that must be met for the specified
+	// redirect to apply. For example, 1. If request is for pages in the /docs folder,
+	// redirect to the /documents folder. 2. If request results in HTTP error 4xx,
+	// redirect request to another host where you might process the error.
+	Condition *Condition `type:"structure"`
+
+	// Container for redirect information. You can redirect requests to another
+	// host, to another page, or with another protocol. In the event of an error,
+	// you can specify a different error code to return.
+	//
+	// Redirect is a required field
+	Redirect *Redirect `type:"structure" required:"true"`
+}
+
+// String returns the string representation
+func (s RoutingRule) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s RoutingRule) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *RoutingRule) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "RoutingRule"}
+	if s.Redirect == nil {
+		invalidParams.Add(request.NewErrParamRequired("Redirect"))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetCondition sets the Condition field's value.
+func (s *RoutingRule) SetCondition(v *Condition) *RoutingRule {
+	s.Condition = v
+	return s
+}
+
+// SetRedirect sets the Redirect field's value.
+func (s *RoutingRule) SetRedirect(v *Redirect) *RoutingRule {
+	s.Redirect = v
+	return s
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule
+type Rule struct {
+	_ struct{} `type:"structure"`
+
+	// Specifies the days since the initiation of an Incomplete Multipart Upload
+	// that Lifecycle will wait before permanently removing all parts of the upload.
+	AbortIncompleteMultipartUpload *AbortIncompleteMultipartUpload `type:"structure"`
+
+	Expiration *LifecycleExpiration `type:"structure"`
+
+	// Unique identifier for the rule. The value cannot be longer than 255 characters.
+	ID *string `type:"string"`
+
+	// Specifies when noncurrent object versions expire.
Upon expiration, Amazon + // S3 permanently deletes the noncurrent object versions. You set this lifecycle + // configuration action on a bucket that has versioning enabled (or suspended) + // to request that Amazon S3 delete noncurrent object versions at a specific + // period in the object's lifetime. + NoncurrentVersionExpiration *NoncurrentVersionExpiration `type:"structure"` + + // Container for the transition rule that describes when noncurrent objects + // transition to the STANDARD_IA or GLACIER storage class. If your bucket is + // versioning-enabled (or versioning is suspended), you can set this action + // to request that Amazon S3 transition noncurrent object versions to the STANDARD_IA + // or GLACIER storage class at a specific period in the object's lifetime. + NoncurrentVersionTransition *NoncurrentVersionTransition `type:"structure"` + + // Prefix identifying one or more objects to which the rule applies. + // + // Prefix is a required field + Prefix *string `type:"string" required:"true"` + + // If 'Enabled', the rule is currently being applied. If 'Disabled', the rule + // is not currently being applied. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ExpirationStatus"` + + Transition *Transition `type:"structure"` +} + +// String returns the string representation +func (s Rule) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Rule) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Rule) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Rule"} + if s.Prefix == nil { + invalidParams.Add(request.NewErrParamRequired("Prefix")) + } + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetAbortIncompleteMultipartUpload sets the AbortIncompleteMultipartUpload field's value. +func (s *Rule) SetAbortIncompleteMultipartUpload(v *AbortIncompleteMultipartUpload) *Rule { + s.AbortIncompleteMultipartUpload = v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Rule) SetExpiration(v *LifecycleExpiration) *Rule { + s.Expiration = v + return s +} + +// SetID sets the ID field's value. +func (s *Rule) SetID(v string) *Rule { + s.ID = &v + return s +} + +// SetNoncurrentVersionExpiration sets the NoncurrentVersionExpiration field's value. +func (s *Rule) SetNoncurrentVersionExpiration(v *NoncurrentVersionExpiration) *Rule { + s.NoncurrentVersionExpiration = v + return s +} + +// SetNoncurrentVersionTransition sets the NoncurrentVersionTransition field's value. +func (s *Rule) SetNoncurrentVersionTransition(v *NoncurrentVersionTransition) *Rule { + s.NoncurrentVersionTransition = v + return s +} + +// SetPrefix sets the Prefix field's value. +func (s *Rule) SetPrefix(v string) *Rule { + s.Prefix = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *Rule) SetStatus(v string) *Rule { + s.Status = &v + return s +} + +// SetTransition sets the Transition field's value. +func (s *Rule) SetTransition(v *Transition) *Rule { + s.Transition = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis +type StorageClassAnalysis struct { + _ struct{} `type:"structure"` + + // A container used to describe how data related to the storage class analysis + // should be exported. 
+ DataExport *StorageClassAnalysisDataExport `type:"structure"` +} + +// String returns the string representation +func (s StorageClassAnalysis) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageClassAnalysis) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysis) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysis"} + if s.DataExport != nil { + if err := s.DataExport.Validate(); err != nil { + invalidParams.AddNested("DataExport", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDataExport sets the DataExport field's value. +func (s *StorageClassAnalysis) SetDataExport(v *StorageClassAnalysisDataExport) *StorageClassAnalysis { + s.DataExport = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport +type StorageClassAnalysisDataExport struct { + _ struct{} `type:"structure"` + + // The place to store the data for an analysis. + // + // Destination is a required field + Destination *AnalyticsExportDestination `type:"structure" required:"true"` + + // The version of the output schema to use when exporting data. Must be V_1. + // + // OutputSchemaVersion is a required field + OutputSchemaVersion *string `type:"string" required:"true" enum:"StorageClassAnalysisSchemaVersion"` +} + +// String returns the string representation +func (s StorageClassAnalysisDataExport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s StorageClassAnalysisDataExport) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *StorageClassAnalysisDataExport) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "StorageClassAnalysisDataExport"} + if s.Destination == nil { + invalidParams.Add(request.NewErrParamRequired("Destination")) + } + if s.OutputSchemaVersion == nil { + invalidParams.Add(request.NewErrParamRequired("OutputSchemaVersion")) + } + if s.Destination != nil { + if err := s.Destination.Validate(); err != nil { + invalidParams.AddNested("Destination", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDestination sets the Destination field's value. +func (s *StorageClassAnalysisDataExport) SetDestination(v *AnalyticsExportDestination) *StorageClassAnalysisDataExport { + s.Destination = v + return s +} + +// SetOutputSchemaVersion sets the OutputSchemaVersion field's value. +func (s *StorageClassAnalysisDataExport) SetOutputSchemaVersion(v string) *StorageClassAnalysisDataExport { + s.OutputSchemaVersion = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag +type Tag struct { + _ struct{} `type:"structure"` + + // Name of the tag. + // + // Key is a required field + Key *string `min:"1" type:"string" required:"true"` + + // Value of the tag. 
+ // + // Value is a required field + Value *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Tag) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tag) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tag) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tag"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.Value == nil { + invalidParams.Add(request.NewErrParamRequired("Value")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetKey sets the Key field's value. +func (s *Tag) SetKey(v string) *Tag { + s.Key = &v + return s +} + +// SetValue sets the Value field's value. +func (s *Tag) SetValue(v string) *Tag { + s.Value = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging +type Tagging struct { + _ struct{} `type:"structure"` + + // TagSet is a required field + TagSet []*Tag `locationNameList:"Tag" type:"list" required:"true"` +} + +// String returns the string representation +func (s Tagging) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Tagging) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Tagging) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Tagging"} + if s.TagSet == nil { + invalidParams.Add(request.NewErrParamRequired("TagSet")) + } + if s.TagSet != nil { + for i, v := range s.TagSet { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TagSet", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTagSet sets the TagSet field's value. +func (s *Tagging) SetTagSet(v []*Tag) *Tagging { + s.TagSet = v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant +type TargetGrant struct { + _ struct{} `type:"structure"` + + Grantee *Grantee `type:"structure"` + + // Logging permissions assigned to the Grantee for the bucket. + Permission *string `type:"string" enum:"BucketLogsPermission"` +} + +// String returns the string representation +func (s TargetGrant) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TargetGrant) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TargetGrant) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TargetGrant"} + if s.Grantee != nil { + if err := s.Grantee.Validate(); err != nil { + invalidParams.AddNested("Grantee", err.(request.ErrInvalidParams)) + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetGrantee sets the Grantee field's value. +func (s *TargetGrant) SetGrantee(v *Grantee) *TargetGrant { + s.Grantee = v + return s +} + +// SetPermission sets the Permission field's value. 
+func (s *TargetGrant) SetPermission(v string) *TargetGrant { + s.Permission = &v + return s +} + +// Container for specifying the configuration when you want Amazon S3 to publish +// events to an Amazon Simple Notification Service (Amazon SNS) topic. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration +type TopicConfiguration struct { + _ struct{} `type:"structure"` + + // Events is a required field + Events []*string `locationName:"Event" type:"list" flattened:"true" required:"true"` + + // Container for object key name filtering rules. For information about key + // name filtering, go to Configuring Event Notifications (http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html) + Filter *NotificationConfigurationFilter `type:"structure"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic ARN to which Amazon S3 will publish a message when it detects + // events of specified type. + // + // TopicArn is a required field + TopicArn *string `locationName:"Topic" type:"string" required:"true"` +} + +// String returns the string representation +func (s TopicConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *TopicConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "TopicConfiguration"} + if s.Events == nil { + invalidParams.Add(request.NewErrParamRequired("Events")) + } + if s.TopicArn == nil { + invalidParams.Add(request.NewErrParamRequired("TopicArn")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEvents sets the Events field's value. +func (s *TopicConfiguration) SetEvents(v []*string) *TopicConfiguration { + s.Events = v + return s +} + +// SetFilter sets the Filter field's value. +func (s *TopicConfiguration) SetFilter(v *NotificationConfigurationFilter) *TopicConfiguration { + s.Filter = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfiguration) SetId(v string) *TopicConfiguration { + s.Id = &v + return s +} + +// SetTopicArn sets the TopicArn field's value. +func (s *TopicConfiguration) SetTopicArn(v string) *TopicConfiguration { + s.TopicArn = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated +type TopicConfigurationDeprecated struct { + _ struct{} `type:"structure"` + + // Bucket event for which to send notifications. + Event *string `deprecated:"true" type:"string" enum:"Event"` + + Events []*string `locationName:"Event" type:"list" flattened:"true"` + + // Optional unique identifier for configurations in a notification configuration. + // If you don't provide one, Amazon S3 will assign an ID. + Id *string `type:"string"` + + // Amazon SNS topic to which Amazon S3 will publish a message to report the + // specified events for the bucket. + Topic *string `type:"string"` +} + +// String returns the string representation +func (s TopicConfigurationDeprecated) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s TopicConfigurationDeprecated) GoString() string { + return s.String() +} + +// SetEvent sets the Event field's value. 
+func (s *TopicConfigurationDeprecated) SetEvent(v string) *TopicConfigurationDeprecated { + s.Event = &v + return s +} + +// SetEvents sets the Events field's value. +func (s *TopicConfigurationDeprecated) SetEvents(v []*string) *TopicConfigurationDeprecated { + s.Events = v + return s +} + +// SetId sets the Id field's value. +func (s *TopicConfigurationDeprecated) SetId(v string) *TopicConfigurationDeprecated { + s.Id = &v + return s +} + +// SetTopic sets the Topic field's value. +func (s *TopicConfigurationDeprecated) SetTopic(v string) *TopicConfigurationDeprecated { + s.Topic = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition +type Transition struct { + _ struct{} `type:"structure"` + + // Indicates at what date the object is to be moved or deleted. Should be in + // GMT ISO 8601 Format. + Date *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // Indicates the lifetime, in days, of the objects that are subject to the rule. + // The value must be a non-zero positive integer. + Days *int64 `type:"integer"` + + // The class of storage used to store the object. + StorageClass *string `type:"string" enum:"TransitionStorageClass"` +} + +// String returns the string representation +func (s Transition) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Transition) GoString() string { + return s.String() +} + +// SetDate sets the Date field's value. +func (s *Transition) SetDate(v time.Time) *Transition { + s.Date = &v + return s +} + +// SetDays sets the Days field's value. +func (s *Transition) SetDays(v int64) *Transition { + s.Days = &v + return s +} + +// SetStorageClass sets the StorageClass field's value. +func (s *Transition) SetStorageClass(v string) *Transition { + s.StorageClass = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyRequest +type UploadPartCopyInput struct { + _ struct{} `type:"structure"` + + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // The name of the source bucket and key name of the source object, separated + // by a slash (/). Must be URL-encoded. + // + // CopySource is a required field + CopySource *string `location:"header" locationName:"x-amz-copy-source" type:"string" required:"true"` + + // Copies the object if its entity tag (ETag) matches the specified tag. + CopySourceIfMatch *string `location:"header" locationName:"x-amz-copy-source-if-match" type:"string"` + + // Copies the object if it has been modified since the specified time. + CopySourceIfModifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-modified-since" type:"timestamp" timestampFormat:"rfc822"` + + // Copies the object if its entity tag (ETag) is different than the specified + // ETag. + CopySourceIfNoneMatch *string `location:"header" locationName:"x-amz-copy-source-if-none-match" type:"string"` + + // Copies the object if it hasn't been modified since the specified time. + CopySourceIfUnmodifiedSince *time.Time `location:"header" locationName:"x-amz-copy-source-if-unmodified-since" type:"timestamp" timestampFormat:"rfc822"` + + // The range of bytes to copy from the source object. The range value must use + // the form bytes=first-last, where the first and last are the zero-based byte + // offsets to copy. For example, bytes=0-9 indicates that you want to copy the + // first ten bytes of the source. 
You can copy a range only if the source object + // is greater than 5 GB. + CopySourceRange *string `location:"header" locationName:"x-amz-copy-source-range" type:"string"` + + // Specifies the algorithm to use when decrypting the source object (e.g., AES256). + CopySourceSSECustomerAlgorithm *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use to decrypt + // the source object. The encryption key provided in this header must be one + // that was used when the source object was created. + CopySourceSSECustomerKey *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + CopySourceSSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-copy-source-server-side-encryption-customer-key-MD5" type:"string"` + + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being copied. This is a positive integer between 1 and + // 10,000. + // + // PartNumber is a required field + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. + // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being copied. 
+ // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartCopyInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartCopyInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartCopyInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.CopySource == nil { + invalidParams.Add(request.NewErrParamRequired("CopySource")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartCopyInput) SetBucket(v string) *UploadPartCopyInput { + s.Bucket = &v + return s +} + +// SetCopySource sets the CopySource field's value. +func (s *UploadPartCopyInput) SetCopySource(v string) *UploadPartCopyInput { + s.CopySource = &v + return s +} + +// SetCopySourceIfMatch sets the CopySourceIfMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfMatch(v string) *UploadPartCopyInput { + s.CopySourceIfMatch = &v + return s +} + +// SetCopySourceIfModifiedSince sets the CopySourceIfModifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfModifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfModifiedSince = &v + return s +} + +// SetCopySourceIfNoneMatch sets the CopySourceIfNoneMatch field's value. +func (s *UploadPartCopyInput) SetCopySourceIfNoneMatch(v string) *UploadPartCopyInput { + s.CopySourceIfNoneMatch = &v + return s +} + +// SetCopySourceIfUnmodifiedSince sets the CopySourceIfUnmodifiedSince field's value. +func (s *UploadPartCopyInput) SetCopySourceIfUnmodifiedSince(v time.Time) *UploadPartCopyInput { + s.CopySourceIfUnmodifiedSince = &v + return s +} + +// SetCopySourceRange sets the CopySourceRange field's value. +func (s *UploadPartCopyInput) SetCopySourceRange(v string) *UploadPartCopyInput { + s.CopySourceRange = &v + return s +} + +// SetCopySourceSSECustomerAlgorithm sets the CopySourceSSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerAlgorithm = &v + return s +} + +// SetCopySourceSSECustomerKey sets the CopySourceSSECustomerKey field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKey(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKey = &v + return s +} + +// SetCopySourceSSECustomerKeyMD5 sets the CopySourceSSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetCopySourceSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.CopySourceSSECustomerKeyMD5 = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartCopyInput) SetKey(v string) *UploadPartCopyInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. 
+func (s *UploadPartCopyInput) SetPartNumber(v int64) *UploadPartCopyInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartCopyInput) SetRequestPayer(v string) *UploadPartCopyInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyInput) SetSSECustomerAlgorithm(v string) *UploadPartCopyInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartCopyInput) SetSSECustomerKey(v string) *UploadPartCopyInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyInput) SetSSECustomerKeyMD5(v string) *UploadPartCopyInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartCopyInput) SetUploadId(v string) *UploadPartCopyInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopyOutput +type UploadPartCopyOutput struct { + _ struct{} `type:"structure" payload:"CopyPartResult"` + + CopyPartResult *CopyPartResult `type:"structure"` + + // The version of the source object that was copied, if you have enabled versioning + // on the source bucket. + CopySourceVersionId *string `location:"header" locationName:"x-amz-copy-source-version-id" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. + RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartCopyOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartCopyOutput) GoString() string { + return s.String() +} + +// SetCopyPartResult sets the CopyPartResult field's value. +func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { + s.CopyPartResult = v + return s +} + +// SetCopySourceVersionId sets the CopySourceVersionId field's value. 
+func (s *UploadPartCopyOutput) SetCopySourceVersionId(v string) *UploadPartCopyOutput { + s.CopySourceVersionId = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartCopyOutput) SetRequestCharged(v string) *UploadPartCopyOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartCopyOutput) SetSSECustomerAlgorithm(v string) *UploadPartCopyOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartCopyOutput) SetSSECustomerKeyMD5(v string) *UploadPartCopyOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartCopyOutput) SetSSEKMSKeyId(v string) *UploadPartCopyOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartCopyOutput) SetServerSideEncryption(v string) *UploadPartCopyOutput { + s.ServerSideEncryption = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartRequest +type UploadPartInput struct { + _ struct{} `type:"structure" payload:"Body"` + + // Object data. + Body io.ReadSeeker `type:"blob"` + + // Name of the bucket to which the multipart upload was initiated. + // + // Bucket is a required field + Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + + // Size of the body in bytes. This parameter is useful when the size of the + // body cannot be determined automatically. + ContentLength *int64 `location:"header" locationName:"Content-Length" type:"long"` + + // Object key for which the multipart upload was initiated. + // + // Key is a required field + Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"` + + // Part number of part being uploaded. This is a positive integer between 1 + // and 10,000. + // + // PartNumber is a required field + PartNumber *int64 `location:"querystring" locationName:"partNumber" type:"integer" required:"true"` + + // Confirms that the requester knows that she or he will be charged for the + // request. Bucket owners need not specify this parameter in their requests. + // Documentation on downloading objects from requester pays buckets can be found + // at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html + RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"` + + // Specifies the algorithm to use to when encrypting the object (e.g., AES256). + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // Specifies the customer-provided encryption key for Amazon S3 to use in encrypting + // data. This value is used to store the object and then it is discarded; Amazon + // does not store the encryption key. The key must be appropriate for use with + // the algorithm specified in the x-amz-server-side​-encryption​-customer-algorithm + // header. This must be the same encryption key specified in the initiate multipart + // upload request. + SSECustomerKey *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string"` + + // Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321. 
+ // Amazon S3 uses this header for a message integrity check to ensure the encryption + // key was transmitted without error. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // Upload ID identifying the multipart upload whose part is being uploaded. + // + // UploadId is a required field + UploadId *string `location:"querystring" locationName:"uploadId" type:"string" required:"true"` +} + +// String returns the string representation +func (s UploadPartInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *UploadPartInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "UploadPartInput"} + if s.Bucket == nil { + invalidParams.Add(request.NewErrParamRequired("Bucket")) + } + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.Key != nil && len(*s.Key) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Key", 1)) + } + if s.PartNumber == nil { + invalidParams.Add(request.NewErrParamRequired("PartNumber")) + } + if s.UploadId == nil { + invalidParams.Add(request.NewErrParamRequired("UploadId")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBody sets the Body field's value. +func (s *UploadPartInput) SetBody(v io.ReadSeeker) *UploadPartInput { + s.Body = v + return s +} + +// SetBucket sets the Bucket field's value. +func (s *UploadPartInput) SetBucket(v string) *UploadPartInput { + s.Bucket = &v + return s +} + +// SetContentLength sets the ContentLength field's value. +func (s *UploadPartInput) SetContentLength(v int64) *UploadPartInput { + s.ContentLength = &v + return s +} + +// SetKey sets the Key field's value. +func (s *UploadPartInput) SetKey(v string) *UploadPartInput { + s.Key = &v + return s +} + +// SetPartNumber sets the PartNumber field's value. +func (s *UploadPartInput) SetPartNumber(v int64) *UploadPartInput { + s.PartNumber = &v + return s +} + +// SetRequestPayer sets the RequestPayer field's value. +func (s *UploadPartInput) SetRequestPayer(v string) *UploadPartInput { + s.RequestPayer = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartInput) SetSSECustomerAlgorithm(v string) *UploadPartInput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKey sets the SSECustomerKey field's value. +func (s *UploadPartInput) SetSSECustomerKey(v string) *UploadPartInput { + s.SSECustomerKey = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartInput) SetSSECustomerKeyMD5(v string) *UploadPartInput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetUploadId sets the UploadId field's value. +func (s *UploadPartInput) SetUploadId(v string) *UploadPartInput { + s.UploadId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartOutput +type UploadPartOutput struct { + _ struct{} `type:"structure"` + + // Entity tag for the uploaded object. + ETag *string `location:"header" locationName:"ETag" type:"string"` + + // If present, indicates that the requester was successfully charged for the + // request. 
+ RequestCharged *string `location:"header" locationName:"x-amz-request-charged" type:"string" enum:"RequestCharged"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header confirming the encryption algorithm + // used. + SSECustomerAlgorithm *string `location:"header" locationName:"x-amz-server-side-encryption-customer-algorithm" type:"string"` + + // If server-side encryption with a customer-provided encryption key was requested, + // the response will include this header to provide round trip message integrity + // verification of the customer-provided encryption key. + SSECustomerKeyMD5 *string `location:"header" locationName:"x-amz-server-side-encryption-customer-key-MD5" type:"string"` + + // If present, specifies the ID of the AWS Key Management Service (KMS) master + // encryption key that was used for the object. + SSEKMSKeyId *string `location:"header" locationName:"x-amz-server-side-encryption-aws-kms-key-id" type:"string"` + + // The Server-side encryption algorithm used when storing this object in S3 + // (e.g., AES256, aws:kms). + ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"` +} + +// String returns the string representation +func (s UploadPartOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s UploadPartOutput) GoString() string { + return s.String() +} + +// SetETag sets the ETag field's value. +func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { + s.ETag = &v + return s +} + +// SetRequestCharged sets the RequestCharged field's value. +func (s *UploadPartOutput) SetRequestCharged(v string) *UploadPartOutput { + s.RequestCharged = &v + return s +} + +// SetSSECustomerAlgorithm sets the SSECustomerAlgorithm field's value. +func (s *UploadPartOutput) SetSSECustomerAlgorithm(v string) *UploadPartOutput { + s.SSECustomerAlgorithm = &v + return s +} + +// SetSSECustomerKeyMD5 sets the SSECustomerKeyMD5 field's value. +func (s *UploadPartOutput) SetSSECustomerKeyMD5(v string) *UploadPartOutput { + s.SSECustomerKeyMD5 = &v + return s +} + +// SetSSEKMSKeyId sets the SSEKMSKeyId field's value. +func (s *UploadPartOutput) SetSSEKMSKeyId(v string) *UploadPartOutput { + s.SSEKMSKeyId = &v + return s +} + +// SetServerSideEncryption sets the ServerSideEncryption field's value. +func (s *UploadPartOutput) SetServerSideEncryption(v string) *UploadPartOutput { + s.ServerSideEncryption = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration +type VersioningConfiguration struct { + _ struct{} `type:"structure"` + + // Specifies whether MFA delete is enabled in the bucket versioning configuration. + // This element is only returned if the bucket has been configured with MFA + // delete. If the bucket has never been so configured, this element is not returned. + MFADelete *string `locationName:"MfaDelete" type:"string" enum:"MFADelete"` + + // The versioning state of the bucket. + Status *string `type:"string" enum:"BucketVersioningStatus"` +} + +// String returns the string representation +func (s VersioningConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s VersioningConfiguration) GoString() string { + return s.String() +} + +// SetMFADelete sets the MFADelete field's value. 
+func (s *VersioningConfiguration) SetMFADelete(v string) *VersioningConfiguration { + s.MFADelete = &v + return s +} + +// SetStatus sets the Status field's value. +func (s *VersioningConfiguration) SetStatus(v string) *VersioningConfiguration { + s.Status = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration +type WebsiteConfiguration struct { + _ struct{} `type:"structure"` + + ErrorDocument *ErrorDocument `type:"structure"` + + IndexDocument *IndexDocument `type:"structure"` + + RedirectAllRequestsTo *RedirectAllRequestsTo `type:"structure"` + + RoutingRules []*RoutingRule `locationNameList:"RoutingRule" type:"list"` +} + +// String returns the string representation +func (s WebsiteConfiguration) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s WebsiteConfiguration) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *WebsiteConfiguration) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "WebsiteConfiguration"} + if s.ErrorDocument != nil { + if err := s.ErrorDocument.Validate(); err != nil { + invalidParams.AddNested("ErrorDocument", err.(request.ErrInvalidParams)) + } + } + if s.IndexDocument != nil { + if err := s.IndexDocument.Validate(); err != nil { + invalidParams.AddNested("IndexDocument", err.(request.ErrInvalidParams)) + } + } + if s.RedirectAllRequestsTo != nil { + if err := s.RedirectAllRequestsTo.Validate(); err != nil { + invalidParams.AddNested("RedirectAllRequestsTo", err.(request.ErrInvalidParams)) + } + } + if s.RoutingRules != nil { + for i, v := range s.RoutingRules { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "RoutingRules", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetErrorDocument sets the ErrorDocument field's value. +func (s *WebsiteConfiguration) SetErrorDocument(v *ErrorDocument) *WebsiteConfiguration { + s.ErrorDocument = v + return s +} + +// SetIndexDocument sets the IndexDocument field's value. +func (s *WebsiteConfiguration) SetIndexDocument(v *IndexDocument) *WebsiteConfiguration { + s.IndexDocument = v + return s +} + +// SetRedirectAllRequestsTo sets the RedirectAllRequestsTo field's value. +func (s *WebsiteConfiguration) SetRedirectAllRequestsTo(v *RedirectAllRequestsTo) *WebsiteConfiguration { + s.RedirectAllRequestsTo = v + return s +} + +// SetRoutingRules sets the RoutingRules field's value. 
+func (s *WebsiteConfiguration) SetRoutingRules(v []*RoutingRule) *WebsiteConfiguration { + s.RoutingRules = v + return s +} + +const ( + // AnalyticsS3ExportFileFormatCsv is a AnalyticsS3ExportFileFormat enum value + AnalyticsS3ExportFileFormatCsv = "CSV" +) + +const ( + // BucketAccelerateStatusEnabled is a BucketAccelerateStatus enum value + BucketAccelerateStatusEnabled = "Enabled" + + // BucketAccelerateStatusSuspended is a BucketAccelerateStatus enum value + BucketAccelerateStatusSuspended = "Suspended" +) + +const ( + // BucketCannedACLPrivate is a BucketCannedACL enum value + BucketCannedACLPrivate = "private" + + // BucketCannedACLPublicRead is a BucketCannedACL enum value + BucketCannedACLPublicRead = "public-read" + + // BucketCannedACLPublicReadWrite is a BucketCannedACL enum value + BucketCannedACLPublicReadWrite = "public-read-write" + + // BucketCannedACLAuthenticatedRead is a BucketCannedACL enum value + BucketCannedACLAuthenticatedRead = "authenticated-read" +) + +const ( + // BucketLocationConstraintEu is a BucketLocationConstraint enum value + BucketLocationConstraintEu = "EU" + + // BucketLocationConstraintEuWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuWest1 = "eu-west-1" + + // BucketLocationConstraintUsWest1 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest1 = "us-west-1" + + // BucketLocationConstraintUsWest2 is a BucketLocationConstraint enum value + BucketLocationConstraintUsWest2 = "us-west-2" + + // BucketLocationConstraintApSouth1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSouth1 = "ap-south-1" + + // BucketLocationConstraintApSoutheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast1 = "ap-southeast-1" + + // BucketLocationConstraintApSoutheast2 is a BucketLocationConstraint enum value + BucketLocationConstraintApSoutheast2 = "ap-southeast-2" + + // BucketLocationConstraintApNortheast1 is a BucketLocationConstraint enum value + BucketLocationConstraintApNortheast1 = "ap-northeast-1" + + // BucketLocationConstraintSaEast1 is a BucketLocationConstraint enum value + BucketLocationConstraintSaEast1 = "sa-east-1" + + // BucketLocationConstraintCnNorth1 is a BucketLocationConstraint enum value + BucketLocationConstraintCnNorth1 = "cn-north-1" + + // BucketLocationConstraintEuCentral1 is a BucketLocationConstraint enum value + BucketLocationConstraintEuCentral1 = "eu-central-1" +) + +const ( + // BucketLogsPermissionFullControl is a BucketLogsPermission enum value + BucketLogsPermissionFullControl = "FULL_CONTROL" + + // BucketLogsPermissionRead is a BucketLogsPermission enum value + BucketLogsPermissionRead = "READ" + + // BucketLogsPermissionWrite is a BucketLogsPermission enum value + BucketLogsPermissionWrite = "WRITE" +) + +const ( + // BucketVersioningStatusEnabled is a BucketVersioningStatus enum value + BucketVersioningStatusEnabled = "Enabled" + + // BucketVersioningStatusSuspended is a BucketVersioningStatus enum value + BucketVersioningStatusSuspended = "Suspended" +) + +// Requests Amazon S3 to encode the object keys in the response and specifies +// the encoding method to use. An object key may contain any Unicode character; +// however, XML 1.0 parser cannot parse some characters, such as characters +// with an ASCII value from 0 to 10. For characters that are not supported in +// XML 1.0, you can add this parameter to request that Amazon S3 encode the +// keys in the response. 
+const ( + // EncodingTypeUrl is a EncodingType enum value + EncodingTypeUrl = "url" +) + +// Bucket event for which to send notifications. +const ( + // EventS3ReducedRedundancyLostObject is a Event enum value + EventS3ReducedRedundancyLostObject = "s3:ReducedRedundancyLostObject" + + // EventS3ObjectCreated is a Event enum value + EventS3ObjectCreated = "s3:ObjectCreated:*" + + // EventS3ObjectCreatedPut is a Event enum value + EventS3ObjectCreatedPut = "s3:ObjectCreated:Put" + + // EventS3ObjectCreatedPost is a Event enum value + EventS3ObjectCreatedPost = "s3:ObjectCreated:Post" + + // EventS3ObjectCreatedCopy is a Event enum value + EventS3ObjectCreatedCopy = "s3:ObjectCreated:Copy" + + // EventS3ObjectCreatedCompleteMultipartUpload is a Event enum value + EventS3ObjectCreatedCompleteMultipartUpload = "s3:ObjectCreated:CompleteMultipartUpload" + + // EventS3ObjectRemoved is a Event enum value + EventS3ObjectRemoved = "s3:ObjectRemoved:*" + + // EventS3ObjectRemovedDelete is a Event enum value + EventS3ObjectRemovedDelete = "s3:ObjectRemoved:Delete" + + // EventS3ObjectRemovedDeleteMarkerCreated is a Event enum value + EventS3ObjectRemovedDeleteMarkerCreated = "s3:ObjectRemoved:DeleteMarkerCreated" +) + +const ( + // ExpirationStatusEnabled is a ExpirationStatus enum value + ExpirationStatusEnabled = "Enabled" + + // ExpirationStatusDisabled is a ExpirationStatus enum value + ExpirationStatusDisabled = "Disabled" +) + +const ( + // FilterRuleNamePrefix is a FilterRuleName enum value + FilterRuleNamePrefix = "prefix" + + // FilterRuleNameSuffix is a FilterRuleName enum value + FilterRuleNameSuffix = "suffix" +) + +const ( + // InventoryFormatCsv is a InventoryFormat enum value + InventoryFormatCsv = "CSV" +) + +const ( + // InventoryFrequencyDaily is a InventoryFrequency enum value + InventoryFrequencyDaily = "Daily" + + // InventoryFrequencyWeekly is a InventoryFrequency enum value + InventoryFrequencyWeekly = "Weekly" +) + +const ( + // InventoryIncludedObjectVersionsAll is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsAll = "All" + + // InventoryIncludedObjectVersionsCurrent is a InventoryIncludedObjectVersions enum value + InventoryIncludedObjectVersionsCurrent = "Current" +) + +const ( + // InventoryOptionalFieldSize is a InventoryOptionalField enum value + InventoryOptionalFieldSize = "Size" + + // InventoryOptionalFieldLastModifiedDate is a InventoryOptionalField enum value + InventoryOptionalFieldLastModifiedDate = "LastModifiedDate" + + // InventoryOptionalFieldStorageClass is a InventoryOptionalField enum value + InventoryOptionalFieldStorageClass = "StorageClass" + + // InventoryOptionalFieldEtag is a InventoryOptionalField enum value + InventoryOptionalFieldEtag = "ETag" + + // InventoryOptionalFieldIsMultipartUploaded is a InventoryOptionalField enum value + InventoryOptionalFieldIsMultipartUploaded = "IsMultipartUploaded" + + // InventoryOptionalFieldReplicationStatus is a InventoryOptionalField enum value + InventoryOptionalFieldReplicationStatus = "ReplicationStatus" +) + +const ( + // MFADeleteEnabled is a MFADelete enum value + MFADeleteEnabled = "Enabled" + + // MFADeleteDisabled is a MFADelete enum value + MFADeleteDisabled = "Disabled" +) + +const ( + // MFADeleteStatusEnabled is a MFADeleteStatus enum value + MFADeleteStatusEnabled = "Enabled" + + // MFADeleteStatusDisabled is a MFADeleteStatus enum value + MFADeleteStatusDisabled = "Disabled" +) + +const ( + // MetadataDirectiveCopy is a MetadataDirective enum value + 
MetadataDirectiveCopy = "COPY" + + // MetadataDirectiveReplace is a MetadataDirective enum value + MetadataDirectiveReplace = "REPLACE" +) + +const ( + // ObjectCannedACLPrivate is a ObjectCannedACL enum value + ObjectCannedACLPrivate = "private" + + // ObjectCannedACLPublicRead is a ObjectCannedACL enum value + ObjectCannedACLPublicRead = "public-read" + + // ObjectCannedACLPublicReadWrite is a ObjectCannedACL enum value + ObjectCannedACLPublicReadWrite = "public-read-write" + + // ObjectCannedACLAuthenticatedRead is a ObjectCannedACL enum value + ObjectCannedACLAuthenticatedRead = "authenticated-read" + + // ObjectCannedACLAwsExecRead is a ObjectCannedACL enum value + ObjectCannedACLAwsExecRead = "aws-exec-read" + + // ObjectCannedACLBucketOwnerRead is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerRead = "bucket-owner-read" + + // ObjectCannedACLBucketOwnerFullControl is a ObjectCannedACL enum value + ObjectCannedACLBucketOwnerFullControl = "bucket-owner-full-control" +) + +const ( + // ObjectStorageClassStandard is a ObjectStorageClass enum value + ObjectStorageClassStandard = "STANDARD" + + // ObjectStorageClassReducedRedundancy is a ObjectStorageClass enum value + ObjectStorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // ObjectStorageClassGlacier is a ObjectStorageClass enum value + ObjectStorageClassGlacier = "GLACIER" +) + +const ( + // ObjectVersionStorageClassStandard is a ObjectVersionStorageClass enum value + ObjectVersionStorageClassStandard = "STANDARD" +) + +const ( + // PayerRequester is a Payer enum value + PayerRequester = "Requester" + + // PayerBucketOwner is a Payer enum value + PayerBucketOwner = "BucketOwner" +) + +const ( + // PermissionFullControl is a Permission enum value + PermissionFullControl = "FULL_CONTROL" + + // PermissionWrite is a Permission enum value + PermissionWrite = "WRITE" + + // PermissionWriteAcp is a Permission enum value + PermissionWriteAcp = "WRITE_ACP" + + // PermissionRead is a Permission enum value + PermissionRead = "READ" + + // PermissionReadAcp is a Permission enum value + PermissionReadAcp = "READ_ACP" +) + +const ( + // ProtocolHttp is a Protocol enum value + ProtocolHttp = "http" + + // ProtocolHttps is a Protocol enum value + ProtocolHttps = "https" +) + +const ( + // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusEnabled = "Enabled" + + // ReplicationRuleStatusDisabled is a ReplicationRuleStatus enum value + ReplicationRuleStatusDisabled = "Disabled" +) + +const ( + // ReplicationStatusComplete is a ReplicationStatus enum value + ReplicationStatusComplete = "COMPLETE" + + // ReplicationStatusPending is a ReplicationStatus enum value + ReplicationStatusPending = "PENDING" + + // ReplicationStatusFailed is a ReplicationStatus enum value + ReplicationStatusFailed = "FAILED" + + // ReplicationStatusReplica is a ReplicationStatus enum value + ReplicationStatusReplica = "REPLICA" +) + +// If present, indicates that the requester was successfully charged for the +// request. +const ( + // RequestChargedRequester is a RequestCharged enum value + RequestChargedRequester = "requester" +) + +// Confirms that the requester knows that she or he will be charged for the +// request. Bucket owners need not specify this parameter in their requests. 
+// Documentation on downloading objects from requester pays buckets can be found +// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html +const ( + // RequestPayerRequester is a RequestPayer enum value + RequestPayerRequester = "requester" +) + +const ( + // ServerSideEncryptionAes256 is a ServerSideEncryption enum value + ServerSideEncryptionAes256 = "AES256" + + // ServerSideEncryptionAwsKms is a ServerSideEncryption enum value + ServerSideEncryptionAwsKms = "aws:kms" +) + +const ( + // StorageClassStandard is a StorageClass enum value + StorageClassStandard = "STANDARD" + + // StorageClassReducedRedundancy is a StorageClass enum value + StorageClassReducedRedundancy = "REDUCED_REDUNDANCY" + + // StorageClassStandardIa is a StorageClass enum value + StorageClassStandardIa = "STANDARD_IA" +) + +const ( + // StorageClassAnalysisSchemaVersionV1 is a StorageClassAnalysisSchemaVersion enum value + StorageClassAnalysisSchemaVersionV1 = "V_1" +) + +const ( + // TaggingDirectiveCopy is a TaggingDirective enum value + TaggingDirectiveCopy = "COPY" + + // TaggingDirectiveReplace is a TaggingDirective enum value + TaggingDirectiveReplace = "REPLACE" +) + +const ( + // TierStandard is a Tier enum value + TierStandard = "Standard" + + // TierBulk is a Tier enum value + TierBulk = "Bulk" + + // TierExpedited is a Tier enum value + TierExpedited = "Expedited" +) + +const ( + // TransitionStorageClassGlacier is a TransitionStorageClass enum value + TransitionStorageClassGlacier = "GLACIER" + + // TransitionStorageClassStandardIa is a TransitionStorageClass enum value + TransitionStorageClassStandardIa = "STANDARD_IA" +) + +const ( + // TypeCanonicalUser is a Type enum value + TypeCanonicalUser = "CanonicalUser" + + // TypeAmazonCustomerByEmail is a Type enum value + TypeAmazonCustomerByEmail = "AmazonCustomerByEmail" + + // TypeGroup is a Type enum value + TypeGroup = "Group" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go new file mode 100644 index 0000000..bc68a46 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/bucket_location.go @@ -0,0 +1,106 @@ +package s3 + +import ( + "io/ioutil" + "regexp" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +var reBucketLocation = regexp.MustCompile(`>([^<>]+)<\/Location`) + +// NormalizeBucketLocation is a utility function which will update the +// passed in value to always be a region ID. Generally this would be used +// with GetBucketLocation API operation. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +func NormalizeBucketLocation(loc string) string { + switch loc { + case "": + loc = "us-east-1" + case "EU": + loc = "eu-west-1" + } + + return loc +} + +// NormalizeBucketLocationHandler is a request handler which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. 
+// +// req, result := svc.GetBucketLocationRequest(&s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }) +// req.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +// err := req.Send() +var NormalizeBucketLocationHandler = request.NamedHandler{ + Name: "awssdk.s3.NormalizeBucketLocation", + Fn: func(req *request.Request) { + if req.Error != nil { + return + } + + out := req.Data.(*GetBucketLocationOutput) + loc := NormalizeBucketLocation(aws.StringValue(out.LocationConstraint)) + out.LocationConstraint = aws.String(loc) + }, +} + +// WithNormalizeBucketLocation is a request option which will update the +// GetBucketLocation's result LocationConstraint value to always be a region ID. +// +// Replaces empty string with "us-east-1", and "EU" with "eu-west-1". +// +// See http://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETlocation.html +// for more information on the values that can be returned. +// +// result, err := svc.GetBucketLocationWithContext(ctx, +// &s3.GetBucketLocationInput{ +// Bucket: aws.String(bucket), +// }, +// s3.WithNormalizeBucketLocation, +// ) +func WithNormalizeBucketLocation(r *request.Request) { + r.Handlers.Unmarshal.PushBackNamed(NormalizeBucketLocationHandler) +} + +func buildGetBucketLocation(r *request.Request) { + if r.DataFilled() { + out := r.Data.(*GetBucketLocationOutput) + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "failed reading response body", err) + return + } + + match := reBucketLocation.FindSubmatch(b) + if len(match) > 1 { + loc := string(match[1]) + out.LocationConstraint = aws.String(loc) + } + } +} + +func populateLocationConstraint(r *request.Request) { + if r.ParamsFilled() && aws.StringValue(r.Config.Region) != "us-east-1" { + in := r.Params.(*CreateBucketInput) + if in.CreateBucketConfiguration == nil { + r.Params = awsutil.CopyOf(r.Params) + in = r.Params.(*CreateBucketInput) + in.CreateBucketConfiguration = &CreateBucketConfiguration{ + LocationConstraint: r.Config.Region, + } + } + } +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/content_md5.go b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go similarity index 91% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/content_md5.go rename to vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go index 386f09a..9fc5df9 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/content_md5.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/content_md5.go @@ -5,13 +5,13 @@ import ( "encoding/base64" "io" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" ) // contentMD5 computes and sets the HTTP Content-MD5 header for requests that // require it. -func contentMD5(r *aws.Request) { +func contentMD5(r *request.Request) { h := md5.New() // hash the body. 
seek back to the first position after reading to reset diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go new file mode 100644 index 0000000..8463347 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/customizations.go @@ -0,0 +1,46 @@ +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/request" +) + +func init() { + initClient = defaultInitClientFn + initRequest = defaultInitRequestFn +} + +func defaultInitClientFn(c *client.Client) { + // Support building custom endpoints based on config + c.Handlers.Build.PushFront(updateEndpointForS3Config) + + // Require SSL when using SSE keys + c.Handlers.Validate.PushBack(validateSSERequiresSSL) + c.Handlers.Build.PushBack(computeSSEKeys) + + // S3 uses custom error unmarshaling logic + c.Handlers.UnmarshalError.Clear() + c.Handlers.UnmarshalError.PushBack(unmarshalError) +} + +func defaultInitRequestFn(r *request.Request) { + // Add reuest handlers for specific platforms. + // e.g. 100-continue support for PUT requests using Go 1.6 + platformRequestHandlers(r) + + switch r.Operation.Name { + case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, + opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration, + opPutBucketReplication: + // These S3 operations require Content-MD5 to be set + r.Handlers.Build.PushBack(contentMD5) + case opGetBucketLocation: + // GetBucketLocation has custom parsing logic + r.Handlers.Unmarshal.PushFront(buildGetBucketLocation) + case opCreateBucket: + // Auto-populate LocationConstraint with current region + r.Handlers.Validate.PushFront(populateLocationConstraint) + case opCopyObject, opUploadPartCopy, opCompleteMultipartUpload: + r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go new file mode 100644 index 0000000..f045fd0 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc.go @@ -0,0 +1,78 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package s3 provides the client and types for making API +// requests to Amazon Simple Storage Service. +// +// See https://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01 for more information on this service. +// +// See s3 package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/ +// +// Using the Client +// +// To use the client for Amazon Simple Storage Service you will first need +// to create a new instance of it. +// +// When creating a client for an AWS service you'll first need to have a Session +// already created. The Session provides configuration that can be shared +// between multiple service clients. Additional configuration can be applied to +// the Session and service's client when they are constructed. The aws package's +// Config type contains several fields such as Region for the AWS Region the +// client should make API requests too. The optional Config value can be provided +// as the variadic argument for Sessions and client creation. +// +// Once the service's client is created you can use it to make API requests the +// AWS service. These clients are safe to use concurrently. +// +// // Create a session to share configuration, and load external configuration. +// sess := session.Must(session.NewSession()) +// +// // Create the service's client with the session. 
+// svc := s3.New(sess) +// +// See the SDK's documentation for more information on how to use service clients. +// https://docs.aws.amazon.com/sdk-for-go/api/ +// +// See aws package's Config type for more information on configuration options. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config +// +// See the Amazon Simple Storage Service client S3 for more +// information on creating the service's client. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New +// +// Once the client is created you can make an API request to the service. +// Each API method takes a input parameter, and returns the service response +// and an error. +// +// The API method will document which error codes the service can be returned +// by the operation if the service models the API operation's errors. These +// errors will also be available as const strings prefixed with "ErrCode". +// +// result, err := svc.AbortMultipartUpload(params) +// if err != nil { +// // Cast err to awserr.Error to handle specific error codes. +// aerr, ok := err.(awserr.Error) +// if ok && aerr.Code() == { +// // Specific error code handling +// } +// return err +// } +// +// fmt.Println("AbortMultipartUpload result:") +// fmt.Println(result) +// +// Using the Client with Context +// +// The service's client also provides methods to make API requests with a Context +// value. This allows you to control the timeout, and cancellation of pending +// requests. These methods also take request Option as variadic parameter to apply +// additional configuration to the API request. +// +// ctx := context.Background() +// +// result, err := svc.AbortMultipartUploadWithContext(ctx, params) +// +// See the request package documentation for more information on using Context pattern +// with the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/ +package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go new file mode 100644 index 0000000..b794a63 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/doc_custom.go @@ -0,0 +1,109 @@ +// Upload Managers +// +// The s3manager package's Uploader provides concurrent upload of content to S3 +// by taking advantage of S3's Multipart APIs. The Uploader also supports both +// io.Reader for streaming uploads, and will also take advantage of io.ReadSeeker +// for optimizations if the Body satisfies that type. Once the Uploader instance +// is created you can call Upload concurrently from multiple goroutines safely. +// +// // The session the S3 Uploader will use +// sess := session.Must(session.NewSession()) +// +// // Create an uploader with the session and default options +// uploader := s3manager.NewUploader(sess) +// +// f, err := os.Open(filename) +// if err != nil { +// return fmt.Errorf("failed to open file %q, %v", filename, err) +// } +// +// // Upload the file to S3. +// result, err := uploader.Upload(&s3manager.UploadInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// Body: f, +// }) +// if err != nil { +// return fmt.Errorf("failed to upload file, %v", err) +// } +// fmt.Printf("file uploaded to, %s\n", aws.StringValue(result.Location)) +// +// See the s3manager package's Uploader type documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Uploader +// +// Download Manager +// +// The s3manager package's Downloader provides concurrently downloading of Objects +// from S3. 
The Downloader will write S3 Object content with an io.WriterAt. +// Once the Downloader instance is created you can call Upload concurrently from +// multiple goroutines safely. +// +// // The session the S3 Downloader will use +// sess := session.Must(session.NewSession()) +// +// // Create a downloader with the session and default options +// downloader := s3manager.NewDownloader(sess) +// +// // Create a file to write the S3 Object contents to. +// f, err := os.Create(filename) +// if err != nil { +// return fmt.Errorf("failed to create file %q, %v", filename, err) +// } +// +// // Write the contents of S3 Object to the file +// n, err := downloader.Download(f, &s3.GetObjectInput{ +// Bucket: aws.String(myBucket), +// Key: aws.String(myString), +// }) +// if err != nil { +// return fmt.Errorf("failed to upload file, %v", err) +// } +// fmt.Printf("file downloaded, %d bytes\n", n) +// +// See the s3manager package's Downloader type documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#Downloader +// +// Get Bucket Region +// +// GetBucketRegion will attempt to get the region for a bucket using a region +// hint to determine which AWS partition to perform the query on. Use this utility +// to determine the region a bucket is in. +// +// sess := session.Must(session.NewSession()) +// +// bucket := "my-bucket" +// region, err := s3manager.GetBucketRegion(ctx, sess, bucket, "us-west-2") +// if err != nil { +// if aerr, ok := err.(awserr.Error); ok && aerr.Code() == "NotFound" { +// fmt.Fprintf(os.Stderr, "unable to find bucket %s's region not found\n", bucket) +// } +// return err +// } +// fmt.Printf("Bucket %s is in %s region\n", bucket, region) +// +// See the s3manager package's GetBucketRegion function documentation for more information +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3manager/#GetBucketRegion +// +// S3 Crypto Client +// +// The s3crypto package provides the tools to upload and download encrypted +// content from S3. The Encryption and Decryption clients can be used concurrently +// once the client is created. +// +// sess := session.Must(session.NewSession()) +// +// // Create the decryption client. +// svc := s3crypto.NewDecryptionClient(sess) +// +// // The object will be downloaded from S3 and decrypted locally. By metadata +// // about the object's encryption will instruct the decryption client how +// // decrypt the content of the object. By default KMS is used for keys. +// result, err := svc.GetObject(&s3.GetObjectInput { +// Bucket: aws.String(myBucket), +// Key: aws.String(myKey), +// }) +// +// See the s3crypto package documentation for more information. +// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/ +// +package s3 diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go new file mode 100644 index 0000000..931cb17 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/errors.go @@ -0,0 +1,48 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +const ( + + // ErrCodeBucketAlreadyExists for service response error code + // "BucketAlreadyExists". + // + // The requested bucket name is not available. The bucket namespace is shared + // by all users of the system. Please select a different name and try again. + ErrCodeBucketAlreadyExists = "BucketAlreadyExists" + + // ErrCodeBucketAlreadyOwnedByYou for service response error code + // "BucketAlreadyOwnedByYou". 
+ ErrCodeBucketAlreadyOwnedByYou = "BucketAlreadyOwnedByYou" + + // ErrCodeNoSuchBucket for service response error code + // "NoSuchBucket". + // + // The specified bucket does not exist. + ErrCodeNoSuchBucket = "NoSuchBucket" + + // ErrCodeNoSuchKey for service response error code + // "NoSuchKey". + // + // The specified key does not exist. + ErrCodeNoSuchKey = "NoSuchKey" + + // ErrCodeNoSuchUpload for service response error code + // "NoSuchUpload". + // + // The specified multipart upload does not exist. + ErrCodeNoSuchUpload = "NoSuchUpload" + + // ErrCodeObjectAlreadyInActiveTierError for service response error code + // "ObjectAlreadyInActiveTierError". + // + // This operation is not allowed against this storage tier + ErrCodeObjectAlreadyInActiveTierError = "ObjectAlreadyInActiveTierError" + + // ErrCodeObjectNotInActiveTierError for service response error code + // "ObjectNotInActiveTierError". + // + // The source object of the COPY operation is not in the active tier and is + // only stored in Amazon Glacier. + ErrCodeObjectNotInActiveTierError = "ObjectNotInActiveTierError" +) diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go new file mode 100644 index 0000000..ec3ffe4 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/host_style_bucket.go @@ -0,0 +1,162 @@ +package s3 + +import ( + "fmt" + "net/url" + "regexp" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +// an operationBlacklist is a list of operation names that should a +// request handler should not be executed with. +type operationBlacklist []string + +// Continue will return true of the Request's operation name is not +// in the blacklist. False otherwise. +func (b operationBlacklist) Continue(r *request.Request) bool { + for i := 0; i < len(b); i++ { + if b[i] == r.Operation.Name { + return false + } + } + return true +} + +var accelerateOpBlacklist = operationBlacklist{ + opListBuckets, opCreateBucket, opDeleteBucket, +} + +// Request handler to automatically add the bucket name to the endpoint domain +// if possible. This style of bucket is valid for all bucket names which are +// DNS compatible and do not contain "." +func updateEndpointForS3Config(r *request.Request) { + forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle) + accelerate := aws.BoolValue(r.Config.S3UseAccelerate) + + if accelerate && accelerateOpBlacklist.Continue(r) { + if forceHostStyle { + if r.Config.Logger != nil { + r.Config.Logger.Log("ERROR: aws.Config.S3UseAccelerate is not compatible with aws.Config.S3ForcePathStyle, ignoring S3ForcePathStyle.") + } + } + updateEndpointForAccelerate(r) + } else if !forceHostStyle && r.Operation.Name != opGetBucketLocation { + updateEndpointForHostStyle(r) + } +} + +func updateEndpointForHostStyle(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucketname was not provided + // if this is an input validation error the validation handler + // will report it. 
+ return + } + + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { + // bucket name must be valid to put into the host + return + } + + moveBucketToHost(r.HTTPRequest.URL, bucket) +} + +var ( + accelElem = []byte("s3-accelerate.dualstack.") +) + +func updateEndpointForAccelerate(r *request.Request) { + bucket, ok := bucketNameFromReqParams(r.Params) + if !ok { + // Ignore operation requests if the bucketname was not provided + // if this is an input validation error the validation handler + // will report it. + return + } + + if !hostCompatibleBucketName(r.HTTPRequest.URL, bucket) { + r.Error = awserr.New("InvalidParameterException", + fmt.Sprintf("bucket name %s is not compatible with S3 Accelerate", bucket), + nil) + return + } + + parts := strings.Split(r.HTTPRequest.URL.Host, ".") + if len(parts) < 3 { + r.Error = awserr.New("InvalidParameterExecption", + fmt.Sprintf("unable to update endpoint host for S3 accelerate, hostname invalid, %s", + r.HTTPRequest.URL.Host), nil) + return + } + + if parts[0] == "s3" || strings.HasPrefix(parts[0], "s3-") { + parts[0] = "s3-accelerate" + } + for i := 1; i+1 < len(parts); i++ { + if parts[i] == aws.StringValue(r.Config.Region) { + parts = append(parts[:i], parts[i+1:]...) + break + } + } + + r.HTTPRequest.URL.Host = strings.Join(parts, ".") + + moveBucketToHost(r.HTTPRequest.URL, bucket) +} + +// Attempts to retrieve the bucket name from the request input parameters. +// If no bucket is found, or the field is empty "", false will be returned. +func bucketNameFromReqParams(params interface{}) (string, bool) { + b, _ := awsutil.ValuesAtPath(params, "Bucket") + if len(b) == 0 { + return "", false + } + + if bucket, ok := b[0].(*string); ok { + if bucketStr := aws.StringValue(bucket); bucketStr != "" { + return bucketStr, true + } + } + + return "", false +} + +// hostCompatibleBucketName returns true if the request should +// put the bucket in the host. This is false if S3ForcePathStyle is +// explicitly set or if the bucket is not DNS compatible. +func hostCompatibleBucketName(u *url.URL, bucket string) bool { + // Bucket might be DNS compatible but dots in the hostname will fail + // certificate validation, so do not use host-style. + if u.Scheme == "https" && strings.Contains(bucket, ".") { + return false + } + + // if the bucket is DNS compatible + return dnsCompatibleBucketName(bucket) +} + +var reDomain = regexp.MustCompile(`^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$`) +var reIPAddress = regexp.MustCompile(`^(\d+\.){3}\d+$`) + +// dnsCompatibleBucketName returns true if the bucket name is DNS compatible. +// Buckets created outside of the classic region MUST be DNS compatible. +func dnsCompatibleBucketName(bucket string) bool { + return reDomain.MatchString(bucket) && + !reIPAddress.MatchString(bucket) && + !strings.Contains(bucket, "..") +} + +// moveBucketToHost moves the bucket name from the URI path to URL host. +func moveBucketToHost(u *url.URL, bucket string) { + u.Host = bucket + "." 
+ u.Host + u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1) + if u.Path == "" { + u.Path = "/" + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go new file mode 100644 index 0000000..8e6f330 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers.go @@ -0,0 +1,8 @@ +// +build !go1.6 + +package s3 + +import "github.com/aws/aws-sdk-go/aws/request" + +func platformRequestHandlers(r *request.Request) { +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go new file mode 100644 index 0000000..14d05f7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/platform_handlers_go1.6.go @@ -0,0 +1,28 @@ +// +build go1.6 + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +func platformRequestHandlers(r *request.Request) { + if r.Operation.HTTPMethod == "PUT" { + // 100-Continue should only be used on put requests. + r.Handlers.Sign.PushBack(add100Continue) + } +} + +func add100Continue(r *request.Request) { + if aws.BoolValue(r.Config.S3Disable100Continue) { + return + } + if r.HTTPRequest.ContentLength < 1024*1024*2 { + // Ignore requests smaller than 2MB. This helps prevent delaying + // requests unnecessarily. + return + } + + r.HTTPRequest.Header.Set("Expect", "100-Continue") +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/service.go b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go new file mode 100644 index 0000000..614e477 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/service.go @@ -0,0 +1,93 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package s3 + +import ( + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/client" + "github.com/aws/aws-sdk-go/aws/client/metadata" + "github.com/aws/aws-sdk-go/aws/request" + "github.com/aws/aws-sdk-go/aws/signer/v4" + "github.com/aws/aws-sdk-go/private/protocol/restxml" +) + +// S3 provides the API operation methods for making requests to +// Amazon Simple Storage Service. See this package's package overview docs +// for details on the service. +// +// S3 methods are safe to use concurrently. It is not safe to +// modify mutate any of the struct's properties though. +type S3 struct { + *client.Client +} + +// Used for custom client initialization logic +var initClient func(*client.Client) + +// Used for custom request initialization logic +var initRequest func(*request.Request) + +// Service information constants +const ( + ServiceName = "s3" // Service endpoint prefix API calls made to. + EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata. +) + +// New creates a new instance of the S3 client with a session. +// If additional configuration is needed for the client instance use the optional +// aws.Config parameter to add your extra config. +// +// Example: +// // Create a S3 client from just a session. +// svc := s3.New(mySession) +// +// // Create a S3 client with additional configuration +// svc := s3.New(mySession, aws.NewConfig().WithRegion("us-west-2")) +func New(p client.ConfigProvider, cfgs ...*aws.Config) *S3 { + c := p.ClientConfig(EndpointsID, cfgs...) + return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName) +} + +// newClient creates, initializes and returns a new service client instance. 
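+//
+// Clients returned by New honor the S3-specific aws.Config options wired up by
+// the handlers in this package, such as path-style addressing and the
+// 100-continue behaviour added above for large PUT requests. An illustrative
+// sketch, assuming the aws package's WithS3ForcePathStyle and
+// WithS3Disable100Continue setters:
+//
+//    svc := s3.New(sess, aws.NewConfig().
+//        WithS3ForcePathStyle(true).
+//        WithS3Disable100Continue(true))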
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *S3 { + svc := &S3{ + Client: client.New( + cfg, + metadata.ClientInfo{ + ServiceName: ServiceName, + SigningName: signingName, + SigningRegion: signingRegion, + Endpoint: endpoint, + APIVersion: "2006-03-01", + }, + handlers, + ), + } + + // Handlers + svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) + svc.Handlers.Build.PushBackNamed(restxml.BuildHandler) + svc.Handlers.Unmarshal.PushBackNamed(restxml.UnmarshalHandler) + svc.Handlers.UnmarshalMeta.PushBackNamed(restxml.UnmarshalMetaHandler) + svc.Handlers.UnmarshalError.PushBackNamed(restxml.UnmarshalErrorHandler) + + // Run custom client initialization if present + if initClient != nil { + initClient(svc.Client) + } + + return svc +} + +// newRequest creates a new request for a S3 operation and runs any +// custom request initialization. +func (c *S3) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse.go b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go similarity index 80% rename from Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse.go rename to vendor/github.com/aws/aws-sdk-go/service/s3/sse.go index 01350f7..268ea2f 100644 --- a/Godeps/_workspace/src/github.com/aws/aws-sdk-go/service/s3/sse.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/sse.go @@ -4,23 +4,23 @@ import ( "crypto/md5" "encoding/base64" - "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" ) var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) -func validateSSERequiresSSL(r *aws.Request) { +func validateSSERequiresSSL(r *request.Request) { if r.HTTPRequest.URL.Scheme != "https" { - p := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") + p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") if len(p) > 0 { r.Error = errSSERequiresSSL } } } -func computeSSEKeys(r *aws.Request) { +func computeSSEKeys(r *request.Request) { headers := []string{ "x-amz-server-side-encryption-customer-key", "x-amz-copy-source-server-side-encryption-customer-key", diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go new file mode 100644 index 0000000..5a78fd3 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/statusok_error.go @@ -0,0 +1,35 @@ +package s3 + +import ( + "bytes" + "io/ioutil" + "net/http" + + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +func copyMultipartStatusOKUnmarhsalError(r *request.Request) { + b, err := ioutil.ReadAll(r.HTTPResponse.Body) + if err != nil { + r.Error = awserr.New("SerializationError", "unable to read response body", err) + return + } + body := bytes.NewReader(b) + r.HTTPResponse.Body = ioutil.NopCloser(body) + defer body.Seek(0, 0) + + if body.Len() == 0 { + // If there is no body don't attempt to parse the body. 
+ return + } + + unmarshalError(r) + if err, ok := r.Error.(awserr.Error); ok && err != nil { + if err.Code() == "SerializationError" { + r.Error = nil + return + } + r.HTTPResponse.StatusCode = http.StatusServiceUnavailable + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go new file mode 100644 index 0000000..bcca862 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/unmarshal_error.go @@ -0,0 +1,103 @@ +package s3 + +import ( + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/request" +) + +type xmlErrorResponse struct { + XMLName xml.Name `xml:"Error"` + Code string `xml:"Code"` + Message string `xml:"Message"` +} + +func unmarshalError(r *request.Request) { + defer r.HTTPResponse.Body.Close() + defer io.Copy(ioutil.Discard, r.HTTPResponse.Body) + + hostID := r.HTTPResponse.Header.Get("X-Amz-Id-2") + + // Bucket exists in a different region, and request needs + // to be made to the correct region. + if r.HTTPResponse.StatusCode == http.StatusMovedPermanently { + r.Error = requestFailure{ + RequestFailure: awserr.NewRequestFailure( + awserr.New("BucketRegionError", + fmt.Sprintf("incorrect region, the bucket is not in '%s' region", + aws.StringValue(r.Config.Region)), + nil), + r.HTTPResponse.StatusCode, + r.RequestID, + ), + hostID: hostID, + } + return + } + + var errCode, errMsg string + + // Attempt to parse error from body if it is known + resp := &xmlErrorResponse{} + err := xml.NewDecoder(r.HTTPResponse.Body).Decode(resp) + if err != nil && err != io.EOF { + errCode = "SerializationError" + errMsg = "failed to decode S3 XML error response" + } else { + errCode = resp.Code + errMsg = resp.Message + err = nil + } + + // Fallback to status code converted to message if still no error code + if len(errCode) == 0 { + statusText := http.StatusText(r.HTTPResponse.StatusCode) + errCode = strings.Replace(statusText, " ", "", -1) + errMsg = statusText + } + + r.Error = requestFailure{ + RequestFailure: awserr.NewRequestFailure( + awserr.New(errCode, errMsg, err), + r.HTTPResponse.StatusCode, + r.RequestID, + ), + hostID: hostID, + } +} + +// A RequestFailure provides access to the S3 Request ID and Host ID values +// returned from API operation errors. Getting the error as a string will +// return the formated error with the same information as awserr.RequestFailure, +// while also adding the HostID value from the response. +type RequestFailure interface { + awserr.RequestFailure + + // Host ID is the S3 Host ID needed for debug, and contacting support + HostID() string +} + +type requestFailure struct { + awserr.RequestFailure + + hostID string +} + +func (r requestFailure) Error() string { + extra := fmt.Sprintf("status code: %d, request id: %s, host id: %s", + r.StatusCode(), r.RequestID(), r.hostID) + return awserr.SprintError(r.Code(), r.Message(), extra, r.OrigErr()) +} +func (r requestFailure) String() string { + return r.Error() +} +func (r requestFailure) HostID() string { + return r.hostID +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go new file mode 100644 index 0000000..cccfa8c --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/waiters.go @@ -0,0 +1,214 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. 
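+
+// The waiters below poll HeadBucket or HeadObject until an expected HTTP
+// status is seen, or the maximum number of attempts is reached. A minimal,
+// illustrative sketch of waiting for a newly created bucket (the bucket name
+// is a placeholder):
+//
+//    svc := s3.New(session.Must(session.NewSession()))
+//
+//    if _, err := svc.CreateBucket(&s3.CreateBucketInput{
+//        Bucket: aws.String("mybucket"),
+//    }); err != nil {
+//        return err
+//    }
+//
+//    if err := svc.WaitUntilBucketExists(&s3.HeadBucketInput{
+//        Bucket: aws.String("mybucket"),
+//    }); err != nil {
+//        return fmt.Errorf("bucket never became available, %v", err)
+//    }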
+ +package s3 + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/request" +) + +// WaitUntilBucketExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not meet within the max attempt window an error will +// be returned. +func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { + return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketExistsWithContext is an extended version of WaitUntilBucketExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 301, + }, + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 403, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilBucketNotExists uses the Amazon S3 API operation +// HeadBucket to wait for a condition to be met before returning. +// If the condition is not meet within the max attempt window an error will +// be returned. +func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { + return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilBucketNotExistsWithContext is an extended version of WaitUntilBucketNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
+func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBucketInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilBucketNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadBucketInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadBucketRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not meet within the max attempt window an error will +// be returned. +func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { + return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectExistsWithContext is an extended version of WaitUntilObjectExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 200, + }, + { + State: request.RetryWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} + +// WaitUntilObjectNotExists uses the Amazon S3 API operation +// HeadObject to wait for a condition to be met before returning. +// If the condition is not meet within the max attempt window an error will +// be returned. +func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { + return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) +} + +// WaitUntilObjectNotExistsWithContext is an extended version of WaitUntilObjectNotExists. +// With the support for passing in a context and options to configure the +// Waiter and the underlying request options. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
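+//
+// An illustrative sketch of waiting for an object to disappear after a
+// DeleteObject call, overriding the default waiter settings (assuming the
+// request package's WithWaiterMaxAttempts and WithWaiterDelay options; bucket
+// and key names are placeholders):
+//
+//    ctx := aws.BackgroundContext()
+//
+//    if _, err := svc.DeleteObject(&s3.DeleteObjectInput{
+//        Bucket: aws.String("mybucket"),
+//        Key:    aws.String("mykey"),
+//    }); err != nil {
+//        return err
+//    }
+//
+//    err := svc.WaitUntilObjectNotExistsWithContext(ctx,
+//        &s3.HeadObjectInput{
+//            Bucket: aws.String("mybucket"),
+//            Key:    aws.String("mykey"),
+//        },
+//        request.WithWaiterMaxAttempts(10),
+//        request.WithWaiterDelay(request.ConstantWaiterDelay(3*time.Second)),
+//    )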
+func (c *S3) WaitUntilObjectNotExistsWithContext(ctx aws.Context, input *HeadObjectInput, opts ...request.WaiterOption) error { + w := request.Waiter{ + Name: "WaitUntilObjectNotExists", + MaxAttempts: 20, + Delay: request.ConstantWaiterDelay(5 * time.Second), + Acceptors: []request.WaiterAcceptor{ + { + State: request.SuccessWaiterState, + Matcher: request.StatusWaiterMatch, + Expected: 404, + }, + }, + Logger: c.Config.Logger, + NewRequest: func(opts []request.Option) (*request.Request, error) { + var inCpy *HeadObjectInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.HeadObjectRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + w.ApplyOptions(opts...) + + return w.WaitWithContext(ctx) +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/api.go b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go new file mode 100644 index 0000000..e5c105f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/api.go @@ -0,0 +1,2365 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +import ( + "time" + + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awsutil" + "github.com/aws/aws-sdk-go/aws/request" +) + +const opAssumeRole = "AssumeRole" + +// AssumeRoleRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRole operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AssumeRole for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRole method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleRequest method. +// req, resp := client.AssumeRoleRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, output *AssumeRoleOutput) { + op := &request.Operation{ + Name: opAssumeRole, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleInput{} + } + + output = &AssumeRoleOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRole API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials (consisting of an access +// key ID, a secret access key, and a security token) that you can use to access +// AWS resources that you might not normally have access to. Typically, you +// use AssumeRole for cross-account access or federation. For a comparison of +// AssumeRole with the other APIs that produce temporary credentials, see Requesting +// Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. 
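+//
+// A minimal, illustrative sketch of calling AssumeRole directly (the role ARN
+// and session name below are placeholders):
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//
+//    out, err := svc.AssumeRole(&sts.AssumeRoleInput{
+//        RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"),
+//        RoleSessionName: aws.String("example-session"),
+//        DurationSeconds: aws.Int64(900),
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    creds := out.Credentials
+//    fmt.Println(aws.StringValue(creds.AccessKeyId), aws.TimeValue(creds.Expiration))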
+// +// Important: You cannot call AssumeRole by using AWS root account credentials; +// access is denied. You must use credentials for an IAM user or an IAM role +// to call AssumeRole. +// +// For cross-account access, imagine that you own multiple accounts and need +// to access resources in each account. You could create long-term credentials +// in each account to access those resources. However, managing all those credentials +// and remembering which one can access which account can be time consuming. +// Instead, you can create one set of long-term credentials in one account and +// then use temporary security credentials to access all the other accounts +// by assuming roles in those accounts. For more information about roles, see +// IAM Roles (Delegation and Federation) (http://docs.aws.amazon.com/IAM/latest/UserGuide/roles-toplevel.html) +// in the IAM User Guide. +// +// For federation, you can, for example, grant single sign-on access to the +// AWS Management Console. If you already have an identity and authentication +// system in your corporate network, you don't have to recreate user identities +// in AWS in order to grant those user identities access to AWS. Instead, after +// a user has been authenticated, you call AssumeRole (and specify the role +// with the appropriate permissions) to get temporary security credentials for +// that user. With those temporary security credentials, you construct a sign-in +// URL that users can use to access the console. For more information, see Common +// Scenarios for Temporary Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html#sts-introduction) +// in the IAM User Guide. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, which can be from 900 seconds (15 minutes) to a +// maximum of 3600 seconds (1 hour). The default is 1 hour. +// +// The temporary security credentials created by AssumeRole can be used to make +// API calls to any AWS service with the following exception: you cannot call +// the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// To assume a role, your AWS account must be trusted by the role. The trust +// relationship is defined in the role's trust policy when the role is created. +// That trust policy states which accounts are allowed to delegate access to +// this account's role. 
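+//
+// For the cross-account pattern described above, the SDK's stscreds
+// credentials provider can wrap AssumeRole so that other service clients
+// refresh the temporary credentials automatically. An illustrative sketch
+// (the role ARN is a placeholder, and stscreds refers to the
+// aws/credentials/stscreds package):
+//
+//    sess := session.Must(session.NewSession())
+//    creds := stscreds.NewCredentials(sess, "arn:aws:iam::123456789012:role/example-role")
+//
+//    // S3 calls made with this client are signed with the assumed role.
+//    svc := s3.New(sess, &aws.Config{Credentials: creds})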
+// +// The user who wants to access the role must also have permissions delegated +// from the role's administrator. If the user is in a different account than +// the role, then the user's administrator must attach a policy that allows +// the user to call AssumeRole on the ARN of the role in the other account. +// If the user is in the same account as the role, then you can either attach +// a policy to the user (identical to the previous different account user), +// or you can add the user as a principal directly in the role's trust policy +// +// Using MFA with AssumeRole +// +// You can optionally include multi-factor authentication (MFA) information +// when you call AssumeRole. This is useful for cross-account scenarios in which +// you want to make sure that the user who is assuming the role has been authenticated +// using an AWS MFA device. In that scenario, the trust policy of the role being +// assumed includes a condition that tests for MFA authentication; if the caller +// does not include valid MFA information, the request to assume the role is +// denied. The condition in a trust policy that tests for MFA authentication +// might look like the following example. +// +// "Condition": {"Bool": {"aws:MultiFactorAuthPresent": true}} +// +// For more information, see Configuring MFA-Protected API Access (http://docs.aws.amazon.com/IAM/latest/UserGuide/MFAProtectedAPI.html) +// in the IAM User Guide guide. +// +// To use MFA with AssumeRole, you pass values for the SerialNumber and TokenCode +// parameters. The SerialNumber value identifies the user's hardware or virtual +// MFA device. The TokenCode is the time-based one-time password (TOTP) that +// the MFA devices produces. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRole for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole +func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + return out, req.Send() +} + +// AssumeRoleWithContext is the same as AssumeRole with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRole for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. 
If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithContext(ctx aws.Context, input *AssumeRoleInput, opts ...request.Option) (*AssumeRoleOutput, error) { + req, out := c.AssumeRoleRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithSAML = "AssumeRoleWithSAML" + +// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithSAML operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AssumeRoleWithSAML for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the AssumeRoleWithSAML method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithSAMLRequest method. +// req, resp := client.AssumeRoleWithSAMLRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAMLRequest(input *AssumeRoleWithSAMLInput) (req *request.Request, output *AssumeRoleWithSAMLOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithSAML, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithSAMLInput{} + } + + output = &AssumeRoleWithSAMLOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRoleWithSAML API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// via a SAML authentication response. This operation provides a mechanism for +// tying an enterprise identity store or directory to role-based AWS access +// without user-specific credentials or configuration. For a comparison of AssumeRoleWithSAML +// with the other APIs that produce temporary credentials, see Requesting Temporary +// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this operation consist of +// an access key ID, a secret access key, and a security token. Applications +// can use these temporary security credentials to sign calls to AWS services. +// +// The temporary security credentials are valid for the duration that you specified +// when calling AssumeRole, or until the time specified in the SAML authentication +// response's SessionNotOnOrAfter value, whichever is shorter. The duration +// can be from 900 seconds (15 minutes) to a maximum of 3600 seconds (1 hour). +// The default is 1 hour. 
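+//
+// A minimal, illustrative sketch of the call (the ARNs are placeholders and
+// samlAssertion is assumed to hold the base64-encoded SAML response from the
+// identity provider):
+//
+//    out, err := svc.AssumeRoleWithSAML(&sts.AssumeRoleWithSAMLInput{
+//        RoleArn:       aws.String("arn:aws:iam::123456789012:role/saml-role"),
+//        PrincipalArn:  aws.String("arn:aws:iam::123456789012:saml-provider/example-idp"),
+//        SAMLAssertion: aws.String(samlAssertion),
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    fmt.Println(aws.StringValue(out.Credentials.AccessKeyId))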
+// +// The temporary security credentials created by AssumeRoleWithSAML can be used +// to make API calls to any AWS service with the following exception: you cannot +// call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by the intersection of both the access policy +// of the role that is being assumed, and the policy that you pass. This means +// that both policies must grant the permission for the action to be allowed. +// This gives you a way to further restrict the permissions for the resulting +// temporary security credentials. You cannot use the passed policy to grant +// permissions that are in excess of those allowed by the access policy of the +// role that is being assumed. For more information, see Permissions for AssumeRole, +// AssumeRoleWithSAML, and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithSAML, you must configure your +// SAML identity provider (IdP) to issue the claims required by AWS. Additionally, +// you must use AWS Identity and Access Management (IAM) to create a SAML provider +// entity in your AWS account that represents your identity provider, and create +// an IAM role that specifies this SAML provider in its trust policy. +// +// Calling AssumeRoleWithSAML does not require the use of AWS security credentials. +// The identity of the caller is validated by using keys in the metadata document +// that is uploaded for the SAML provider entity for your identity provider. +// +// Calling AssumeRoleWithSAML can result in an entry in your AWS CloudTrail +// logs. The entry includes the value in the NameID element of the SAML assertion. +// We recommend that you use a NameIDType that is not associated with any personally +// identifiable information (PII). For example, you could instead use the Persistent +// Identifier (urn:oasis:names:tc:SAML:2.0:nameid-format:persistent). +// +// For more information, see the following resources: +// +// * About SAML 2.0-based Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_saml.html) +// in the IAM User Guide. +// +// * Creating SAML Identity Providers (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml.html) +// in the IAM User Guide. +// +// * Configuring a Relying Party and Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_saml_relying-party.html) +// in the IAM User Guide. +// +// * Creating a Role for SAML 2.0 Federation (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-idp_saml.html) +// in the IAM User Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithSAML for usage and error information. 
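+//
+// The error codes listed below can be inspected at runtime by asserting the
+// returned error to awserr.Error. An illustrative sketch:
+//
+//    _, err := svc.AssumeRoleWithSAML(input)
+//    if aerr, ok := err.(awserr.Error); ok {
+//        switch aerr.Code() {
+//        case sts.ErrCodeExpiredTokenException:
+//            // ask the identity provider for a fresh SAML assertion and retry
+//        case sts.ErrCodeRegionDisabledException:
+//            // STS is not activated in this region
+//        default:
+//            // other service or SDK error
+//        }
+//    }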
+// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAML +func (c *STS) AssumeRoleWithSAML(input *AssumeRoleWithSAMLInput) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + return out, req.Send() +} + +// AssumeRoleWithSAMLWithContext is the same as AssumeRoleWithSAML with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithSAML for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithSAMLWithContext(ctx aws.Context, input *AssumeRoleWithSAMLInput, opts ...request.Option) (*AssumeRoleWithSAMLOutput, error) { + req, out := c.AssumeRoleWithSAMLRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity" + +// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the +// client's request for the AssumeRoleWithWebIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See AssumeRoleWithWebIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. 
If +// you just want the service response, call the AssumeRoleWithWebIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. +// req, resp := client.AssumeRoleWithWebIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityInput) (req *request.Request, output *AssumeRoleWithWebIdentityOutput) { + op := &request.Operation{ + Name: opAssumeRoleWithWebIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &AssumeRoleWithWebIdentityInput{} + } + + output = &AssumeRoleWithWebIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// AssumeRoleWithWebIdentity API operation for AWS Security Token Service. +// +// Returns a set of temporary security credentials for users who have been authenticated +// in a mobile or web application with a web identity provider, such as Amazon +// Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible +// identity provider. +// +// For mobile applications, we recommend that you use Amazon Cognito. You can +// use Amazon Cognito with the AWS SDK for iOS (http://aws.amazon.com/sdkforios/) +// and the AWS SDK for Android (http://aws.amazon.com/sdkforandroid/) to uniquely +// identify a user and supply the user with a consistent identity throughout +// the lifetime of an application. +// +// To learn more about Amazon Cognito, see Amazon Cognito Overview (http://docs.aws.amazon.com/mobile/sdkforandroid/developerguide/cognito-auth.html#d0e840) +// in the AWS SDK for Android Developer Guide guide and Amazon Cognito Overview +// (http://docs.aws.amazon.com/mobile/sdkforios/developerguide/cognito-auth.html#d0e664) +// in the AWS SDK for iOS Developer Guide. +// +// Calling AssumeRoleWithWebIdentity does not require the use of AWS security +// credentials. Therefore, you can distribute an application (for example, on +// mobile devices) that requests temporary security credentials without including +// long-term AWS credentials in the application, and without deploying server-based +// proxy services that use long-term AWS credentials. Instead, the identity +// of the caller is validated by using a token from the web identity provider. +// For a comparison of AssumeRoleWithWebIdentity with the other APIs that produce +// temporary credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html) +// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison) +// in the IAM User Guide. +// +// The temporary security credentials returned by this API consist of an access +// key ID, a secret access key, and a security token. Applications can use these +// temporary security credentials to sign calls to AWS service APIs. +// +// The credentials are valid for the duration that you specified when calling +// AssumeRoleWithWebIdentity, which can be from 900 seconds (15 minutes) to +// a maximum of 3600 seconds (1 hour). The default is 1 hour. 
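+//
+// Because no AWS credentials are required for this call, the client can be
+// built with anonymous credentials. An illustrative sketch (the role ARN is a
+// placeholder and idToken is assumed to hold the token issued by the web
+// identity provider):
+//
+//    svc := sts.New(sess, &aws.Config{Credentials: credentials.AnonymousCredentials})
+//
+//    out, err := svc.AssumeRoleWithWebIdentity(&sts.AssumeRoleWithWebIdentityInput{
+//        RoleArn:          aws.String("arn:aws:iam::123456789012:role/web-identity-role"),
+//        RoleSessionName:  aws.String("example-session"),
+//        WebIdentityToken: aws.String(idToken),
+//    })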
+// +// The temporary security credentials created by AssumeRoleWithWebIdentity can +// be used to make API calls to any AWS service with the following exception: +// you cannot call the STS service's GetFederationToken or GetSessionToken APIs. +// +// Optionally, you can pass an IAM access policy to this operation. If you choose +// not to pass a policy, the temporary security credentials that are returned +// by the operation have the permissions that are defined in the access policy +// of the role that is being assumed. If you pass a policy to this operation, +// the temporary security credentials that are returned by the operation have +// the permissions that are allowed by both the access policy of the role that +// is being assumed, and the policy that you pass. This gives you a way to further +// restrict the permissions for the resulting temporary security credentials. +// You cannot use the passed policy to grant permissions that are in excess +// of those allowed by the access policy of the role that is being assumed. +// For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, +// and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) +// in the IAM User Guide. +// +// Before your application can call AssumeRoleWithWebIdentity, you must have +// an identity token from a supported identity provider and create a role that +// the application can assume. The role that your application assumes must trust +// the identity provider that is associated with the identity token. In other +// words, the identity provider must be specified in the role's trust policy. +// +// Calling AssumeRoleWithWebIdentity can result in an entry in your AWS CloudTrail +// logs. The entry includes the Subject (http://openid.net/specs/openid-connect-core-1_0.html#Claims) +// of the provided Web Identity Token. We recommend that you avoid using any +// personally identifiable information (PII) in this field. For example, you +// could instead use a GUID or a pairwise identifier, as suggested in the OIDC +// specification (http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes). +// +// For more information about how to use web identity federation and the AssumeRoleWithWebIdentity +// API, see the following resources: +// +// * Using Web Identity Federation APIs for Mobile Apps (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html) +// and Federation Through a Web-based Identity Provider (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity). +// +// +// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html). +// This interactive website lets you walk through the process of authenticating +// via Login with Amazon, Facebook, or Google, getting temporary security +// credentials, and then using those credentials to make a request to AWS. +// +// +// * AWS SDK for iOS (http://aws.amazon.com/sdkforios/) and AWS SDK for Android +// (http://aws.amazon.com/sdkforandroid/). These toolkits contain sample +// apps that show how to invoke the identity providers, and then how to use +// the information from these providers to get and use temporary security +// credentials. +// +// * Web Identity Federation with Mobile Applications (http://aws.amazon.com/articles/4617974389850313). 
+// This article discusses web identity federation and shows an example of +// how to use web identity federation to get access to content in Amazon +// S3. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation AssumeRoleWithWebIdentity for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeIDPRejectedClaimException "IDPRejectedClaim" +// The identity provider (IdP) reported that authentication failed. This might +// be because the claim is invalid. +// +// If this error is returned for the AssumeRoleWithWebIdentity operation, it +// can also mean that the claim has expired or has been explicitly revoked. +// +// * ErrCodeIDPCommunicationErrorException "IDPCommunicationError" +// The request could not be fulfilled because the non-AWS identity provider +// (IDP) that was asked to verify the incoming identity token could not be reached. +// This is often a transient error caused by network conditions. Retry the request +// a limited number of times so that you don't exceed the request rate. If the +// error persists, the non-AWS identity provider might be down or not responding. +// +// * ErrCodeInvalidIdentityTokenException "InvalidIdentityToken" +// The web identity token that was passed could not be validated by AWS. Get +// a new identity token from the identity provider and then retry the request. +// +// * ErrCodeExpiredTokenException "ExpiredTokenException" +// The web identity token that was passed is expired or is not valid. Get a +// new identity token from the identity provider and then retry the request. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentity +func (c *STS) AssumeRoleWithWebIdentity(input *AssumeRoleWithWebIdentityInput) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + return out, req.Send() +} + +// AssumeRoleWithWebIdentityWithContext is the same as AssumeRoleWithWebIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See AssumeRoleWithWebIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) AssumeRoleWithWebIdentityWithContext(ctx aws.Context, input *AssumeRoleWithWebIdentityInput, opts ...request.Option) (*AssumeRoleWithWebIdentityOutput, error) { + req, out := c.AssumeRoleWithWebIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage" + +// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the +// client's request for the DecodeAuthorizationMessage operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See DecodeAuthorizationMessage for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the DecodeAuthorizationMessage method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the DecodeAuthorizationMessageRequest method. +// req, resp := client.DecodeAuthorizationMessageRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessageRequest(input *DecodeAuthorizationMessageInput) (req *request.Request, output *DecodeAuthorizationMessageOutput) { + op := &request.Operation{ + Name: opDecodeAuthorizationMessage, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DecodeAuthorizationMessageInput{} + } + + output = &DecodeAuthorizationMessageOutput{} + req = c.newRequest(op, input, output) + return +} + +// DecodeAuthorizationMessage API operation for AWS Security Token Service. +// +// Decodes additional information about the authorization status of a request +// from an encoded message returned in response to an AWS request. +// +// For example, if a user is not authorized to perform an action that he or +// she has requested, the request returns a Client.UnauthorizedOperation response +// (an HTTP 403 response). Some AWS actions additionally return an encoded message +// that can provide details about this authorization failure. +// +// Only certain AWS actions return an encoded authorization message. The documentation +// for an individual action indicates whether that action returns an encoded +// message in addition to returning an HTTP code. +// +// The message is encoded because the details of the authorization status can +// constitute privileged information that the user who requested the action +// should not see. To decode an authorization status message, a user must be +// granted permissions via an IAM policy to request the DecodeAuthorizationMessage +// (sts:DecodeAuthorizationMessage) action. +// +// The decoded message includes the following type of information: +// +// * Whether the request was denied due to an explicit deny or due to the +// absence of an explicit allow. 
For more information, see Determining Whether +// a Request is Allowed or Denied (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_evaluation-logic.html#policy-eval-denyallow) +// in the IAM User Guide. +// +// * The principal who made the request. +// +// * The requested action. +// +// * The requested resource. +// +// * The values of condition keys in the context of the user's request. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation DecodeAuthorizationMessage for usage and error information. +// +// Returned Error Codes: +// * ErrCodeInvalidAuthorizationMessageException "InvalidAuthorizationMessageException" +// The error returned if the message passed to DecodeAuthorizationMessage was +// invalid. This can happen if the token contains invalid characters, such as +// linebreaks. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessage +func (c *STS) DecodeAuthorizationMessage(input *DecodeAuthorizationMessageInput) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + return out, req.Send() +} + +// DecodeAuthorizationMessageWithContext is the same as DecodeAuthorizationMessage with the addition of +// the ability to pass a context and additional request options. +// +// See DecodeAuthorizationMessage for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) DecodeAuthorizationMessageWithContext(ctx aws.Context, input *DecodeAuthorizationMessageInput, opts ...request.Option) (*DecodeAuthorizationMessageOutput, error) { + req, out := c.DecodeAuthorizationMessageRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetCallerIdentity = "GetCallerIdentity" + +// GetCallerIdentityRequest generates a "aws/request.Request" representing the +// client's request for the GetCallerIdentity operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetCallerIdentity for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetCallerIdentity method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetCallerIdentityRequest method. 
+// req, resp := client.GetCallerIdentityRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentityRequest(input *GetCallerIdentityInput) (req *request.Request, output *GetCallerIdentityOutput) { + op := &request.Operation{ + Name: opGetCallerIdentity, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetCallerIdentityInput{} + } + + output = &GetCallerIdentityOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetCallerIdentity API operation for AWS Security Token Service. +// +// Returns details about the IAM identity whose credentials are used to call +// the API. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetCallerIdentity for usage and error information. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentity +func (c *STS) GetCallerIdentity(input *GetCallerIdentityInput) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + return out, req.Send() +} + +// GetCallerIdentityWithContext is the same as GetCallerIdentity with the addition of +// the ability to pass a context and additional request options. +// +// See GetCallerIdentity for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetCallerIdentityWithContext(ctx aws.Context, input *GetCallerIdentityInput, opts ...request.Option) (*GetCallerIdentityOutput, error) { + req, out := c.GetCallerIdentityRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetFederationToken = "GetFederationToken" + +// GetFederationTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetFederationToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetFederationToken for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetFederationToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetFederationTokenRequest method. 
+//    req, resp := client.GetFederationTokenRequest(params)
+//
+//    err := req.Send()
+//    if err == nil { // resp is now filled
+//        fmt.Println(resp)
+//    }
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken
+func (c *STS) GetFederationTokenRequest(input *GetFederationTokenInput) (req *request.Request, output *GetFederationTokenOutput) {
+	op := &request.Operation{
+		Name:       opGetFederationToken,
+		HTTPMethod: "POST",
+		HTTPPath:   "/",
+	}
+
+	if input == nil {
+		input = &GetFederationTokenInput{}
+	}
+
+	output = &GetFederationTokenOutput{}
+	req = c.newRequest(op, input, output)
+	return
+}
+
+// GetFederationToken API operation for AWS Security Token Service.
+//
+// Returns a set of temporary security credentials (consisting of an access
+// key ID, a secret access key, and a security token) for a federated user.
+// A typical use is in a proxy application that gets temporary security credentials
+// on behalf of distributed applications inside a corporate network. Because
+// you must call the GetFederationToken action using the long-term security
+// credentials of an IAM user, this call is appropriate in contexts where those
+// credentials can be safely stored, usually in a server-based application.
+// For a comparison of GetFederationToken with the other APIs that produce temporary
+// credentials, see Requesting Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// If you are creating a mobile-based or browser-based app that can authenticate
+// users using a web identity provider like Login with Amazon, Facebook, Google,
+// or an OpenID Connect-compatible identity provider, we recommend that you
+// use Amazon Cognito (http://aws.amazon.com/cognito/) or AssumeRoleWithWebIdentity.
+// For more information, see Federation Through a Web-based Identity Provider
+// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
+//
+// The GetFederationToken action must be called by using the long-term AWS security
+// credentials of an IAM user. You can also call GetFederationToken using the
+// security credentials of an AWS root account, but we do not recommend it.
+// Instead, we recommend that you create an IAM user for the purpose of the
+// proxy application and then attach a policy to the IAM user that limits federated
+// users to only the actions and resources that they need access to. For more
+// information, see IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html)
+// in the IAM User Guide.
+//
+// The temporary security credentials that are obtained by using the long-term
+// credentials of an IAM user are valid for the specified duration, from 900
+// seconds (15 minutes) up to a maximum of 129600 seconds (36 hours). The default
+// is 43200 seconds (12 hours). Temporary credentials that are obtained by using
+// AWS root account credentials have a maximum duration of 3600 seconds (1 hour).
+//
+// The temporary security credentials created by GetFederationToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+//    * You cannot use these credentials to call any IAM APIs.
+//
+//    * You cannot call any STS APIs except GetCallerIdentity.
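+//
+// A minimal usage sketch for a server-side proxy (the user name, policy document,
+// and duration below are placeholders):
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//
+//    out, err := svc.GetFederationToken(&sts.GetFederationTokenInput{
+//        Name:            aws.String("federated-user"), // name recorded in the federated user's ARN
+//        Policy:          aws.String(scopedPolicyJSON), // JSON policy that scopes down the IAM user's permissions
+//        DurationSeconds: aws.Int64(43200),
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    creds := out.Credentials // temporary credentials handed to the federated user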
+// +// Permissions +// +// The permissions for the temporary security credentials returned by GetFederationToken +// are determined by a combination of the following: +// +// * The policy or policies that are attached to the IAM user whose credentials +// are used to call GetFederationToken. +// +// * The policy that is passed as a parameter in the call. +// +// The passed policy is attached to the temporary security credentials that +// result from the GetFederationToken API call--that is, to the federated user. +// When the federated user makes an AWS request, AWS evaluates the policy attached +// to the federated user in combination with the policy or policies attached +// to the IAM user whose credentials were used to call GetFederationToken. AWS +// allows the federated user's request only when both the federated user and +// the IAM user are explicitly allowed to perform the requested action. The +// passed policy cannot grant more permissions than those that are defined in +// the IAM user policy. +// +// A typical use case is that the permissions of the IAM user whose credentials +// are used to call GetFederationToken are designed to allow access to all the +// actions and resources that any federated user will need. Then, for individual +// users, you pass a policy to the operation that scopes down the permissions +// to a level that's appropriate to that individual user, using a policy that +// allows only a subset of permissions that are granted to the IAM user. +// +// If you do not pass a policy, the resulting temporary security credentials +// have no effective permissions. The only exception is when the temporary security +// credentials are used to access a resource that has a resource-based policy +// that specifically allows the federated user to access the resource. +// +// For more information about how permissions work, see Permissions for GetFederationToken +// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). +// For information about using GetFederationToken to create temporary security +// credentials, see GetFederationToken—Federation Through a Custom Identity +// Broker (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getfederationtoken). +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for AWS Security Token Service's +// API operation GetFederationToken for usage and error information. +// +// Returned Error Codes: +// * ErrCodeMalformedPolicyDocumentException "MalformedPolicyDocument" +// The request was rejected because the policy document was malformed. The error +// message describes the specific error. +// +// * ErrCodePackedPolicyTooLargeException "PackedPolicyTooLarge" +// The request was rejected because the policy document was too large. The error +// message describes how big the policy document is, in packed form, as a percentage +// of what the API allows. +// +// * ErrCodeRegionDisabledException "RegionDisabledException" +// STS is not activated in the requested region for the account that is being +// asked to generate credentials. The account administrator must use the IAM +// console to activate STS in that region. 
For more information, see Activating +// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationToken +func (c *STS) GetFederationToken(input *GetFederationTokenInput) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + return out, req.Send() +} + +// GetFederationTokenWithContext is the same as GetFederationToken with the addition of +// the ability to pass a context and additional request options. +// +// See GetFederationToken for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *STS) GetFederationTokenWithContext(ctx aws.Context, input *GetFederationTokenInput, opts ...request.Option) (*GetFederationTokenOutput, error) { + req, out := c.GetFederationTokenRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opGetSessionToken = "GetSessionToken" + +// GetSessionTokenRequest generates a "aws/request.Request" representing the +// client's request for the GetSessionToken operation. The "output" return +// value can be used to capture response data after the request's "Send" method +// is called. +// +// See GetSessionToken for usage and error information. +// +// Creating a request object using this method should be used when you want to inject +// custom logic into the request's lifecycle using a custom handler, or if you want to +// access properties on the request object before or after sending the request. If +// you just want the service response, call the GetSessionToken method directly +// instead. +// +// Note: You must call the "Send" method on the returned request object in order +// to execute the request. +// +// // Example sending a request using the GetSessionTokenRequest method. +// req, resp := client.GetSessionTokenRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken +func (c *STS) GetSessionTokenRequest(input *GetSessionTokenInput) (req *request.Request, output *GetSessionTokenOutput) { + op := &request.Operation{ + Name: opGetSessionToken, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &GetSessionTokenInput{} + } + + output = &GetSessionTokenOutput{} + req = c.newRequest(op, input, output) + return +} + +// GetSessionToken API operation for AWS Security Token Service. +// +// Returns a set of temporary credentials for an AWS account or IAM user. The +// credentials consist of an access key ID, a secret access key, and a security +// token. Typically, you use GetSessionToken if you want to use MFA to protect +// programmatic calls to specific AWS APIs like Amazon EC2 StopInstances. MFA-enabled +// IAM users would need to call GetSessionToken and submit an MFA code that +// is associated with their MFA device. Using the temporary security credentials +// that are returned from the call, IAM users can then make programmatic calls +// to APIs that require MFA authentication. 
If you do not supply a correct MFA
+// code, then the API returns an access denied error. For a comparison of GetSessionToken
+// with the other APIs that produce temporary credentials, see Requesting Temporary
+// Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html)
+// and Comparing the AWS STS APIs (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#stsapi_comparison)
+// in the IAM User Guide.
+//
+// The GetSessionToken action must be called by using the long-term AWS security
+// credentials of the AWS account or an IAM user. Credentials that are created
+// by IAM users are valid for the duration that you specify, from 900 seconds
+// (15 minutes) up to a maximum of 129600 seconds (36 hours), with a default
+// of 43200 seconds (12 hours); credentials that are created by using account
+// credentials can range from 900 seconds (15 minutes) up to a maximum of 3600
+// seconds (1 hour), with a default of 1 hour.
+//
+// The temporary security credentials created by GetSessionToken can be used
+// to make API calls to any AWS service with the following exceptions:
+//
+//    * You cannot call any IAM APIs unless MFA authentication information is
+//    included in the request.
+//
+//    * You cannot call any STS API except AssumeRole or GetCallerIdentity.
+//
+// We recommend that you do not call GetSessionToken with root account credentials.
+// Instead, follow our best practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#create-iam-users)
+// by creating one or more IAM users, giving them the necessary permissions,
+// and using IAM users for everyday interaction with AWS.
+//
+// The permissions associated with the temporary security credentials returned
+// by GetSessionToken are based on the permissions associated with the account
+// or IAM user whose credentials are used to call the action. If GetSessionToken
+// is called using root account credentials, the temporary credentials have
+// root account permissions. Similarly, if GetSessionToken is called using the
+// credentials of an IAM user, the temporary credentials have the same permissions
+// as the IAM user.
+//
+// For more information about using GetSessionToken to create temporary credentials,
+// go to Temporary Credentials for Users in Untrusted Environments (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_getsessiontoken)
+// in the IAM User Guide.
+//
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
+//
+// See the AWS API reference guide for AWS Security Token Service's
+// API operation GetSessionToken for usage and error information.
+//
+// Returned Error Codes:
+//   * ErrCodeRegionDisabledException "RegionDisabledException"
+//   STS is not activated in the requested region for the account that is being
+//   asked to generate credentials. The account administrator must use the IAM
+//   console to activate STS in that region. For more information, see Activating
+//   and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+//   in the IAM User Guide.
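+//
+// A minimal MFA-protected usage sketch (the MFA device ARN and token code below
+// are placeholders supplied by the calling user):
+//
+//    svc := sts.New(session.Must(session.NewSession()))
+//
+//    out, err := svc.GetSessionToken(&sts.GetSessionTokenInput{
+//        DurationSeconds: aws.Int64(3600),
+//        SerialNumber:    aws.String("arn:aws:iam::123456789012:mfa/jdoe"), // MFA device of the calling IAM user
+//        TokenCode:       aws.String("123456"),                             // current 6-digit code from that device
+//    })
+//    if err != nil {
+//        return err
+//    }
+//    creds := out.Credentials // temporary credentials that satisfy MFA-gated policies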
+//
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionToken
+func (c *STS) GetSessionToken(input *GetSessionTokenInput) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	return out, req.Send()
+}
+
+// GetSessionTokenWithContext is the same as GetSessionToken with the addition of
+// the ability to pass a context and additional request options.
+//
+// See GetSessionToken for details on how to use this API operation.
+//
+// The context must be non-nil and will be used for request cancellation. If
+// the context is nil a panic will occur. In the future the SDK may create
+// sub-contexts for http.Requests. See https://golang.org/pkg/context/
+// for more information on using Contexts.
+func (c *STS) GetSessionTokenWithContext(ctx aws.Context, input *GetSessionTokenInput, opts ...request.Option) (*GetSessionTokenOutput, error) {
+	req, out := c.GetSessionTokenRequest(input)
+	req.SetContext(ctx)
+	req.ApplyOptions(opts...)
+	return out, req.Send()
+}
+
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleRequest
+type AssumeRoleInput struct {
+	_ struct{} `type:"structure"`
+
+	// The duration, in seconds, of the role session. The value can range from 900
+	// seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set
+	// to 3600 seconds.
+	//
+	// This is separate from the duration of a console session that you might request
+	// using the returned credentials. The request to the federation endpoint for
+	// a console sign-in token takes a SessionDuration parameter that specifies
+	// the maximum length of the console session, separately from the DurationSeconds
+	// parameter on this API. For more information, see Creating a URL that Enables
+	// Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html)
+	// in the IAM User Guide.
+	DurationSeconds *int64 `min:"900" type:"integer"`
+
+	// A unique identifier that is used by third parties when assuming roles in
+	// their customers' accounts. For each role that the third party can assume,
+	// they should instruct their customers to ensure the role's trust policy checks
+	// for the external ID that the third party generated. Each time the third party
+	// assumes the role, they should pass the customer's external ID. The external
+	// ID is useful in order to help third parties bind a role to the customer who
+	// created it. For more information about the external ID, see How to Use an
+	// External ID When Granting Access to Your AWS Resources to a Third Party (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_create_for-user_externalid.html)
+	// in the IAM User Guide.
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can
+	// also include underscores or any of the following characters: =,.@:/-
+	ExternalId *string `min:"2" type:"string"`
+
+	// An IAM policy in JSON format.
+	//
+	// This parameter is optional. If you pass a policy, the temporary security
+	// credentials that are returned by the operation have the permissions that
+	// are allowed by both (the intersection of) the access policy of the role that
+	// is being assumed, and the policy that you pass. This gives you a way to further
+	// restrict the permissions for the resulting temporary security credentials.
+ // You cannot use the passed policy to grant permissions that are in excess + // of those allowed by the access policy of the role that is being assumed. + // For more information, see Permissions for AssumeRole, AssumeRoleWithSAML, + // and AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The Amazon Resource Name (ARN) of the role to assume. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. + // + // Use the role session name to uniquely identify a session when the same role + // is assumed by different principals or for different reasons. In cross-account + // scenarios, the role session name is visible to, and can be logged by the + // account that owns the role. The role session name is also used in the ARN + // of the assumed role principal. This means that subsequent cross-account API + // requests using the temporary security credentials will expose the role session + // name to the external account in their CloudTrail logs. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The identification number of the MFA device that is associated with the user + // who is making the AssumeRole call. Specify this value if the trust policy + // of the role being assumed includes a condition that requires MFA authentication. + // The value is either the serial number for a hardware device (such as GAHT12345678) + // or an Amazon Resource Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + SerialNumber *string `min:"9" type:"string"` + + // The value provided by the MFA device, if the trust policy of the role being + // assumed requires MFA (that is, if the policy includes a condition that tests + // for MFA). If the role being assumed requires MFA and if the TokenCode value + // is missing or expired, the AssumeRole call returns an "access denied" error. + // + // The format for this parameter, as described by its regex pattern, is a sequence + // of six numeric digits. 
+ TokenCode *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *AssumeRoleInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.ExternalId != nil && len(*s.ExternalId) < 2 { + invalidParams.Add(request.NewErrParamMinLen("ExternalId", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.SerialNumber != nil && len(*s.SerialNumber) < 9 { + invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9)) + } + if s.TokenCode != nil && len(*s.TokenCode) < 6 { + invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleInput) SetDurationSeconds(v int64) *AssumeRoleInput { + s.DurationSeconds = &v + return s +} + +// SetExternalId sets the ExternalId field's value. +func (s *AssumeRoleInput) SetExternalId(v string) *AssumeRoleInput { + s.ExternalId = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleInput) SetPolicy(v string) *AssumeRoleInput { + s.Policy = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleInput) SetRoleArn(v string) *AssumeRoleInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleInput) SetRoleSessionName(v string) *AssumeRoleInput { + s.RoleSessionName = &v + return s +} + +// SetSerialNumber sets the SerialNumber field's value. +func (s *AssumeRoleInput) SetSerialNumber(v string) *AssumeRoleInput { + s.SerialNumber = &v + return s +} + +// SetTokenCode sets the TokenCode field's value. +func (s *AssumeRoleInput) SetTokenCode(v string) *AssumeRoleInput { + s.TokenCode = &v + return s +} + +// Contains the response to a successful AssumeRole request, including temporary +// AWS credentials that can be used to make AWS requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleResponse +type AssumeRoleOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. 
+ AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s AssumeRoleOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleOutput { + s.AssumedRoleUser = v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleOutput) SetCredentials(v *Credentials) *AssumeRoleOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleOutput) SetPackedPolicySize(v int64) *AssumeRoleOutput { + s.PackedPolicySize = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLRequest +type AssumeRoleWithSAMLInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. An expiration can also be specified in the SAML authentication + // response's SessionNotOnOrAfter value. The actual expiration time is whichever + // value is shorter. + // + // This is separate from the duration of a console session that you might request + // using the returned credentials. The request to the federation endpoint for + // a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Enabling SAML 2.0 Federated + // Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-saml.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. 
For more information, see
+	// Permissions for AssumeRole, AssumeRoleWithSAML, and AssumeRoleWithWebIdentity
+	// (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html)
+	// in the IAM User Guide.
+	//
+	// The format for this parameter, as described by its regex pattern, is a string
+	// of characters up to 2048 characters in length. The characters can be any
+	// ASCII character from the space character to the end of the valid character
+	// list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A),
+	// and carriage return (\u000D) characters.
+	//
+	// The policy plain text must be 2048 bytes or shorter. However, an internal
+	// conversion compresses it into a packed binary format with a separate limit.
+	// The PackedPolicySize response element indicates by percentage how close to
+	// the upper size limit the policy is, with 100% equaling the maximum allowed
+	// size.
+	Policy *string `min:"1" type:"string"`
+
+	// The Amazon Resource Name (ARN) of the SAML provider in IAM that describes
+	// the IdP.
+	//
+	// PrincipalArn is a required field
+	PrincipalArn *string `min:"20" type:"string" required:"true"`
+
+	// The Amazon Resource Name (ARN) of the role that the caller is assuming.
+	//
+	// RoleArn is a required field
+	RoleArn *string `min:"20" type:"string" required:"true"`
+
+	// The base-64 encoded SAML authentication response provided by the IdP.
+	//
+	// For more information, see Configuring a Relying Party and Adding Claims (http://docs.aws.amazon.com/IAM/latest/UserGuide/create-role-saml-IdP-tasks.html)
+	// in the Using IAM guide.
+	//
+	// SAMLAssertion is a required field
+	SAMLAssertion *string `min:"4" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s AssumeRoleWithSAMLInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s AssumeRoleWithSAMLInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *AssumeRoleWithSAMLInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithSAMLInput"}
+	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+	}
+	if s.Policy != nil && len(*s.Policy) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Policy", 1))
+	}
+	if s.PrincipalArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("PrincipalArn"))
+	}
+	if s.PrincipalArn != nil && len(*s.PrincipalArn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("PrincipalArn", 20))
+	}
+	if s.RoleArn == nil {
+		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
+	}
+	if s.RoleArn != nil && len(*s.RoleArn) < 20 {
+		invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20))
+	}
+	if s.SAMLAssertion == nil {
+		invalidParams.Add(request.NewErrParamRequired("SAMLAssertion"))
+	}
+	if s.SAMLAssertion != nil && len(*s.SAMLAssertion) < 4 {
+		invalidParams.Add(request.NewErrParamMinLen("SAMLAssertion", 4))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *AssumeRoleWithSAMLInput) SetDurationSeconds(v int64) *AssumeRoleWithSAMLInput {
+	s.DurationSeconds = &v
+	return s
+}
+
+// SetPolicy sets the Policy field's value.
+func (s *AssumeRoleWithSAMLInput) SetPolicy(v string) *AssumeRoleWithSAMLInput { + s.Policy = &v + return s +} + +// SetPrincipalArn sets the PrincipalArn field's value. +func (s *AssumeRoleWithSAMLInput) SetPrincipalArn(v string) *AssumeRoleWithSAMLInput { + s.PrincipalArn = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithSAMLInput) SetRoleArn(v string) *AssumeRoleWithSAMLInput { + s.RoleArn = &v + return s +} + +// SetSAMLAssertion sets the SAMLAssertion field's value. +func (s *AssumeRoleWithSAMLInput) SetSAMLAssertion(v string) *AssumeRoleWithSAMLInput { + s.SAMLAssertion = &v + return s +} + +// Contains the response to a successful AssumeRoleWithSAML request, including +// temporary AWS credentials that can be used to make AWS requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithSAMLResponse +type AssumeRoleWithSAMLOutput struct { + _ struct{} `type:"structure"` + + // The identifiers for the temporary security credentials that the operation + // returns. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The value of the Recipient attribute of the SubjectConfirmationData element + // of the SAML assertion. + Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // The value of the Issuer element of the SAML assertion. + Issuer *string `type:"string"` + + // A hash value based on the concatenation of the Issuer response value, the + // AWS account ID, and the friendly name (the last part of the ARN) of the SAML + // provider in IAM. The combination of NameQualifier and Subject can be used + // to uniquely identify a federated user. + // + // The following pseudocode shows how the hash value is calculated: + // + // BASE64 ( SHA1 ( "https://example.com/saml" + "123456789012" + "/MySAMLIdP" + // ) ) + NameQualifier *string `type:"string"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The value of the NameID element in the Subject element of the SAML assertion. + Subject *string `type:"string"` + + // The format of the name ID, as defined by the Format attribute in the NameID + // element of the SAML assertion. Typical examples of the format are transient + // or persistent. + // + // If the format includes the prefix urn:oasis:names:tc:SAML:2.0:nameid-format, + // that prefix is removed. For example, urn:oasis:names:tc:SAML:2.0:nameid-format:transient + // is returned as transient. If the format includes any other prefix, the format + // is returned with no modifications. 
+ SubjectType *string `type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithSAMLOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithSAMLOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithSAMLOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithSAMLOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithSAMLOutput) SetAudience(v string) *AssumeRoleWithSAMLOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithSAMLOutput) SetCredentials(v *Credentials) *AssumeRoleWithSAMLOutput { + s.Credentials = v + return s +} + +// SetIssuer sets the Issuer field's value. +func (s *AssumeRoleWithSAMLOutput) SetIssuer(v string) *AssumeRoleWithSAMLOutput { + s.Issuer = &v + return s +} + +// SetNameQualifier sets the NameQualifier field's value. +func (s *AssumeRoleWithSAMLOutput) SetNameQualifier(v string) *AssumeRoleWithSAMLOutput { + s.NameQualifier = &v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithSAMLOutput) SetPackedPolicySize(v int64) *AssumeRoleWithSAMLOutput { + s.PackedPolicySize = &v + return s +} + +// SetSubject sets the Subject field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubject(v string) *AssumeRoleWithSAMLOutput { + s.Subject = &v + return s +} + +// SetSubjectType sets the SubjectType field's value. +func (s *AssumeRoleWithSAMLOutput) SetSubjectType(v string) *AssumeRoleWithSAMLOutput { + s.SubjectType = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityRequest +type AssumeRoleWithWebIdentityInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, of the role session. The value can range from 900 + // seconds (15 minutes) to 3600 seconds (1 hour). By default, the value is set + // to 3600 seconds. + // + // This is separate from the duration of a console session that you might request + // using the returned credentials. The request to the federation endpoint for + // a console sign-in token takes a SessionDuration parameter that specifies + // the maximum length of the console session, separately from the DurationSeconds + // parameter on this API. For more information, see Creating a URL that Enables + // Federated Users to Access the AWS Management Console (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_enable-console-custom-url.html) + // in the IAM User Guide. + DurationSeconds *int64 `min:"900" type:"integer"` + + // An IAM policy in JSON format. + // + // The policy parameter is optional. If you pass a policy, the temporary security + // credentials that are returned by the operation have the permissions that + // are allowed by both the access policy of the role that is being assumed, + // and the policy that you pass. This gives you a way to further restrict the + // permissions for the resulting temporary security credentials. You cannot + // use the passed policy to grant permissions that are in excess of those allowed + // by the access policy of the role that is being assumed. 
For more information, + // see Permissions for AssumeRoleWithWebIdentity (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_assumerole.html) + // in the IAM User Guide. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + Policy *string `min:"1" type:"string"` + + // The fully qualified host component of the domain name of the identity provider. + // + // Specify this value only for OAuth 2.0 access tokens. Currently www.amazon.com + // and graph.facebook.com are the only supported identity providers for OAuth + // 2.0 access tokens. Do not include URL schemes and port numbers. + // + // Do not specify this value for OpenID Connect ID tokens. + ProviderId *string `min:"4" type:"string"` + + // The Amazon Resource Name (ARN) of the role that the caller is assuming. + // + // RoleArn is a required field + RoleArn *string `min:"20" type:"string" required:"true"` + + // An identifier for the assumed role session. Typically, you pass the name + // or identifier that is associated with the user who is using your application. + // That way, the temporary security credentials that your application will use + // are associated with that user. This session name is included as part of the + // ARN and assumed role ID in the AssumedRoleUser response element. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. You can + // also include underscores or any of the following characters: =,.@- + // + // RoleSessionName is a required field + RoleSessionName *string `min:"2" type:"string" required:"true"` + + // The OAuth 2.0 access token or OpenID Connect ID token that is provided by + // the identity provider. Your application must get this token by authenticating + // the user who is using your application with a web identity provider before + // the application makes an AssumeRoleWithWebIdentity call. + // + // WebIdentityToken is a required field + WebIdentityToken *string `min:"4" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. 
+func (s *AssumeRoleWithWebIdentityInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "AssumeRoleWithWebIdentityInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + if s.ProviderId != nil && len(*s.ProviderId) < 4 { + invalidParams.Add(request.NewErrParamMinLen("ProviderId", 4)) + } + if s.RoleArn == nil { + invalidParams.Add(request.NewErrParamRequired("RoleArn")) + } + if s.RoleArn != nil && len(*s.RoleArn) < 20 { + invalidParams.Add(request.NewErrParamMinLen("RoleArn", 20)) + } + if s.RoleSessionName == nil { + invalidParams.Add(request.NewErrParamRequired("RoleSessionName")) + } + if s.RoleSessionName != nil && len(*s.RoleSessionName) < 2 { + invalidParams.Add(request.NewErrParamMinLen("RoleSessionName", 2)) + } + if s.WebIdentityToken == nil { + invalidParams.Add(request.NewErrParamRequired("WebIdentityToken")) + } + if s.WebIdentityToken != nil && len(*s.WebIdentityToken) < 4 { + invalidParams.Add(request.NewErrParamMinLen("WebIdentityToken", 4)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *AssumeRoleWithWebIdentityInput) SetDurationSeconds(v int64) *AssumeRoleWithWebIdentityInput { + s.DurationSeconds = &v + return s +} + +// SetPolicy sets the Policy field's value. +func (s *AssumeRoleWithWebIdentityInput) SetPolicy(v string) *AssumeRoleWithWebIdentityInput { + s.Policy = &v + return s +} + +// SetProviderId sets the ProviderId field's value. +func (s *AssumeRoleWithWebIdentityInput) SetProviderId(v string) *AssumeRoleWithWebIdentityInput { + s.ProviderId = &v + return s +} + +// SetRoleArn sets the RoleArn field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleArn(v string) *AssumeRoleWithWebIdentityInput { + s.RoleArn = &v + return s +} + +// SetRoleSessionName sets the RoleSessionName field's value. +func (s *AssumeRoleWithWebIdentityInput) SetRoleSessionName(v string) *AssumeRoleWithWebIdentityInput { + s.RoleSessionName = &v + return s +} + +// SetWebIdentityToken sets the WebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityInput) SetWebIdentityToken(v string) *AssumeRoleWithWebIdentityInput { + s.WebIdentityToken = &v + return s +} + +// Contains the response to a successful AssumeRoleWithWebIdentity request, +// including temporary AWS credentials that can be used to make AWS requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRoleWithWebIdentityResponse +type AssumeRoleWithWebIdentityOutput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) and the assumed role ID, which are identifiers + // that you can use to refer to the resulting temporary security credentials. + // For example, you can reference these credentials as a principal in a resource-based + // policy by using the ARN or assumed role ID. The ARN and ID include the RoleSessionName + // that you specified when you called AssumeRole. + AssumedRoleUser *AssumedRoleUser `type:"structure"` + + // The intended audience (also known as client ID) of the web identity token. + // This is traditionally the client identifier issued to the application that + // requested the web identity token. 
+ Audience *string `type:"string"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // A percentage value that indicates the size of the policy in packed form. + // The service rejects any policy with a packed size greater than 100 percent, + // which means the policy exceeded the allowed space. + PackedPolicySize *int64 `type:"integer"` + + // The issuing authority of the web identity token presented. For OpenID Connect + // ID Tokens this contains the value of the iss field. For OAuth 2.0 access + // tokens, this contains the value of the ProviderId parameter that was passed + // in the AssumeRoleWithWebIdentity request. + Provider *string `type:"string"` + + // The unique user identifier that is returned by the identity provider. This + // identifier is associated with the WebIdentityToken that was submitted with + // the AssumeRoleWithWebIdentity call. The identifier is typically unique to + // the user and the application that acquired the WebIdentityToken (pairwise + // identifier). For OpenID Connect ID tokens, this field contains the value + // returned by the identity provider as the token's sub (Subject) claim. + SubjectFromWebIdentityToken *string `min:"6" type:"string"` +} + +// String returns the string representation +func (s AssumeRoleWithWebIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumeRoleWithWebIdentityOutput) GoString() string { + return s.String() +} + +// SetAssumedRoleUser sets the AssumedRoleUser field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAssumedRoleUser(v *AssumedRoleUser) *AssumeRoleWithWebIdentityOutput { + s.AssumedRoleUser = v + return s +} + +// SetAudience sets the Audience field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetAudience(v string) *AssumeRoleWithWebIdentityOutput { + s.Audience = &v + return s +} + +// SetCredentials sets the Credentials field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetCredentials(v *Credentials) *AssumeRoleWithWebIdentityOutput { + s.Credentials = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetPackedPolicySize(v int64) *AssumeRoleWithWebIdentityOutput { + s.PackedPolicySize = &v + return s +} + +// SetProvider sets the Provider field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetProvider(v string) *AssumeRoleWithWebIdentityOutput { + s.Provider = &v + return s +} + +// SetSubjectFromWebIdentityToken sets the SubjectFromWebIdentityToken field's value. +func (s *AssumeRoleWithWebIdentityOutput) SetSubjectFromWebIdentityToken(v string) *AssumeRoleWithWebIdentityOutput { + s.SubjectFromWebIdentityToken = &v + return s +} + +// The identifiers for the temporary security credentials that the operation +// returns. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumedRoleUser +type AssumedRoleUser struct { + _ struct{} `type:"structure"` + + // The ARN of the temporary security credentials that are returned from the + // AssumeRole action. 
For more information about ARNs and how to use them in + // policies, see IAM Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // A unique identifier that contains the role ID and the role session name of + // the role that is being assumed. The role ID is generated by AWS when the + // role is created. + // + // AssumedRoleId is a required field + AssumedRoleId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s AssumedRoleUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s AssumedRoleUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *AssumedRoleUser) SetArn(v string) *AssumedRoleUser { + s.Arn = &v + return s +} + +// SetAssumedRoleId sets the AssumedRoleId field's value. +func (s *AssumedRoleUser) SetAssumedRoleId(v string) *AssumedRoleUser { + s.AssumedRoleId = &v + return s +} + +// AWS credentials for API authentication. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/Credentials +type Credentials struct { + _ struct{} `type:"structure"` + + // The access key ID that identifies the temporary security credentials. + // + // AccessKeyId is a required field + AccessKeyId *string `min:"16" type:"string" required:"true"` + + // The date on which the current credentials expire. + // + // Expiration is a required field + Expiration *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The secret access key that can be used to sign requests. + // + // SecretAccessKey is a required field + SecretAccessKey *string `type:"string" required:"true"` + + // The token that users must pass to the service API to use the temporary credentials. + // + // SessionToken is a required field + SessionToken *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s Credentials) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Credentials) GoString() string { + return s.String() +} + +// SetAccessKeyId sets the AccessKeyId field's value. +func (s *Credentials) SetAccessKeyId(v string) *Credentials { + s.AccessKeyId = &v + return s +} + +// SetExpiration sets the Expiration field's value. +func (s *Credentials) SetExpiration(v time.Time) *Credentials { + s.Expiration = &v + return s +} + +// SetSecretAccessKey sets the SecretAccessKey field's value. +func (s *Credentials) SetSecretAccessKey(v string) *Credentials { + s.SecretAccessKey = &v + return s +} + +// SetSessionToken sets the SessionToken field's value. +func (s *Credentials) SetSessionToken(v string) *Credentials { + s.SessionToken = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageRequest +type DecodeAuthorizationMessageInput struct { + _ struct{} `type:"structure"` + + // The encoded message that was returned with the response. 
+ // + // EncodedMessage is a required field + EncodedMessage *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DecodeAuthorizationMessageInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DecodeAuthorizationMessageInput"} + if s.EncodedMessage == nil { + invalidParams.Add(request.NewErrParamRequired("EncodedMessage")) + } + if s.EncodedMessage != nil && len(*s.EncodedMessage) < 1 { + invalidParams.Add(request.NewErrParamMinLen("EncodedMessage", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetEncodedMessage sets the EncodedMessage field's value. +func (s *DecodeAuthorizationMessageInput) SetEncodedMessage(v string) *DecodeAuthorizationMessageInput { + s.EncodedMessage = &v + return s +} + +// A document that contains additional information about the authorization status +// of a request from an encoded message that is returned in response to an AWS +// request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/DecodeAuthorizationMessageResponse +type DecodeAuthorizationMessageOutput struct { + _ struct{} `type:"structure"` + + // An XML document that contains the decoded message. + DecodedMessage *string `type:"string"` +} + +// String returns the string representation +func (s DecodeAuthorizationMessageOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DecodeAuthorizationMessageOutput) GoString() string { + return s.String() +} + +// SetDecodedMessage sets the DecodedMessage field's value. +func (s *DecodeAuthorizationMessageOutput) SetDecodedMessage(v string) *DecodeAuthorizationMessageOutput { + s.DecodedMessage = &v + return s +} + +// Identifiers for the federated user that is associated with the credentials. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/FederatedUser +type FederatedUser struct { + _ struct{} `type:"structure"` + + // The ARN that specifies the federated user that is associated with the credentials. + // For more information about ARNs and how to use them in policies, see IAM + // Identifiers (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html) + // in Using IAM. + // + // Arn is a required field + Arn *string `min:"20" type:"string" required:"true"` + + // The string that identifies the federated user associated with the credentials, + // similar to the unique ID of an IAM user. + // + // FederatedUserId is a required field + FederatedUserId *string `min:"2" type:"string" required:"true"` +} + +// String returns the string representation +func (s FederatedUser) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s FederatedUser) GoString() string { + return s.String() +} + +// SetArn sets the Arn field's value. +func (s *FederatedUser) SetArn(v string) *FederatedUser { + s.Arn = &v + return s +} + +// SetFederatedUserId sets the FederatedUserId field's value. 
+func (s *FederatedUser) SetFederatedUserId(v string) *FederatedUser { + s.FederatedUserId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityRequest +type GetCallerIdentityInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s GetCallerIdentityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityInput) GoString() string { + return s.String() +} + +// Contains the response to a successful GetCallerIdentity request, including +// information about the entity making the request. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetCallerIdentityResponse +type GetCallerIdentityOutput struct { + _ struct{} `type:"structure"` + + // The AWS account ID number of the account that owns or contains the calling + // entity. + Account *string `type:"string"` + + // The AWS ARN associated with the calling entity. + Arn *string `min:"20" type:"string"` + + // The unique identifier of the calling entity. The exact value depends on the + // type of entity making the call. The values returned are those listed in the + // aws:userid column in the Principal table (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_variables.html#principaltable) + // found on the Policy Variables reference page in the IAM User Guide. + UserId *string `type:"string"` +} + +// String returns the string representation +func (s GetCallerIdentityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetCallerIdentityOutput) GoString() string { + return s.String() +} + +// SetAccount sets the Account field's value. +func (s *GetCallerIdentityOutput) SetAccount(v string) *GetCallerIdentityOutput { + s.Account = &v + return s +} + +// SetArn sets the Arn field's value. +func (s *GetCallerIdentityOutput) SetArn(v string) *GetCallerIdentityOutput { + s.Arn = &v + return s +} + +// SetUserId sets the UserId field's value. +func (s *GetCallerIdentityOutput) SetUserId(v string) *GetCallerIdentityOutput { + s.UserId = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenRequest +type GetFederationTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the session should last. Acceptable durations + // for federation sessions range from 900 seconds (15 minutes) to 129600 seconds + // (36 hours), with 43200 seconds (12 hours) as the default. Sessions obtained + // using AWS account (root) credentials are restricted to a maximum of 3600 + // seconds (one hour). If the specified duration is longer than one hour, the + // session obtained by using AWS account (root) credentials defaults to one + // hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The name of the federated user. The name is used as an identifier for the + // temporary security credentials (such as Bob). For example, you can reference + // the federated user name in a resource-based policy, such as in an Amazon + // S3 bucket policy. + // + // The regex used to validate this parameter is a string of characters consisting + // of upper- and lower-case alphanumeric characters with no spaces. 
You can + // also include underscores or any of the following characters: =,.@- + // + // Name is a required field + Name *string `min:"2" type:"string" required:"true"` + + // An IAM policy in JSON format that is passed with the GetFederationToken call + // and evaluated along with the policy or policies that are attached to the + // IAM user whose credentials are used to call GetFederationToken. The passed + // policy is used to scope down the permissions that are available to the IAM + // user, by allowing only a subset of the permissions that are granted to the + // IAM user. The passed policy cannot grant more permissions than those granted + // to the IAM user. The final permissions for the federated user are the most + // restrictive set based on the intersection of the passed policy and the IAM + // user policy. + // + // If you do not pass a policy, the resulting temporary security credentials + // have no effective permissions. The only exception is when the temporary security + // credentials are used to access a resource that has a resource-based policy + // that specifically allows the federated user to access the resource. + // + // The format for this parameter, as described by its regex pattern, is a string + // of characters up to 2048 characters in length. The characters can be any + // ASCII character from the space character to the end of the valid character + // list (\u0020-\u00FF). It can also include the tab (\u0009), linefeed (\u000A), + // and carriage return (\u000D) characters. + // + // The policy plain text must be 2048 bytes or shorter. However, an internal + // conversion compresses it into a packed binary format with a separate limit. + // The PackedPolicySize response element indicates by percentage how close to + // the upper size limit the policy is, with 100% equaling the maximum allowed + // size. + // + // For more information about how permissions work, see Permissions for GetFederationToken + // (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_control-access_getfederationtoken.html). + Policy *string `min:"1" type:"string"` +} + +// String returns the string representation +func (s GetFederationTokenInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *GetFederationTokenInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "GetFederationTokenInput"} + if s.DurationSeconds != nil && *s.DurationSeconds < 900 { + invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900)) + } + if s.Name == nil { + invalidParams.Add(request.NewErrParamRequired("Name")) + } + if s.Name != nil && len(*s.Name) < 2 { + invalidParams.Add(request.NewErrParamMinLen("Name", 2)) + } + if s.Policy != nil && len(*s.Policy) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Policy", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetDurationSeconds sets the DurationSeconds field's value. +func (s *GetFederationTokenInput) SetDurationSeconds(v int64) *GetFederationTokenInput { + s.DurationSeconds = &v + return s +} + +// SetName sets the Name field's value. +func (s *GetFederationTokenInput) SetName(v string) *GetFederationTokenInput { + s.Name = &v + return s +} + +// SetPolicy sets the Policy field's value. 
+func (s *GetFederationTokenInput) SetPolicy(v string) *GetFederationTokenInput { + s.Policy = &v + return s +} + +// Contains the response to a successful GetFederationToken request, including +// temporary AWS credentials that can be used to make AWS requests. +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetFederationTokenResponse +type GetFederationTokenOutput struct { + _ struct{} `type:"structure"` + + // The temporary security credentials, which include an access key ID, a secret + // access key, and a security (or session) token. + // + // Note: The size of the security token that STS APIs return is not fixed. We + // strongly recommend that you make no assumptions about the maximum size. As + // of this writing, the typical size is less than 4096 bytes, but that can vary. + // Also, future updates to AWS might require larger sizes. + Credentials *Credentials `type:"structure"` + + // Identifiers for the federated user associated with the credentials (such + // as arn:aws:sts::123456789012:federated-user/Bob or 123456789012:Bob). You + // can use the federated user's ARN in your resource-based policies, such as + // an Amazon S3 bucket policy. + FederatedUser *FederatedUser `type:"structure"` + + // A percentage value indicating the size of the policy in packed form. The + // service rejects policies for which the packed size is greater than 100 percent + // of the allowed value. + PackedPolicySize *int64 `type:"integer"` +} + +// String returns the string representation +func (s GetFederationTokenOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s GetFederationTokenOutput) GoString() string { + return s.String() +} + +// SetCredentials sets the Credentials field's value. +func (s *GetFederationTokenOutput) SetCredentials(v *Credentials) *GetFederationTokenOutput { + s.Credentials = v + return s +} + +// SetFederatedUser sets the FederatedUser field's value. +func (s *GetFederationTokenOutput) SetFederatedUser(v *FederatedUser) *GetFederationTokenOutput { + s.FederatedUser = v + return s +} + +// SetPackedPolicySize sets the PackedPolicySize field's value. +func (s *GetFederationTokenOutput) SetPackedPolicySize(v int64) *GetFederationTokenOutput { + s.PackedPolicySize = &v + return s +} + +// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenRequest +type GetSessionTokenInput struct { + _ struct{} `type:"structure"` + + // The duration, in seconds, that the credentials should remain valid. Acceptable + // durations for IAM user sessions range from 900 seconds (15 minutes) to 129600 + // seconds (36 hours), with 43200 seconds (12 hours) as the default. Sessions + // for AWS account owners are restricted to a maximum of 3600 seconds (one hour). + // If the duration is longer than one hour, the session for AWS account owners + // defaults to one hour. + DurationSeconds *int64 `min:"900" type:"integer"` + + // The identification number of the MFA device that is associated with the IAM + // user who is making the GetSessionToken call. Specify this value if the IAM + // user has a policy that requires MFA authentication. The value is either the + // serial number for a hardware device (such as GAHT12345678) or an Amazon Resource + // Name (ARN) for a virtual device (such as arn:aws:iam::123456789012:mfa/user). + // You can find the device for an IAM user by going to the AWS Management Console + // and viewing the user's security credentials. 
+	//
+	// The regex used to validate this parameter is a string of characters consisting
+	// of upper- and lower-case alphanumeric characters with no spaces. You can
+	// also include underscores or any of the following characters: =,.@:/-
+	SerialNumber *string `min:"9" type:"string"`
+
+	// The value provided by the MFA device, if MFA is required. If any policy requires
+	// the IAM user to submit an MFA code, specify this value. If MFA authentication
+	// is required, and the user does not provide a code when requesting a set of
+	// temporary security credentials, the user will receive an "access denied"
+	// response when requesting resources that require MFA authentication.
+	//
+	// The format for this parameter, as described by its regex pattern, is a sequence
+	// of six numeric digits.
+	TokenCode *string `min:"6" type:"string"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenInput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenInput) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *GetSessionTokenInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "GetSessionTokenInput"}
+	if s.DurationSeconds != nil && *s.DurationSeconds < 900 {
+		invalidParams.Add(request.NewErrParamMinValue("DurationSeconds", 900))
+	}
+	if s.SerialNumber != nil && len(*s.SerialNumber) < 9 {
+		invalidParams.Add(request.NewErrParamMinLen("SerialNumber", 9))
+	}
+	if s.TokenCode != nil && len(*s.TokenCode) < 6 {
+		invalidParams.Add(request.NewErrParamMinLen("TokenCode", 6))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetDurationSeconds sets the DurationSeconds field's value.
+func (s *GetSessionTokenInput) SetDurationSeconds(v int64) *GetSessionTokenInput {
+	s.DurationSeconds = &v
+	return s
+}
+
+// SetSerialNumber sets the SerialNumber field's value.
+func (s *GetSessionTokenInput) SetSerialNumber(v string) *GetSessionTokenInput {
+	s.SerialNumber = &v
+	return s
+}
+
+// SetTokenCode sets the TokenCode field's value.
+func (s *GetSessionTokenInput) SetTokenCode(v string) *GetSessionTokenInput {
+	s.TokenCode = &v
+	return s
+}
+
+// Contains the response to a successful GetSessionToken request, including
+// temporary AWS credentials that can be used to make AWS requests.
+// Please also see https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/GetSessionTokenResponse
+type GetSessionTokenOutput struct {
+	_ struct{} `type:"structure"`
+
+	// The temporary security credentials, which include an access key ID, a secret
+	// access key, and a security (or session) token.
+	//
+	// Note: The size of the security token that STS APIs return is not fixed. We
+	// strongly recommend that you make no assumptions about the maximum size. As
+	// of this writing, the typical size is less than 4096 bytes, but that can vary.
+	// Also, future updates to AWS might require larger sizes.
+	Credentials *Credentials `type:"structure"`
+}
+
+// String returns the string representation
+func (s GetSessionTokenOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s GetSessionTokenOutput) GoString() string {
+	return s.String()
+}
+
+// SetCredentials sets the Credentials field's value.
+func (s *GetSessionTokenOutput) SetCredentials(v *Credentials) *GetSessionTokenOutput { + s.Credentials = v + return s +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go new file mode 100644 index 0000000..4010cc7 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/customizations.go @@ -0,0 +1,12 @@ +package sts + +import "github.com/aws/aws-sdk-go/aws/request" + +func init() { + initRequest = func(r *request.Request) { + switch r.Operation.Name { + case opAssumeRoleWithSAML, opAssumeRoleWithWebIdentity: + r.Handlers.Sign.Clear() // these operations are unsigned + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go new file mode 100644 index 0000000..d2af518 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/doc.go @@ -0,0 +1,124 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +// Package sts provides the client and types for making API +// requests to AWS Security Token Service. +// +// The AWS Security Token Service (STS) is a web service that enables you to +// request temporary, limited-privilege credentials for AWS Identity and Access +// Management (IAM) users or for users that you authenticate (federated users). +// This guide provides descriptions of the STS API. For more detailed information +// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html). +// +// As an alternative to using the API, you can use one of the AWS SDKs, which +// consist of libraries and sample code for various programming languages and +// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient +// way to create programmatic access to STS. For example, the SDKs take care +// of cryptographically signing requests, managing errors, and retrying requests +// automatically. For information about the AWS SDKs, including how to download +// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/). +// +// For information about setting up signatures and authorization through the +// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html) +// in the AWS General Reference. For general information about the Query API, +// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html) +// in Using IAM. For information about using security tokens with other AWS +// products, go to AWS Services That Work with IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html) +// in the IAM User Guide. +// +// If you're new to AWS and need additional technical information about a specific +// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/ +// (http://aws.amazon.com/documentation/). +// +// Endpoints +// +// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com +// that maps to the US East (N. Virginia) region. Additional regions are available +// and are activated by default. For more information, see Activating and Deactivating +// AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html) +// in the IAM User Guide. 
+//
+// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
+// in the AWS General Reference.
+//
+// Recording API requests
+//
+// STS supports AWS CloudTrail, which is a service that records AWS calls for
+// your AWS account and delivers log files to an Amazon S3 bucket. By using
+// information collected by CloudTrail, you can determine what requests were
+// successfully made to STS, who made the request, when it was made, and so
+// on. To learn more about CloudTrail, including how to turn it on and find
+// your log files, see the AWS CloudTrail User Guide (http://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
+//
+// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
+//
+// See sts package documentation for more information.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/
+//
+// Using the Client
+//
+// To use the client for AWS Security Token Service you will first need
+// to create a new instance of it.
+//
+// When creating a client for an AWS service you'll first need to have a Session
+// already created. The Session provides configuration that can be shared
+// between multiple service clients. Additional configuration can be applied to
+// the Session and service's client when they are constructed. The aws package's
+// Config type contains several fields such as Region for the AWS Region the
+// client should make API requests to. The optional Config value can be provided
+// as the variadic argument for Sessions and client creation.
+//
+// Once the service's client is created you can use it to make API requests to the
+// AWS service. These clients are safe to use concurrently.
+//
+//    // Create a session to share configuration, and load external configuration.
+//    sess := session.Must(session.NewSession())
+//
+//    // Create the service's client with the session.
+//    svc := sts.New(sess)
+//
+// See the SDK's documentation for more information on how to use service clients.
+// https://docs.aws.amazon.com/sdk-for-go/api/
+//
+// See aws package's Config type for more information on configuration options.
+// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
+//
+// See the AWS Security Token Service client STS for more
+// information on creating the service's client.
+// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
+//
+// Once the client is created you can make an API request to the service.
+// Each API method takes an input parameter, and returns the service response
+// and an error.
+//
+// The API method will document which error codes the service can return for
+// the operation if the service models the API operation's errors. These
+// errors will also be available as const strings prefixed with "ErrCode".
+//
+//    result, err := svc.AssumeRole(params)
+//    if err != nil {
+//        // Cast err to awserr.Error to handle specific error codes.
+//        aerr, ok := err.(awserr.Error)
+//        if ok && aerr.Code() == sts.ErrCodeMalformedPolicyDocumentException {
+//            // Specific error code handling
+//        }
+//        return err
+//    }
+//
+//    fmt.Println("AssumeRole result:")
+//    fmt.Println(result)
+//
+// Using the Client with Context
+//
+// The service's client also provides methods to make API requests with a Context
+// value. This allows you to control the timeout and cancellation of pending
+// requests. These methods also take request Option as a variadic parameter to apply
+// additional configuration to the API request.
+// +// ctx := context.Background() +// +// result, err := svc.AssumeRoleWithContext(ctx, params) +// +// See the request package documentation for more information on using Context pattern +// with the SDK. +// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/ +package sts diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go new file mode 100644 index 0000000..e24884e --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/service/sts/errors.go @@ -0,0 +1,73 @@ +// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. + +package sts + +const ( + + // ErrCodeExpiredTokenException for service response error code + // "ExpiredTokenException". + // + // The web identity token that was passed is expired or is not valid. Get a + // new identity token from the identity provider and then retry the request. + ErrCodeExpiredTokenException = "ExpiredTokenException" + + // ErrCodeIDPCommunicationErrorException for service response error code + // "IDPCommunicationError". + // + // The request could not be fulfilled because the non-AWS identity provider + // (IDP) that was asked to verify the incoming identity token could not be reached. + // This is often a transient error caused by network conditions. Retry the request + // a limited number of times so that you don't exceed the request rate. If the + // error persists, the non-AWS identity provider might be down or not responding. + ErrCodeIDPCommunicationErrorException = "IDPCommunicationError" + + // ErrCodeIDPRejectedClaimException for service response error code + // "IDPRejectedClaim". + // + // The identity provider (IdP) reported that authentication failed. This might + // be because the claim is invalid. + // + // If this error is returned for the AssumeRoleWithWebIdentity operation, it + // can also mean that the claim has expired or has been explicitly revoked. + ErrCodeIDPRejectedClaimException = "IDPRejectedClaim" + + // ErrCodeInvalidAuthorizationMessageException for service response error code + // "InvalidAuthorizationMessageException". + // + // The error returned if the message passed to DecodeAuthorizationMessage was + // invalid. This can happen if the token contains invalid characters, such as + // linebreaks. + ErrCodeInvalidAuthorizationMessageException = "InvalidAuthorizationMessageException" + + // ErrCodeInvalidIdentityTokenException for service response error code + // "InvalidIdentityToken". + // + // The web identity token that was passed could not be validated by AWS. Get + // a new identity token from the identity provider and then retry the request. + ErrCodeInvalidIdentityTokenException = "InvalidIdentityToken" + + // ErrCodeMalformedPolicyDocumentException for service response error code + // "MalformedPolicyDocument". + // + // The request was rejected because the policy document was malformed. The error + // message describes the specific error. + ErrCodeMalformedPolicyDocumentException = "MalformedPolicyDocument" + + // ErrCodePackedPolicyTooLargeException for service response error code + // "PackedPolicyTooLarge". + // + // The request was rejected because the policy document was too large. The error + // message describes how big the policy document is, in packed form, as a percentage + // of what the API allows. + ErrCodePackedPolicyTooLargeException = "PackedPolicyTooLarge" + + // ErrCodeRegionDisabledException for service response error code + // "RegionDisabledException". 
+	//
+	// STS is not activated in the requested region for the account that is being
+	// asked to generate credentials. The account administrator must use the IAM
+	// console to activate STS in that region. For more information, see Activating
+	// and Deactivating AWS STS in an AWS Region (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
+	// in the IAM User Guide.
+	ErrCodeRegionDisabledException = "RegionDisabledException"
+)
diff --git a/vendor/github.com/aws/aws-sdk-go/service/sts/service.go b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
new file mode 100644
index 0000000..1ee5839
--- /dev/null
+++ b/vendor/github.com/aws/aws-sdk-go/service/sts/service.go
@@ -0,0 +1,93 @@
+// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
+
+package sts
+
+import (
+	"github.com/aws/aws-sdk-go/aws"
+	"github.com/aws/aws-sdk-go/aws/client"
+	"github.com/aws/aws-sdk-go/aws/client/metadata"
+	"github.com/aws/aws-sdk-go/aws/request"
+	"github.com/aws/aws-sdk-go/aws/signer/v4"
+	"github.com/aws/aws-sdk-go/private/protocol/query"
+)
+
+// STS provides the API operation methods for making requests to
+// AWS Security Token Service. See this package's package overview docs
+// for details on the service.
+//
+// STS methods are safe to use concurrently. It is not safe to
+// mutate any of the struct's properties though.
+type STS struct {
+	*client.Client
+}
+
+// Used for custom client initialization logic
+var initClient func(*client.Client)
+
+// Used for custom request initialization logic
+var initRequest func(*request.Request)
+
+// Service information constants
+const (
+	ServiceName = "sts"       // Service endpoint prefix API calls made to.
+	EndpointsID = ServiceName // Service ID for Regions and Endpoints metadata.
+)
+
+// New creates a new instance of the STS client with a session.
+// If additional configuration is needed for the client instance use the optional
+// aws.Config parameter to add your extra config.
+//
+// Example:
+//     // Create a STS client from just a session.
+//     svc := sts.New(mySession)
+//
+//     // Create a STS client with additional configuration
+//     svc := sts.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
+func New(p client.ConfigProvider, cfgs ...*aws.Config) *STS {
+	c := p.ClientConfig(EndpointsID, cfgs...)
+	return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion, c.SigningName)
+}
+
+// newClient creates, initializes and returns a new service client instance.
+func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion, signingName string) *STS {
+	svc := &STS{
+		Client: client.New(
+			cfg,
+			metadata.ClientInfo{
+				ServiceName:   ServiceName,
+				SigningName:   signingName,
+				SigningRegion: signingRegion,
+				Endpoint:      endpoint,
+				APIVersion:    "2011-06-15",
+			},
+			handlers,
+		),
+	}
+
+	// Handlers
+	svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
+	svc.Handlers.Build.PushBackNamed(query.BuildHandler)
+	svc.Handlers.Unmarshal.PushBackNamed(query.UnmarshalHandler)
+	svc.Handlers.UnmarshalMeta.PushBackNamed(query.UnmarshalMetaHandler)
+	svc.Handlers.UnmarshalError.PushBackNamed(query.UnmarshalErrorHandler)
+
+	// Run custom client initialization if present
+	if initClient != nil {
+		initClient(svc.Client)
+	}
+
+	return svc
+}
+
+// newRequest creates a new request for a STS operation and runs any
+// custom request initialization.
+func (c *STS) newRequest(op *request.Operation, params, data interface{}) *request.Request { + req := c.NewRequest(op, params, data) + + // Run custom request initialization if present + if initRequest != nil { + initRequest(req) + } + + return req +} diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore new file mode 100644 index 0000000..c5203bf --- /dev/null +++ b/vendor/github.com/go-ini/ini/.gitignore @@ -0,0 +1,5 @@ +testdata/conf_out.ini +ini.sublime-project +ini.sublime-workspace +testdata/conf_reflect.ini +.idea diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml new file mode 100644 index 0000000..0064ba1 --- /dev/null +++ b/vendor/github.com/go-ini/ini/.travis.yml @@ -0,0 +1,16 @@ +sudo: false +language: go + +go: + - 1.4 + - 1.5 + - 1.6 + - tip + +script: + - go get -v github.com/smartystreets/goconvey + - go test -v -cover -race + +notifications: + email: + - u@gogs.io diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE new file mode 100644 index 0000000..37ec93a --- /dev/null +++ b/vendor/github.com/go-ini/ini/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. 
+ +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile new file mode 100644 index 0000000..ac034e5 --- /dev/null +++ b/vendor/github.com/go-ini/ini/Makefile @@ -0,0 +1,12 @@ +.PHONY: build test bench vet + +build: vet bench + +test: + go test -v -cover -race + +bench: + go test -v -cover -race -test.bench=. -test.benchmem + +vet: + go vet diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md new file mode 100644 index 0000000..8539af1 --- /dev/null +++ b/vendor/github.com/go-ini/ini/README.md @@ -0,0 +1,740 @@ +INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) +=== + +![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) + +Package ini provides INI file read and write functionality in Go. + +[简体中文](README_ZH.md) + +## Feature + +- Load multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites. +- Read with recursion values. +- Read with parent-child sections. 
+- Read with auto-increment key names.
+- Read with multiple-line values.
+- Read with tons of helper methods.
+- Read and convert values to Go types.
+- Read and **WRITE** comments of sections and keys.
+- Manipulate sections, keys and comments with ease.
+- Keep sections and keys in order as you parse and save.
+
+## Installation
+
+To use a tagged revision:
+
+	go get gopkg.in/ini.v1
+
+To use with latest changes:
+
+	go get github.com/go-ini/ini
+
+Please add the `-u` flag to update in the future.
+
+### Testing
+
+If you want to test on your machine, please apply the `-t` flag:
+
+	go get -t gopkg.in/ini.v1
+
+Please add the `-u` flag to update in the future.
+
+## Getting Started
+
+### Loading from data sources
+
+A **Data Source** is either raw data of type `[]byte`, a file name of type `string` or an `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error.
+
+```go
+cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data"))))
+```
+
+Or start with an empty object:
+
+```go
+cfg := ini.Empty()
+```
+
+When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later.
+
+```go
+err := cfg.Append("other file", []byte("other raw data"))
+```
+
+If you have a list of files, some of which may not be available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning an error.
+
+```go
+cfg, err := ini.LooseLoad("filename", "filename_404")
+```
+
+The cool thing is, whenever a missing file becomes available, a call to the `Reload` method will pick it up as usual.
+
+#### Ignore cases of key name
+
+When you do not care about the case of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing.
+
+```go
+cfg, err := ini.InsensitiveLoad("filename")
+//...
+
+// sec1 and sec2 are exactly the same section object
+sec1, err := cfg.GetSection("Section")
+sec2, err := cfg.GetSection("SecTIOn")
+
+// key1 and key2 are exactly the same key object
+key1, err := cfg.GetKey("Key")
+key2, err := cfg.GetKey("KeY")
+```
+
+#### MySQL-like boolean key
+
+MySQL's configuration allows a key without a value as follows:
+
+```ini
+[mysqld]
+...
+skip-host-cache
+skip-name-resolve
+```
+
+By default, this is considered a missing value. But if you know you're going to deal with those cases, you can assign advanced load options:
+
+```go
+cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
+```
+
+The value of such keys is always `true`, and when you save to a file, they will be kept in the same format as you read them.
+
+To generate such keys in your program, you could use `NewBooleanKey`:
+
+```go
+key, err := sec.NewBooleanKey("skip-host-cache")
+```
+
+#### Comment
+
+Take care that the following formats will be treated as comments:
+
+1. Line begins with `#` or `;`
+2. Words after `#` or `;`
+3. Words after the section name (i.e. words after `[some section name]`)
+
+If you want to save a value containing `#` or `;`, please quote it with ``` ` ``` or ``` """ ```.
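+
+As a quick illustration of the quoting rule above (a minimal sketch; the section, key and value here are made up for illustration):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"gopkg.in/ini.v1"
+)
+
+func main() {
+	// The value contains '#', so it is wrapped in backticks to keep it
+	// from being cut off as an inline comment.
+	raw := []byte("[credentials]\npassword = `s3cr#t`\n")
+
+	cfg, err := ini.Load(raw)
+	if err != nil {
+		panic(err)
+	}
+
+	// Expected to print "s3cr#t"; the surrounding backticks are stripped on read.
+	fmt.Println(cfg.Section("credentials").Key("password").String())
+}
+```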
+ +### Working with sections + +To get a section, you would need to: + +```go +section, err := cfg.GetSection("section name") +``` + +For a shortcut for default section, just give an empty string as name: + +```go +section, err := cfg.GetSection("") +``` + +When you're pretty sure the section exists, following code could make your life easier: + +```go +section := cfg.Section("section name") +``` + +What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you. + +To create a new section: + +```go +err := cfg.NewSection("new section") +``` + +To get a list of sections or section names: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### Working with keys + +To get a key under a section: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +Same rule applies to key operations: + +```go +key := cfg.Section("").Key("key name") +``` + +To check if a key exists: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +To create a new key: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +To get a list of keys or key names: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +To get a clone hash of keys and corresponding values: + +```go +hash := cfg.Section("").KeysHash() +``` + +### Working with values + +To get a string value: + +```go +val := cfg.Section("").Key("key name").String() +``` + +To validate key value on the fly: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +If you do not want any auto-transformation (such as recursive read) for the values, you can get raw value directly (this way you get much better performance): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +To check if raw value exists: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +To get value with types: + +```go +// For boolean values: +// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// Methods start with Must also accept one argument for default value +// when key not found or fail to parse value to given type. +// Except method MustString, which you have to pass a default value. 
+ +v = cfg.Section("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +What if my value is three-line long? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +Not a problem! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +That's cool, how about continuation lines? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +Piece of cake! + +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +Well, I hate continuation lines, how do I disable that? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +Holy crap! + +Note that single quotes around values will be stripped: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +That's all? Hmm, no. + +#### Helper methods of working with values + +To get value with given candidates: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. 
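+
+A small runnable sketch of the candidate helpers (the `mode` and `workers` keys are invented for illustration); a value outside the candidate list falls back to the default:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"gopkg.in/ini.v1"
+)
+
+func main() {
+	cfg, err := ini.Load([]byte("mode = turbo\nworkers = 4\n"))
+	if err != nil {
+		panic(err)
+	}
+
+	// "turbo" is not among the candidates, so the default "normal" is returned.
+	mode := cfg.Section("").Key("mode").In("normal", []string{"normal", "quiet"})
+
+	// MustInt only falls back to its argument when parsing fails; "4" parses fine.
+	workers := cfg.Section("").Key("workers").MustInt(1)
+
+	fmt.Println(mode, workers) // normal 4
+}
+```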
+ +To validate value in a given range: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### Auto-split values into a slice + +To use zero value of type for invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +To exclude invalid values out of result slice: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +Or to return nothing but error when have invalid inputs: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### Save your configuration + +Finally, it's time to save your configuration to somewhere. + +A typical way to save configuration is writing it to a file: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +Another way to save is writing to a `io.Writer` interface: + +```go +// ... +cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +By default, spaces are used to align "=" sign between key and values, to disable that: + +```go +ini.PrettyFormat = false +``` + +## Advanced Usage + +### Recursive Values + +For all value of keys, there is a special syntax `%()s`, where `` is the key name in same section or default section, and `%()s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### Parent-child Sections + +You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. 
+ +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### Retrieve parent keys available to a child section + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### Unparseable Sections + +Sometimes, you have sections that do not contain key-value pairs but raw content, to handle such case, you can use `LoadOptions.UnparsableSections`: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + +### Auto-increment Key Names + +If key name is `-` in data source, then it would be seen as special syntax for auto-increment key name start from 1, and every section is independent on counter. + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### Map To Struct + +Want more objective way to play with INI? Cool. + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // Things can be simpler. + err = ini.MapTo(p, "path/to/ini") + // ... + + // Just map a section? Fine. + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +Can I have default value for field? Absolutely. + +Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type. + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +It's really cool, but what's the point if you can't give me my file back from struct? + +### Reflect From Struct + +Why not? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... +} +``` + +So, what do I get? + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +places = HangZhou,Boston +``` + +#### Name Mapper + +To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name. + +There are 2 built-in name mappers: + +- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key. +- `TitleUnderscore`: it converts to format `title_underscore` then match section or key. 
+ +To use them: + +```go +type Info struct { + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +Same rules of name mapper apply to `ini.ReflectFromWithMapper` function. + +#### Value Mapper + +To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`. + +#### Other Notes On Map/Reflect + +Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome. + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +Example configuration: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## Getting Help + +- [API Documentation](https://gowalker.org/gopkg.in/ini.v1) +- [File An Issue](https://github.com/go-ini/ini/issues/new) + +## FAQs + +### What does `BlockMode` field do? + +By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster. + +### Why another INI library? + +Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. + +To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) + +## License + +This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. 
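+
+For completeness, a self-contained sketch (the `Server` struct and its keys are invented for illustration) tying the mapping and saving pieces together: load raw data, map a section onto a struct, change a value, and write the result back out:
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"gopkg.in/ini.v1"
+)
+
+type Server struct {
+	Host string `ini:"host"`
+	Port int    `ini:"port"`
+}
+
+func main() {
+	cfg, err := ini.Load([]byte("[server]\nhost = example.com\nport = 8080\n"))
+	if err != nil {
+		panic(err)
+	}
+
+	// Map the [server] section onto the struct.
+	s := new(Server)
+	if err = cfg.Section("server").MapTo(s); err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", s) // &{Host:example.com Port:8080}
+
+	// Change a value through the API and write the whole file to stdout.
+	cfg.Section("server").Key("port").SetValue("9090")
+	if _, err = cfg.WriteTo(os.Stdout); err != nil {
+		panic(err)
+	}
+}
+```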
diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md new file mode 100644 index 0000000..163432d --- /dev/null +++ b/vendor/github.com/go-ini/ini/README_ZH.md @@ -0,0 +1,727 @@ +本包提供了 Go 语言中读写 INI 文件的功能。 + +## 功能特性 + +- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`) +- 支持递归读取键值 +- 支持读取父子分区 +- 支持读取自增键名 +- 支持读取多行的键值 +- 支持大量辅助方法 +- 支持在读取时直接转换为 Go 语言类型 +- 支持读取和 **写入** 分区和键的注释 +- 轻松操作分区、键值和注释 +- 在保存文件时分区和键值会保持原有的顺序 + +## 下载安装 + +使用一个特定版本: + + go get gopkg.in/ini.v1 + +使用最新版: + + go get github.com/go-ini/ini + +如需更新请添加 `-u` 选项。 + +### 测试安装 + +如果您想要在自己的机器上运行测试,请使用 `-t` 标记: + + go get -t gopkg.in/ini.v1 + +如需更新请添加 `-u` 选项。 + +## 开始使用 + +### 从数据源加载 + +一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 + +```go +cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) +``` + +或者从一个空白的文件开始: + +```go +cfg := ini.Empty() +``` + +当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 + +```go +err := cfg.Append("other file", []byte("other raw data")) +``` + +当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误): + +```go +cfg, err := ini.LooseLoad("filename", "filename_404") +``` + +更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。 + +#### 忽略键名的大小写 + +有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写: + +```go +cfg, err := ini.InsensitiveLoad("filename") +//... + +// sec1 和 sec2 指向同一个分区对象 +sec1, err := cfg.GetSection("Section") +sec2, err := cfg.GetSection("SecTIOn") + +// key1 和 key2 指向同一个键对象 +key1, err := cfg.GetKey("Key") +key2, err := cfg.GetKey("KeY") +``` + +#### 类似 MySQL 配置中的布尔值键 + +MySQL 的配置文件中会出现没有具体值的布尔类型的键: + +```ini +[mysqld] +... +skip-host-cache +skip-name-resolve +``` + +默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理: + +```go +cfg, err := LoadSources(LoadOptions{AllowBooleanKeys: true}, "my.cnf")) +``` + +这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 + +如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`: + +```go +key, err := sec.NewBooleanKey("skip-host-cache") +``` + +#### 关于注释 + +下述几种情况的内容将被视为注释: + +1. 所有以 `#` 或 `;` 开头的行 +2. 所有在 `#` 或 `;` 之后的内容 +3. 
分区标签后的文字 (即 `[分区名]` 之后的内容) + +如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。 + +### 操作分区(Section) + +获取指定分区: + +```go +section, err := cfg.GetSection("section name") +``` + +如果您想要获取默认分区,则可以用空字符串代替分区名: + +```go +section, err := cfg.GetSection("") +``` + +当您非常确定某个分区是存在的,可以使用以下简便方法: + +```go +section := cfg.Section("section name") +``` + +如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 + +创建一个分区: + +```go +err := cfg.NewSection("new section") +``` + +获取所有分区对象或名称: + +```go +sections := cfg.Sections() +names := cfg.SectionStrings() +``` + +### 操作键(Key) + +获取某个分区下的键: + +```go +key, err := cfg.Section("").GetKey("key name") +``` + +和分区一样,您也可以直接获取键而忽略错误处理: + +```go +key := cfg.Section("").Key("key name") +``` + +判断某个键是否存在: + +```go +yes := cfg.Section("").HasKey("key name") +``` + +创建一个新的键: + +```go +err := cfg.Section("").NewKey("name", "value") +``` + +获取分区下的所有键或键名: + +```go +keys := cfg.Section("").Keys() +names := cfg.Section("").KeyStrings() +``` + +获取分区下的所有键值对的克隆: + +```go +hash := cfg.Section("").KeysHash() +``` + +### 操作键值(Value) + +获取一个类型为字符串(string)的值: + +```go +val := cfg.Section("").Key("key name").String() +``` + +获取值的同时通过自定义函数进行处理验证: + +```go +val := cfg.Section("").Key("key name").Validate(func(in string) string { + if len(in) == 0 { + return "default" + } + return in +}) +``` + +如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳): + +```go +val := cfg.Section("").Key("key name").Value() +``` + +判断某个原值是否存在: + +```go +yes := cfg.Section("").HasValue("test value") +``` + +获取其它类型的值: + +```go +// 布尔值的规则: +// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On +// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off +v, err = cfg.Section("").Key("BOOL").Bool() +v, err = cfg.Section("").Key("FLOAT64").Float64() +v, err = cfg.Section("").Key("INT").Int() +v, err = cfg.Section("").Key("INT64").Int64() +v, err = cfg.Section("").Key("UINT").Uint() +v, err = cfg.Section("").Key("UINT64").Uint64() +v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) +v, err = cfg.Section("").Key("TIME").Time() // RFC3339 + +v = cfg.Section("").Key("BOOL").MustBool() +v = cfg.Section("").Key("FLOAT64").MustFloat64() +v = cfg.Section("").Key("INT").MustInt() +v = cfg.Section("").Key("INT64").MustInt64() +v = cfg.Section("").Key("UINT").MustUint() +v = cfg.Section("").Key("UINT64").MustUint64() +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) +v = cfg.Section("").Key("TIME").MustTime() // RFC3339 + +// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, +// 当键不存在或者转换失败时,则会直接返回该默认值。 +// 但是,MustString 方法必须传递一个默认值。 + +v = cfg.Seciont("").Key("String").MustString("default") +v = cfg.Section("").Key("BOOL").MustBool(true) +v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) +v = cfg.Section("").Key("INT").MustInt(10) +v = cfg.Section("").Key("INT64").MustInt64(99) +v = cfg.Section("").Key("UINT").MustUint(3) +v = cfg.Section("").Key("UINT64").MustUint64(6) +v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) +v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 +``` + +如果我的值有好多行怎么办? + +```ini +[advance] +ADDRESS = """404 road, +NotFound, State, 5000 +Earth""" +``` + +嗯哼?小 case! + +```go +cfg.Section("advance").Key("ADDRESS").String() + +/* --- start --- +404 road, +NotFound, State, 5000 +Earth +------ end --- */ +``` + +赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? + +```ini +[advance] +two_lines = how about \ + continuation lines? +lots_of_lines = 1 \ + 2 \ + 3 \ + 4 +``` + +简直是小菜一碟! 
+ +```go +cfg.Section("advance").Key("two_lines").String() // how about continuation lines? +cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 +``` + +可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢? + +```go +cfg, err := ini.LoadSources(ini.LoadOptions{ + IgnoreContinuation: true, +}, "filename") +``` + +哇靠给力啊! + +需要注意的是,值两侧的单引号会被自动剔除: + +```ini +foo = "some value" // foo: some value +bar = 'some value' // bar: some value +``` + +这就是全部了?哈哈,当然不是。 + +#### 操作键值的辅助方法 + +获取键值时设定候选值: + +```go +v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) +v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) +v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) +v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) +v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) +v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) +v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) +v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 +``` + +如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 + +验证获取的值是否在指定范围内: + +```go +vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) +vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) +vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) +vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) +vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) +vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) +vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 +``` + +##### 自动分割键值到切片(slice) + +当存在无效输入时,使用零值代替: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] +vals = cfg.Section("").Key("STRINGS").Strings(",") +vals = cfg.Section("").Key("FLOAT64S").Float64s(",") +vals = cfg.Section("").Key("INTS").Ints(",") +vals = cfg.Section("").Key("INT64S").Int64s(",") +vals = cfg.Section("").Key("UINTS").Uints(",") +vals = cfg.Section("").Key("UINT64S").Uint64s(",") +vals = cfg.Section("").Key("TIMES").Times(",") +``` + +从结果切片中剔除无效输入: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> [2.2] +vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") +vals = cfg.Section("").Key("INTS").ValidInts(",") +vals = cfg.Section("").Key("INT64S").ValidInt64s(",") +vals = cfg.Section("").Key("UINTS").ValidUints(",") +vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") +vals = cfg.Section("").Key("TIMES").ValidTimes(",") +``` + +当存在无效输入时,直接返回错误: + +```go +// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] +// Input: how, 2.2, are, you -> error +vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") +vals = cfg.Section("").Key("INTS").StrictInts(",") +vals = cfg.Section("").Key("INT64S").StrictInt64s(",") +vals = cfg.Section("").Key("UINTS").StrictUints(",") +vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") +vals = cfg.Section("").Key("TIMES").StrictTimes(",") +``` + +### 保存配置 + +终于到了这个时刻,是时候保存一下配置了。 + +比较原始的做法是输出配置到某个文件: + +```go +// ... +err = cfg.SaveTo("my.ini") +err = cfg.SaveToIndent("my.ini", "\t") +``` + +另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: + +```go +// ... 
+cfg.WriteTo(writer) +cfg.WriteToIndent(writer, "\t") +``` + +默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能: + +```go +ini.PrettyFormat = false +``` + +## 高级用法 + +### 递归读取键值 + +在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 + +```ini +NAME = ini + +[author] +NAME = Unknwon +GITHUB = https://github.com/%(NAME)s + +[package] +FULL_NAME = github.com/go-ini/%(NAME)s +``` + +```go +cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon +cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini +``` + +### 读取父子分区 + +您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 + +```ini +NAME = ini +VERSION = v1 +IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s + +[package] +CLONE_URL = https://%(IMPORT_PATH)s + +[package.sub] +``` + +```go +cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 +``` + +#### 获取上级父分区下的所有键名 + +```go +cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] +``` + +### 无法解析的分区 + +如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理: + +```go +cfg, err := LoadSources(LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] +<1> This slide has the fuel listed in the wrong units `)) + +body := cfg.Section("COMMENTS").Body() + +/* --- start --- +<1> This slide has the fuel listed in the wrong units +------ end --- */ +``` + +### 读取自增键名 + +如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 + +```ini +[features] +-: Support read/write comments of keys and sections +-: Support auto-increment of key names +-: Support load multiple files to overwrite key values +``` + +```go +cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} +``` + +### 映射到结构 + +想要使用更加面向对象的方式玩转 INI 吗?好主意。 + +```ini +Name = Unknwon +age = 21 +Male = true +Born = 1993-01-01T20:17:05Z + +[Note] +Content = Hi is a good man! +Cities = HangZhou, Boston +``` + +```go +type Note struct { + Content string + Cities []string +} + +type Person struct { + Name string + Age int `ini:"age"` + Male bool + Born time.Time + Note + Created time.Time `ini:"-"` +} + +func main() { + cfg, err := ini.Load("path/to/ini") + // ... + p := new(Person) + err = cfg.MapTo(p) + // ... + + // 一切竟可以如此的简单。 + err = ini.MapTo(p, "path/to/ini") + // ... + + // 嗯哼?只需要映射一个分区吗? + n := new(Note) + err = cfg.Section("Note").MapTo(n) + // ... +} +``` + +结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 + +```go +// ... +p := &Person{ + Name: "Joe", +} +// ... +``` + +这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? + +### 从结构反射 + +可是,我有说不能吗? + +```go +type Embeded struct { + Dates []time.Time `delim:"|"` + Places []string `ini:"places,omitempty"` + None []int `ini:",omitempty"` +} + +type Author struct { + Name string `ini:"NAME"` + Male bool + Age int + GPA float64 + NeverMind string `ini:"-"` + *Embeded +} + +func main() { + a := &Author{"Unknwon", true, 21, 2.8, "", + &Embeded{ + []time.Time{time.Now(), time.Now()}, + []string{"HangZhou", "Boston"}, + []int{}, + }} + cfg := ini.Empty() + err = ini.ReflectFrom(cfg, a) + // ... 
+} +``` + +瞧瞧,奇迹发生了。 + +```ini +NAME = Unknwon +Male = true +Age = 21 +GPA = 2.8 + +[Embeded] +Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 +places = HangZhou,Boston +``` + +#### 名称映射器(Name Mapper) + +为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 + +目前有 2 款内置的映射器: + +- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 +- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 + +使用方法: + +```go +type Info struct{ + PackageName string +} + +func main() { + err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) + // ... + + cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) + // ... + info := new(Info) + cfg.NameMapper = ini.AllCapsUnderscore + err = cfg.MapTo(info) + // ... +} +``` + +使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 + +#### 值映射器(Value Mapper) + +值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量: + +```go +type Env struct { + Foo string `ini:"foo"` +} + +func main() { + cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") + cfg.ValueMapper = os.ExpandEnv + // ... + env := &Env{} + err = cfg.Section("env").MapTo(env) +} +``` + +本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。 + +#### 映射/反射的其它说明 + +任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon + +[Child] +Age = 21 +``` + +很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! + +```go +type Child struct { + Age string +} + +type Parent struct { + Name string + Child `ini:"Parent"` +} + +type Config struct { + City string + Parent +} +``` + +示例配置文件: + +```ini +City = Boston + +[Parent] +Name = Unknwon +Age = 21 +``` + +## 获取帮助 + +- [API 文档](https://gowalker.org/gopkg.in/ini.v1) +- [创建工单](https://github.com/go-ini/ini/issues/new) + +## 常见问题 + +### 字段 `BlockMode` 是什么? + +默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 + +### 为什么要写另一个 INI 解析库? + +许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 + +为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go new file mode 100644 index 0000000..80afe74 --- /dev/null +++ b/vendor/github.com/go-ini/ini/error.go @@ -0,0 +1,32 @@ +// Copyright 2016 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. 
+ +package ini + +import ( + "fmt" +) + +type ErrDelimiterNotFound struct { + Line string +} + +func IsErrDelimiterNotFound(err error) bool { + _, ok := err.(ErrDelimiterNotFound) + return ok +} + +func (err ErrDelimiterNotFound) Error() string { + return fmt.Sprintf("key-value delimiter not found: %s", err.Line) +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go new file mode 100644 index 0000000..c343ded --- /dev/null +++ b/vendor/github.com/go-ini/ini/ini.go @@ -0,0 +1,538 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +// Package ini provides INI file read and write functionality in Go. +package ini + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +const ( + // Name for default section. You can use this constant or the string literal. + // In most of cases, an empty string is all you need to access the section. + DEFAULT_SECTION = "DEFAULT" + + // Maximum allowed depth when recursively substituing variable names. + _DEPTH_VALUES = 99 + _VERSION = "1.24.0" +) + +// Version returns current package version literal. +func Version() string { + return _VERSION +} + +var ( + // Delimiter to determine or compose a new line. + // This variable will be changed to "\r\n" automatically on Windows + // at package init time. + LineBreak = "\n" + + // Variable regexp pattern: %(variable)s + varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) + + // Indicate whether to align "=" sign with spaces to produce pretty output + // or reduce all possible spaces for compact format. + PrettyFormat = true + + // Explicitly write DEFAULT section header + DefaultHeader = false +) + +func init() { + if runtime.GOOS == "windows" { + LineBreak = "\r\n" + } +} + +func inSlice(str string, s []string) bool { + for _, v := range s { + if str == v { + return true + } + } + return false +} + +// dataSource is an interface that returns object which can be read and closed. +type dataSource interface { + ReadCloser() (io.ReadCloser, error) +} + +// sourceFile represents an object that contains content on the local file system. +type sourceFile struct { + name string +} + +func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { + return os.Open(s.name) +} + +type bytesReadCloser struct { + reader io.Reader +} + +func (rc *bytesReadCloser) Read(p []byte) (n int, err error) { + return rc.reader.Read(p) +} + +func (rc *bytesReadCloser) Close() error { + return nil +} + +// sourceData represents an object that contains content in memory. +type sourceData struct { + data []byte +} + +func (s *sourceData) ReadCloser() (io.ReadCloser, error) { + return ioutil.NopCloser(bytes.NewReader(s.data)), nil +} + +// sourceReadCloser represents an input stream with Close method. 
+type sourceReadCloser struct { + reader io.ReadCloser +} + +func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { + return s.reader, nil +} + +// File represents a combination of a or more INI file(s) in memory. +type File struct { + // Should make things safe, but sometimes doesn't matter. + BlockMode bool + // Make sure data is safe in multiple goroutines. + lock sync.RWMutex + + // Allow combination of multiple data sources. + dataSources []dataSource + // Actual data is stored here. + sections map[string]*Section + + // To keep data in order. + sectionList []string + + options LoadOptions + + NameMapper + ValueMapper +} + +// newFile initializes File object with given data sources. +func newFile(dataSources []dataSource, opts LoadOptions) *File { + return &File{ + BlockMode: true, + dataSources: dataSources, + sections: make(map[string]*Section), + sectionList: make([]string, 0, 10), + options: opts, + } +} + +func parseDataSource(source interface{}) (dataSource, error) { + switch s := source.(type) { + case string: + return sourceFile{s}, nil + case []byte: + return &sourceData{s}, nil + case io.ReadCloser: + return &sourceReadCloser{s}, nil + default: + return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) + } +} + +type LoadOptions struct { + // Loose indicates whether the parser should ignore nonexistent files or return error. + Loose bool + // Insensitive indicates whether the parser forces all section and key names to lowercase. + Insensitive bool + // IgnoreContinuation indicates whether to ignore continuation lines while parsing. + IgnoreContinuation bool + // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. + // This type of keys are mostly used in my.cnf. + AllowBooleanKeys bool + // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise + // conform to key/value pairs. Specify the names of those blocks here. + UnparseableSections []string +} + +func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { + sources := make([]dataSource, len(others)+1) + sources[0], err = parseDataSource(source) + if err != nil { + return nil, err + } + for i := range others { + sources[i+1], err = parseDataSource(others[i]) + if err != nil { + return nil, err + } + } + f := newFile(sources, opts) + if err = f.Reload(); err != nil { + return nil, err + } + return f, nil +} + +// Load loads and parses from INI data sources. +// Arguments can be mixed of file name with string type, or raw data in []byte. +// It will return error if list contains nonexistent files. +func Load(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{}, source, others...) +} + +// LooseLoad has exactly same functionality as Load function +// except it ignores nonexistent files instead of returning error. +func LooseLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Loose: true}, source, others...) +} + +// InsensitiveLoad has exactly same functionality as Load function +// except it forces all section and key names to be lowercased. +func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { + return LoadSources(LoadOptions{Insensitive: true}, source, others...) +} + +// Empty returns an empty file object. +func Empty() *File { + // Ignore error here, we sure our data is good. + f, _ := Load([]byte("")) + return f +} + +// NewSection creates a new section. 
+func (f *File) NewSection(name string) (*Section, error) { + if len(name) == 0 { + return nil, errors.New("error creating new section: empty section name") + } else if f.options.Insensitive && name != DEFAULT_SECTION { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if inSlice(name, f.sectionList) { + return f.sections[name], nil + } + + f.sectionList = append(f.sectionList, name) + f.sections[name] = newSection(f, name) + return f.sections[name], nil +} + +// NewRawSection creates a new section with an unparseable body. +func (f *File) NewRawSection(name, body string) (*Section, error) { + section, err := f.NewSection(name) + if err != nil { + return nil, err + } + + section.isRawSection = true + section.rawBody = body + return section, nil +} + +// NewSections creates a list of sections. +func (f *File) NewSections(names ...string) (err error) { + for _, name := range names { + if _, err = f.NewSection(name); err != nil { + return err + } + } + return nil +} + +// GetSection returns section by given name. +func (f *File) GetSection(name string) (*Section, error) { + if len(name) == 0 { + name = DEFAULT_SECTION + } else if f.options.Insensitive { + name = strings.ToLower(name) + } + + if f.BlockMode { + f.lock.RLock() + defer f.lock.RUnlock() + } + + sec := f.sections[name] + if sec == nil { + return nil, fmt.Errorf("section '%s' does not exist", name) + } + return sec, nil +} + +// Section assumes named section exists and returns a zero-value when not. +func (f *File) Section(name string) *Section { + sec, err := f.GetSection(name) + if err != nil { + // Note: It's OK here because the only possible error is empty section name, + // but if it's empty, this piece of code won't be executed. + sec, _ = f.NewSection(name) + return sec + } + return sec +} + +// Section returns list of Section. +func (f *File) Sections() []*Section { + sections := make([]*Section, len(f.sectionList)) + for i := range f.sectionList { + sections[i] = f.Section(f.sectionList[i]) + } + return sections +} + +// SectionStrings returns list of section names. +func (f *File) SectionStrings() []string { + list := make([]string, len(f.sectionList)) + copy(list, f.sectionList) + return list +} + +// DeleteSection deletes a section. +func (f *File) DeleteSection(name string) { + if f.BlockMode { + f.lock.Lock() + defer f.lock.Unlock() + } + + if len(name) == 0 { + name = DEFAULT_SECTION + } + + for i, s := range f.sectionList { + if s == name { + f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) + delete(f.sections, name) + return + } + } +} + +func (f *File) reload(s dataSource) error { + r, err := s.ReadCloser() + if err != nil { + return err + } + defer r.Close() + + return f.parse(r) +} + +// Reload reloads and parses all data sources. +func (f *File) Reload() (err error) { + for _, s := range f.dataSources { + if err = f.reload(s); err != nil { + // In loose mode, we create an empty default section for nonexistent files. + if os.IsNotExist(err) && f.options.Loose { + f.parse(bytes.NewBuffer(nil)) + continue + } + return err + } + } + return nil +} + +// Append appends one or more data sources and reloads automatically. 
+func (f *File) Append(source interface{}, others ...interface{}) error { + ds, err := parseDataSource(source) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + for _, s := range others { + ds, err = parseDataSource(s) + if err != nil { + return err + } + f.dataSources = append(f.dataSources, ds) + } + return f.Reload() +} + +// WriteToIndent writes content into io.Writer with given indention. +// If PrettyFormat has been set to be true, +// it will align "=" sign with spaces under each section. +func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) { + equalSign := "=" + if PrettyFormat { + equalSign = " = " + } + + // Use buffer to make sure target is safe until finish encoding. + buf := bytes.NewBuffer(nil) + for i, sname := range f.sectionList { + sec := f.Section(sname) + if len(sec.Comment) > 0 { + if sec.Comment[0] != '#' && sec.Comment[0] != ';' { + sec.Comment = "; " + sec.Comment + } + if _, err = buf.WriteString(sec.Comment + LineBreak); err != nil { + return 0, err + } + } + + if i > 0 || DefaultHeader { + if _, err = buf.WriteString("[" + sname + "]" + LineBreak); err != nil { + return 0, err + } + } else { + // Write nothing if default section is empty + if len(sec.keyList) == 0 { + continue + } + } + + if sec.isRawSection { + if _, err = buf.WriteString(sec.rawBody); err != nil { + return 0, err + } + continue + } + + // Count and generate alignment length and buffer spaces using the + // longest key. Keys may be modifed if they contain certain characters so + // we need to take that into account in our calculation. + alignLength := 0 + if PrettyFormat { + for _, kname := range sec.keyList { + keyLength := len(kname) + // First case will surround key by ` and second by """ + if strings.ContainsAny(kname, "\"=:") { + keyLength += 2 + } else if strings.Contains(kname, "`") { + keyLength += 6 + } + + if keyLength > alignLength { + alignLength = keyLength + } + } + } + alignSpaces := bytes.Repeat([]byte(" "), alignLength) + + for _, kname := range sec.keyList { + key := sec.Key(kname) + if len(key.Comment) > 0 { + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + if key.Comment[0] != '#' && key.Comment[0] != ';' { + key.Comment = "; " + key.Comment + } + if _, err = buf.WriteString(key.Comment + LineBreak); err != nil { + return 0, err + } + } + + if len(indent) > 0 && sname != DEFAULT_SECTION { + buf.WriteString(indent) + } + + switch { + case key.isAutoIncrement: + kname = "-" + case strings.ContainsAny(kname, "\"=:"): + kname = "`" + kname + "`" + case strings.Contains(kname, "`"): + kname = `"""` + kname + `"""` + } + if _, err = buf.WriteString(kname); err != nil { + return 0, err + } + + if key.isBooleanType { + if kname != sec.keyList[len(sec.keyList)-1] { + buf.WriteString(LineBreak) + } + continue + } + + // Write out alignment spaces before "=" sign + if PrettyFormat { + buf.Write(alignSpaces[:alignLength-len(kname)]) + } + + val := key.value + // In case key value contains "\n", "`", "\"", "#" or ";" + if strings.ContainsAny(val, "\n`") { + val = `"""` + val + `"""` + } else if strings.ContainsAny(val, "#;") { + val = "`" + val + "`" + } + if _, err = buf.WriteString(equalSign + val + LineBreak); err != nil { + return 0, err + } + } + + // Put a line between sections + if _, err = buf.WriteString(LineBreak); err != nil { + return 0, err + } + } + + return buf.WriteTo(w) +} + +// WriteTo writes file content into io.Writer. 
+func (f *File) WriteTo(w io.Writer) (int64, error) { + return f.WriteToIndent(w, "") +} + +// SaveToIndent writes content to file system with given value indention. +func (f *File) SaveToIndent(filename, indent string) error { + // Note: Because we are truncating with os.Create, + // so it's safer to save to a temporary file location and rename afte done. + tmpPath := filename + "." + strconv.Itoa(time.Now().Nanosecond()) + ".tmp" + defer os.Remove(tmpPath) + + fw, err := os.Create(tmpPath) + if err != nil { + return err + } + + if _, err = f.WriteToIndent(fw, indent); err != nil { + fw.Close() + return err + } + fw.Close() + + // Remove old file and rename the new one. + os.Remove(filename) + return os.Rename(tmpPath, filename) +} + +// SaveTo writes content to file system. +func (f *File) SaveTo(filename string) error { + return f.SaveToIndent(filename, "") +} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go new file mode 100644 index 0000000..9738c55 --- /dev/null +++ b/vendor/github.com/go-ini/ini/key.go @@ -0,0 +1,633 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "fmt" + "strconv" + "strings" + "time" +) + +// Key represents a key under a section. +type Key struct { + s *Section + name string + value string + isAutoIncrement bool + isBooleanType bool + + Comment string +} + +// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv +type ValueMapper func(string) string + +// Name returns name of key. +func (k *Key) Name() string { + return k.name +} + +// Value returns raw value of key for performance purpose. +func (k *Key) Value() string { + return k.value +} + +// String returns string representation of value. +func (k *Key) String() string { + val := k.value + if k.s.f.ValueMapper != nil { + val = k.s.f.ValueMapper(val) + } + if strings.Index(val, "%") == -1 { + return val + } + + for i := 0; i < _DEPTH_VALUES; i++ { + vr := varPattern.FindString(val) + if len(vr) == 0 { + break + } + + // Take off leading '%(' and trailing ')s'. + noption := strings.TrimLeft(vr, "%(") + noption = strings.TrimRight(noption, ")s") + + // Search in the same section. + nk, err := k.s.GetKey(noption) + if err != nil { + // Search again in default section. + nk, _ = k.s.f.Section("").GetKey(noption) + } + + // Substitute by new value and take off leading '%(' and trailing ')s'. + val = strings.Replace(val, vr, nk.value, -1) + } + return val +} + +// Validate accepts a validate function which can +// return modifed result as key value. +func (k *Key) Validate(fn func(string) string) string { + return fn(k.String()) +} + +// parseBool returns the boolean value represented by the string. +// +// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, +// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. +// Any other value returns an error. 
+func parseBool(str string) (value bool, err error) { + switch str { + case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": + return true, nil + case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": + return false, nil + } + return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) +} + +// Bool returns bool type value. +func (k *Key) Bool() (bool, error) { + return parseBool(k.String()) +} + +// Float64 returns float64 type value. +func (k *Key) Float64() (float64, error) { + return strconv.ParseFloat(k.String(), 64) +} + +// Int returns int type value. +func (k *Key) Int() (int, error) { + return strconv.Atoi(k.String()) +} + +// Int64 returns int64 type value. +func (k *Key) Int64() (int64, error) { + return strconv.ParseInt(k.String(), 10, 64) +} + +// Uint returns uint type valued. +func (k *Key) Uint() (uint, error) { + u, e := strconv.ParseUint(k.String(), 10, 64) + return uint(u), e +} + +// Uint64 returns uint64 type value. +func (k *Key) Uint64() (uint64, error) { + return strconv.ParseUint(k.String(), 10, 64) +} + +// Duration returns time.Duration type value. +func (k *Key) Duration() (time.Duration, error) { + return time.ParseDuration(k.String()) +} + +// TimeFormat parses with given format and returns time.Time type value. +func (k *Key) TimeFormat(format string) (time.Time, error) { + return time.Parse(format, k.String()) +} + +// Time parses with RFC3339 format and returns time.Time type value. +func (k *Key) Time() (time.Time, error) { + return k.TimeFormat(time.RFC3339) +} + +// MustString returns default value if key value is empty. +func (k *Key) MustString(defaultVal string) string { + val := k.String() + if len(val) == 0 { + k.value = defaultVal + return defaultVal + } + return val +} + +// MustBool always returns value without error, +// it returns false if error occurs. +func (k *Key) MustBool(defaultVal ...bool) bool { + val, err := k.Bool() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatBool(defaultVal[0]) + return defaultVal[0] + } + return val +} + +// MustFloat64 always returns value without error, +// it returns 0.0 if error occurs. +func (k *Key) MustFloat64(defaultVal ...float64) float64 { + val, err := k.Float64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) + return defaultVal[0] + } + return val +} + +// MustInt always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt(defaultVal ...int) int { + val, err := k.Int() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(int64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustInt64 always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustInt64(defaultVal ...int64) int64 { + val, err := k.Int64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatInt(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustUint always returns value without error, +// it returns 0 if error occurs. +func (k *Key) MustUint(defaultVal ...uint) uint { + val, err := k.Uint() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) + return defaultVal[0] + } + return val +} + +// MustUint64 always returns value without error, +// it returns 0 if error occurs. 
+func (k *Key) MustUint64(defaultVal ...uint64) uint64 { + val, err := k.Uint64() + if len(defaultVal) > 0 && err != nil { + k.value = strconv.FormatUint(defaultVal[0], 10) + return defaultVal[0] + } + return val +} + +// MustDuration always returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { + val, err := k.Duration() + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].String() + return defaultVal[0] + } + return val +} + +// MustTimeFormat always parses with given format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { + val, err := k.TimeFormat(format) + if len(defaultVal) > 0 && err != nil { + k.value = defaultVal[0].Format(format) + return defaultVal[0] + } + return val +} + +// MustTime always parses with RFC3339 format and returns value without error, +// it returns zero value if error occurs. +func (k *Key) MustTime(defaultVal ...time.Time) time.Time { + return k.MustTimeFormat(time.RFC3339, defaultVal...) +} + +// In always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) In(defaultVal string, candidates []string) string { + val := k.String() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InFloat64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { + val := k.MustFloat64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt(defaultVal int, candidates []int) int { + val := k.MustInt() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InInt64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { + val := k.MustInt64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint(defaultVal uint, candidates []uint) uint { + val := k.MustUint() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InUint64 always returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { + val := k.MustUint64() + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTimeFormat always parses with given format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. 
+func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { + val := k.MustTimeFormat(format) + for _, cand := range candidates { + if val == cand { + return val + } + } + return defaultVal +} + +// InTime always parses with RFC3339 format and returns value without error, +// it returns default value if error occurs or doesn't fit into candidates. +func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { + return k.InTimeFormat(time.RFC3339, defaultVal, candidates) +} + +// RangeFloat64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { + val := k.MustFloat64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt(defaultVal, min, max int) int { + val := k.MustInt() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeInt64 checks if value is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { + val := k.MustInt64() + if val < min || val > max { + return defaultVal + } + return val +} + +// RangeTimeFormat checks if value with given format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { + val := k.MustTimeFormat(format) + if val.Unix() < min.Unix() || val.Unix() > max.Unix() { + return defaultVal + } + return val +} + +// RangeTime checks if value with RFC3339 format is in given range inclusively, +// and returns default value if it's not. +func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { + return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) +} + +// Strings returns list of string divided by given delimiter. +func (k *Key) Strings(delim string) []string { + str := k.String() + if len(str) == 0 { + return []string{} + } + + vals := strings.Split(str, delim) + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + return vals +} + +// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Float64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, true, false) + return vals +} + +// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Ints(delim string) []int { + vals, _ := k.getInts(delim, true, false) + return vals +} + +// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Int64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, true, false) + return vals +} + +// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uints(delim string) []uint { + vals, _ := k.getUints(delim, true, false) + return vals +} + +// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. +func (k *Key) Uint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, true, false) + return vals +} + +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). 
+func (k *Key) TimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, true, false) + return vals +} + +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. +// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). +func (k *Key) Times(delim string) []time.Time { + return k.TimesFormat(time.RFC3339, delim) +} + +// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then +// it will not be included to result list. +func (k *Key) ValidFloat64s(delim string) []float64 { + vals, _ := k.getFloat64s(delim, false, false) + return vals +} + +// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will +// not be included to result list. +func (k *Key) ValidInts(delim string) []int { + vals, _ := k.getInts(delim, false, false) + return vals +} + +// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, +// then it will not be included to result list. +func (k *Key) ValidInt64s(delim string) []int64 { + vals, _ := k.getInt64s(delim, false, false) + return vals +} + +// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, +// then it will not be included to result list. +func (k *Key) ValidUints(delim string) []uint { + vals, _ := k.getUints(delim, false, false) + return vals +} + +// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned +// integer, then it will not be included to result list. +func (k *Key) ValidUint64s(delim string) []uint64 { + vals, _ := k.getUint64s(delim, false, false) + return vals +} + +// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimesFormat(format, delim string) []time.Time { + vals, _ := k.getTimesFormat(format, delim, false, false) + return vals +} + +// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. +func (k *Key) ValidTimes(delim string) []time.Time { + return k.ValidTimesFormat(time.RFC3339, delim) +} + +// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictFloat64s(delim string) ([]float64, error) { + return k.getFloat64s(delim, false, true) +} + +// StrictInts returns list of int divided by given delimiter or error on first invalid input. +func (k *Key) StrictInts(delim string) ([]int, error) { + return k.getInts(delim, false, true) +} + +// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictInt64s(delim string) ([]int64, error) { + return k.getInt64s(delim, false, true) +} + +// StrictUints returns list of uint divided by given delimiter or error on first invalid input. +func (k *Key) StrictUints(delim string) ([]uint, error) { + return k.getUints(delim, false, true) +} + +// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. +func (k *Key) StrictUint64s(delim string) ([]uint64, error) { + return k.getUint64s(delim, false, true) +} + +// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter +// or error on first invalid input. 
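+//
+// Illustrative comparison of the three list flavours, shown with the integer
+// helpers above (the time variants behave the same way); assume a key value
+// of "1,abc,3":
+//
+//	_ = k.Ints(",")             // []int{1, 0, 3}: invalid entry becomes zero
+//	_ = k.ValidInts(",")        // []int{1, 3}: invalid entry is dropped
+//	_, err := k.StrictInts(",") // err != nil: stops at "abc"
+//
+// StrictTimesFormat follows the same rule and fails on the first element that
+// does not parse with the given format.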
+func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { + return k.getTimesFormat(format, delim, false, true) +} + +// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter +// or error on first invalid input. +func (k *Key) StrictTimes(delim string) ([]time.Time, error) { + return k.StrictTimesFormat(time.RFC3339, delim) +} + +// getFloat64s returns list of float64 divided by given delimiter. +func (k *Key) getFloat64s(delim string, addInvalid, returnOnInvalid bool) ([]float64, error) { + strs := k.Strings(delim) + vals := make([]float64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseFloat(str, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInts returns list of int divided by given delimiter. +func (k *Key) getInts(delim string, addInvalid, returnOnInvalid bool) ([]int, error) { + strs := k.Strings(delim) + vals := make([]int, 0, len(strs)) + for _, str := range strs { + val, err := strconv.Atoi(str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getInt64s returns list of int64 divided by given delimiter. +func (k *Key) getInt64s(delim string, addInvalid, returnOnInvalid bool) ([]int64, error) { + strs := k.Strings(delim) + vals := make([]int64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseInt(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getUints returns list of uint divided by given delimiter. +func (k *Key) getUints(delim string, addInvalid, returnOnInvalid bool) ([]uint, error) { + strs := k.Strings(delim) + vals := make([]uint, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 0) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, uint(val)) + } + } + return vals, nil +} + +// getUint64s returns list of uint64 divided by given delimiter. +func (k *Key) getUint64s(delim string, addInvalid, returnOnInvalid bool) ([]uint64, error) { + strs := k.Strings(delim) + vals := make([]uint64, 0, len(strs)) + for _, str := range strs { + val, err := strconv.ParseUint(str, 10, 64) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// getTimesFormat parses with given format and returns list of time.Time divided by given delimiter. +func (k *Key) getTimesFormat(format, delim string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { + strs := k.Strings(delim) + vals := make([]time.Time, 0, len(strs)) + for _, str := range strs { + val, err := time.Parse(format, str) + if err != nil && returnOnInvalid { + return nil, err + } + if err == nil || addInvalid { + vals = append(vals, val) + } + } + return vals, nil +} + +// SetValue changes key value. 
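+//
+// A minimal usage sketch (assumes a key obtained from a loaded File via the
+// Section/Key helpers defined elsewhere in this package):
+//
+//	key.SetValue("8080")
+//	port := key.MustInt(8080) // 8080
+//
+// SetValue only updates the in-memory value and the section's keysHash;
+// writing the file back out is handled by the File type's save methods.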
+func (k *Key) SetValue(v string) { + if k.s.f.BlockMode { + k.s.f.lock.Lock() + defer k.s.f.lock.Unlock() + } + + k.value = v + k.s.keysHash[k.name] = v +} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go new file mode 100644 index 0000000..01ff20a --- /dev/null +++ b/vendor/github.com/go-ini/ini/parser.go @@ -0,0 +1,359 @@ +// Copyright 2015 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strconv" + "strings" + "unicode" +) + +type tokenType int + +const ( + _TOKEN_INVALID tokenType = iota + _TOKEN_COMMENT + _TOKEN_SECTION + _TOKEN_KEY +) + +type parser struct { + buf *bufio.Reader + isEOF bool + count int + comment *bytes.Buffer +} + +func newParser(r io.Reader) *parser { + return &parser{ + buf: bufio.NewReader(r), + count: 1, + comment: &bytes.Buffer{}, + } +} + +// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. +// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding +func (p *parser) BOM() error { + mask, err := p.buf.Peek(2) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 2 { + return nil + } + + switch { + case mask[0] == 254 && mask[1] == 255: + fallthrough + case mask[0] == 255 && mask[1] == 254: + p.buf.Read(mask) + case mask[0] == 239 && mask[1] == 187: + mask, err := p.buf.Peek(3) + if err != nil && err != io.EOF { + return err + } else if len(mask) < 3 { + return nil + } + if mask[2] == 191 { + p.buf.Read(mask) + } + } + return nil +} + +func (p *parser) readUntil(delim byte) ([]byte, error) { + data, err := p.buf.ReadBytes(delim) + if err != nil { + if err == io.EOF { + p.isEOF = true + } else { + return nil, err + } + } + return data, nil +} + +func cleanComment(in []byte) ([]byte, bool) { + i := bytes.IndexAny(in, "#;") + if i == -1 { + return nil, false + } + return in[i:], true +} + +func readKeyName(in []byte) (string, int, error) { + line := string(in) + + // Check if key name surrounded by quotes. 
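+	// Descriptive note: the accepted quoted forms are "my key" = v,
+	// """my key""" = v, and a key name wrapped in backticks.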
+ var keyQuote string + if line[0] == '"' { + if len(line) > 6 && string(line[0:3]) == `"""` { + keyQuote = `"""` + } else { + keyQuote = `"` + } + } else if line[0] == '`' { + keyQuote = "`" + } + + // Get out key name + endIdx := -1 + if len(keyQuote) > 0 { + startIdx := len(keyQuote) + // FIXME: fail case -> """"""name"""=value + pos := strings.Index(line[startIdx:], keyQuote) + if pos == -1 { + return "", -1, fmt.Errorf("missing closing key quote: %s", line) + } + pos += startIdx + + // Find key-value delimiter + i := strings.IndexAny(line[pos+startIdx:], "=:") + if i < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + endIdx = pos + i + return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil + } + + endIdx = strings.IndexAny(line, "=:") + if endIdx < 0 { + return "", -1, ErrDelimiterNotFound{line} + } + return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil +} + +func (p *parser) readMultilines(line, val, valQuote string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := string(data) + + pos := strings.LastIndex(next, valQuote) + if pos > -1 { + val += next[:pos] + + comment, has := cleanComment([]byte(next[pos:])) + if has { + p.comment.Write(bytes.TrimSpace(comment)) + } + break + } + val += next + if p.isEOF { + return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) + } + } + return val, nil +} + +func (p *parser) readContinuationLines(val string) (string, error) { + for { + data, err := p.readUntil('\n') + if err != nil { + return "", err + } + next := strings.TrimSpace(string(data)) + + if len(next) == 0 { + break + } + val += next + if val[len(val)-1] != '\\' { + break + } + val = val[:len(val)-1] + } + return val, nil +} + +// hasSurroundedQuote check if and only if the first and last characters +// are quotes \" or \'. +// It returns false if any other parts also contain same kind of quotes. +func hasSurroundedQuote(in string, quote byte) bool { + return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote && + strings.IndexByte(in[1:], quote) == len(in)-2 +} + +func (p *parser) readValue(in []byte, ignoreContinuation bool) (string, error) { + line := strings.TrimLeftFunc(string(in), unicode.IsSpace) + if len(line) == 0 { + return "", nil + } + + var valQuote string + if len(line) > 3 && string(line[0:3]) == `"""` { + valQuote = `"""` + } else if line[0] == '`' { + valQuote = "`" + } + + if len(valQuote) > 0 { + startIdx := len(valQuote) + pos := strings.LastIndex(line[startIdx:], valQuote) + // Check for multi-line value + if pos == -1 { + return p.readMultilines(line, line[startIdx:], valQuote) + } + + return line[startIdx : pos+startIdx], nil + } + + // Won't be able to reach here if value only contains whitespace. + line = strings.TrimSpace(line) + + // Check continuation lines when desired. + if !ignoreContinuation && line[len(line)-1] == '\\' { + return p.readContinuationLines(line[:len(line)-1]) + } + + i := strings.IndexAny(line, "#;") + if i > -1 { + p.comment.WriteString(line[i:]) + line = strings.TrimSpace(line[:i]) + } + + // Trim single quotes + if hasSurroundedQuote(line, '\'') || + hasSurroundedQuote(line, '"') { + line = line[1 : len(line)-1] + } + return line, nil +} + +// parse parses data through an io.Reader. +func (f *File) parse(reader io.Reader) (err error) { + p := newParser(reader) + if err = p.BOM(); err != nil { + return fmt.Errorf("BOM: %v", err) + } + + // Ignore error because default section name is never empty string. 
+ section, _ := f.NewSection(DEFAULT_SECTION) + + var line []byte + var inUnparseableSection bool + for !p.isEOF { + line, err = p.readUntil('\n') + if err != nil { + return err + } + + line = bytes.TrimLeftFunc(line, unicode.IsSpace) + if len(line) == 0 { + continue + } + + // Comments + if line[0] == '#' || line[0] == ';' { + // Note: we do not care ending line break, + // it is needed for adding second line, + // so just clean it once at the end when set to value. + p.comment.Write(line) + continue + } + + // Section + if line[0] == '[' { + // Read to the next ']' (TODO: support quoted strings) + // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 + closeIdx := bytes.LastIndex(line, []byte("]")) + if closeIdx == -1 { + return fmt.Errorf("unclosed section: %s", line) + } + + name := string(line[1:closeIdx]) + section, err = f.NewSection(name) + if err != nil { + return err + } + + comment, has := cleanComment(line[closeIdx+1:]) + if has { + p.comment.Write(comment) + } + + section.Comment = strings.TrimSpace(p.comment.String()) + + // Reset aotu-counter and comments + p.comment.Reset() + p.count = 1 + + inUnparseableSection = false + for i := range f.options.UnparseableSections { + if f.options.UnparseableSections[i] == name || + (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { + inUnparseableSection = true + continue + } + } + continue + } + + if inUnparseableSection { + section.isRawSection = true + section.rawBody += string(line) + continue + } + + kname, offset, err := readKeyName(line) + if err != nil { + // Treat as boolean key when desired, and whole line is key name. + if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { + kname, err := p.readValue(line, f.options.IgnoreContinuation) + if err != nil { + return err + } + key, err := section.NewBooleanKey(kname) + if err != nil { + return err + } + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + continue + } + return err + } + + // Auto increment. + isAutoIncr := false + if kname == "-" { + isAutoIncr = true + kname = "#" + strconv.Itoa(p.count) + p.count++ + } + + key, err := section.NewKey(kname, "") + if err != nil { + return err + } + key.isAutoIncrement = isAutoIncr + + value, err := p.readValue(line[offset:], f.options.IgnoreContinuation) + if err != nil { + return err + } + key.SetValue(value) + key.Comment = strings.TrimSpace(p.comment.String()) + p.comment.Reset() + } + return nil +} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go new file mode 100644 index 0000000..806f149 --- /dev/null +++ b/vendor/github.com/go-ini/ini/section.go @@ -0,0 +1,232 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "errors" + "fmt" + "strings" +) + +// Section represents a config section. 
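+//
+// A typical (illustrative) read path, assuming a File loaded via this
+// package's Load function defined in another file of the package:
+//
+//	sec := cfg.Section("server")
+//	host := sec.Key("host").String()
+//	port := sec.Key("port").MustInt(8080)
+//
+// Key creates missing keys on demand, so the chain does not panic for absent
+// values.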
+type Section struct { + f *File + Comment string + name string + keys map[string]*Key + keyList []string + keysHash map[string]string + + isRawSection bool + rawBody string +} + +func newSection(f *File, name string) *Section { + return &Section{ + f: f, + name: name, + keys: make(map[string]*Key), + keyList: make([]string, 0, 10), + keysHash: make(map[string]string), + } +} + +// Name returns name of Section. +func (s *Section) Name() string { + return s.name +} + +// Body returns rawBody of Section if the section was marked as unparseable. +// It still follows the other rules of the INI format surrounding leading/trailing whitespace. +func (s *Section) Body() string { + return strings.TrimSpace(s.rawBody) +} + +// NewKey creates a new key to given section. +func (s *Section) NewKey(name, val string) (*Key, error) { + if len(name) == 0 { + return nil, errors.New("error creating new key: empty key name") + } else if s.f.options.Insensitive { + name = strings.ToLower(name) + } + + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + if inSlice(name, s.keyList) { + s.keys[name].value = val + return s.keys[name], nil + } + + s.keyList = append(s.keyList, name) + s.keys[name] = &Key{ + s: s, + name: name, + value: val, + } + s.keysHash[name] = val + return s.keys[name], nil +} + +// NewBooleanKey creates a new boolean type key to given section. +func (s *Section) NewBooleanKey(name string) (*Key, error) { + key, err := s.NewKey(name, "true") + if err != nil { + return nil, err + } + + key.isBooleanType = true + return key, nil +} + +// GetKey returns key in section by given name. +func (s *Section) GetKey(name string) (*Key, error) { + // FIXME: change to section level lock? + if s.f.BlockMode { + s.f.lock.RLock() + } + if s.f.options.Insensitive { + name = strings.ToLower(name) + } + key := s.keys[name] + if s.f.BlockMode { + s.f.lock.RUnlock() + } + + if key == nil { + // Check if it is a child-section. + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + return sec.GetKey(name) + } else { + break + } + } + return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) + } + return key, nil +} + +// HasKey returns true if section contains a key with given name. +func (s *Section) HasKey(name string) bool { + key, _ := s.GetKey(name) + return key != nil +} + +// Haskey is a backwards-compatible name for HasKey. +func (s *Section) Haskey(name string) bool { + return s.HasKey(name) +} + +// HasValue returns true if section contains given raw value. +func (s *Section) HasValue(value string) bool { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + for _, k := range s.keys { + if value == k.value { + return true + } + } + return false +} + +// Key assumes named Key exists in section and returns a zero-value when not. +func (s *Section) Key(name string) *Key { + key, err := s.GetKey(name) + if err != nil { + // It's OK here because the only possible error is empty key name, + // but if it's empty, this piece of code won't be executed. + key, _ = s.NewKey(name, "") + return key + } + return key +} + +// Keys returns list of keys of section. +func (s *Section) Keys() []*Key { + keys := make([]*Key, len(s.keyList)) + for i := range s.keyList { + keys[i] = s.Key(s.keyList[i]) + } + return keys +} + +// ParentKeys returns list of keys of parent section. 
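+//
+// For instance (illustrative): for a section named "parent.child",
+//
+//	keys := cfg.Section("parent.child").ParentKeys()
+//
+// returns the keys defined under [parent]; deeper names collect keys from
+// every ancestor section.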
+func (s *Section) ParentKeys() []*Key { + var parentKeys []*Key + sname := s.name + for { + if i := strings.LastIndex(sname, "."); i > -1 { + sname = sname[:i] + sec, err := s.f.GetSection(sname) + if err != nil { + continue + } + parentKeys = append(parentKeys, sec.Keys()...) + } else { + break + } + + } + return parentKeys +} + +// KeyStrings returns list of key names of section. +func (s *Section) KeyStrings() []string { + list := make([]string, len(s.keyList)) + copy(list, s.keyList) + return list +} + +// KeysHash returns keys hash consisting of names and values. +func (s *Section) KeysHash() map[string]string { + if s.f.BlockMode { + s.f.lock.RLock() + defer s.f.lock.RUnlock() + } + + hash := map[string]string{} + for key, value := range s.keysHash { + hash[key] = value + } + return hash +} + +// DeleteKey deletes a key from section. +func (s *Section) DeleteKey(name string) { + if s.f.BlockMode { + s.f.lock.Lock() + defer s.f.lock.Unlock() + } + + for i, k := range s.keyList { + if k == name { + s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) + delete(s.keys, name) + return + } + } +} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go new file mode 100644 index 0000000..5ef38d8 --- /dev/null +++ b/vendor/github.com/go-ini/ini/struct.go @@ -0,0 +1,431 @@ +// Copyright 2014 Unknwon +// +// Licensed under the Apache License, Version 2.0 (the "License"): you may +// not use this file except in compliance with the License. You may obtain +// a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +// License for the specific language governing permissions and limitations +// under the License. + +package ini + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "strings" + "time" + "unicode" +) + +// NameMapper represents a ini tag name mapper. +type NameMapper func(string) string + +// Built-in name getters. +var ( + // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. + AllCapsUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + } + newstr = append(newstr, unicode.ToUpper(chr)) + } + return string(newstr) + } + // TitleUnderscore converts to format title_underscore. + TitleUnderscore NameMapper = func(raw string) string { + newstr := make([]rune, 0, len(raw)) + for i, chr := range raw { + if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { + if i > 0 { + newstr = append(newstr, '_') + } + chr -= ('A' - 'a') + } + newstr = append(newstr, chr) + } + return string(newstr) + } +) + +func (s *Section) parseFieldName(raw, actual string) string { + if len(actual) > 0 { + return actual + } + if s.f.NameMapper != nil { + return s.f.NameMapper(raw) + } + return raw +} + +func parseDelim(actual string) string { + if len(actual) > 0 { + return actual + } + return "," +} + +var reflectTime = reflect.TypeOf(time.Now()).Kind() + +// setSliceWithProperType sets proper values to slice based on its type. 
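+//
+// For example (illustrative): given a struct field declared as
+//
+//	Ports []int `ini:"ports" delim:","`
+//
+// and a key value of "8001,8002,8003", the field is set to
+// []int{8001, 8002, 8003}.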
+func setSliceWithProperType(key *Key, field reflect.Value, delim string) error { + strs := key.Strings(delim) + numVals := len(strs) + if numVals == 0 { + return nil + } + + var vals interface{} + + sliceOf := field.Type().Elem().Kind() + switch sliceOf { + case reflect.String: + vals = strs + case reflect.Int: + vals = key.Ints(delim) + case reflect.Int64: + vals = key.Int64s(delim) + case reflect.Uint: + vals = key.Uints(delim) + case reflect.Uint64: + vals = key.Uint64s(delim) + case reflect.Float64: + vals = key.Float64s(delim) + case reflectTime: + vals = key.Times(delim) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + + slice := reflect.MakeSlice(field.Type(), numVals, numVals) + for i := 0; i < numVals; i++ { + switch sliceOf { + case reflect.String: + slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) + case reflect.Int: + slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) + case reflect.Int64: + slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) + case reflect.Uint: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) + case reflect.Uint64: + slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) + case reflect.Float64: + slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) + case reflectTime: + slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) + } + } + field.Set(slice) + return nil +} + +// setWithProperType sets proper value to field based on its type, +// but it does not return error for failing parsing, +// because we want to use default value that is already assigned to strcut. +func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + if len(key.String()) == 0 { + return nil + } + field.SetString(key.String()) + case reflect.Bool: + boolVal, err := key.Bool() + if err != nil { + return nil + } + field.SetBool(boolVal) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + intVal, err := key.Int64() + if err != nil || intVal == 0 { + return nil + } + field.SetInt(intVal) + // byte is an alias for uint8, so supporting uint8 breaks support for byte + case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: + durationVal, err := key.Duration() + // Skip zero value + if err == nil && int(durationVal) > 0 { + field.Set(reflect.ValueOf(durationVal)) + return nil + } + + uintVal, err := key.Uint64() + if err != nil { + return nil + } + field.SetUint(uintVal) + + case reflect.Float32, reflect.Float64: + floatVal, err := key.Float64() + if err != nil { + return nil + } + field.SetFloat(floatVal) + case reflectTime: + timeVal, err := key.Time() + if err != nil { + return nil + } + field.Set(reflect.ValueOf(timeVal)) + case reflect.Slice: + return setSliceWithProperType(key, field, delim) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +func (s *Section) mapTo(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + opts := strings.SplitN(tag, ",", 2) // strip off possible omitempty + fieldName := s.parseFieldName(tpField.Name, opts[0]) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + isAnonymous := 
tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous + isStruct := tpField.Type.Kind() == reflect.Struct + if isAnonymous { + field.Set(reflect.New(tpField.Type.Elem())) + } + + if isAnonymous || isStruct { + if sec, err := s.f.GetSection(fieldName); err == nil { + if err = sec.mapTo(field); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + continue + } + } + + if key, err := s.GetKey(fieldName); err == nil { + if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error mapping field(%s): %v", fieldName, err) + } + } + } + return nil +} + +// MapTo maps section to given struct. +func (s *Section) MapTo(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot map to non-pointer struct") + } + + return s.mapTo(val) +} + +// MapTo maps file to given struct. +func (f *File) MapTo(v interface{}) error { + return f.Section("").MapTo(v) +} + +// MapTo maps data sources to given struct with name mapper. +func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { + cfg, err := Load(source, others...) + if err != nil { + return err + } + cfg.NameMapper = mapper + return cfg.MapTo(v) +} + +// MapTo maps data sources to given struct. +func MapTo(v, source interface{}, others ...interface{}) error { + return MapToWithMapper(v, nil, source, others...) +} + +// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. +func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { + slice := field.Slice(0, field.Len()) + if field.Len() == 0 { + return nil + } + + var buf bytes.Buffer + sliceOf := field.Type().Elem().Kind() + for i := 0; i < field.Len(); i++ { + switch sliceOf { + case reflect.String: + buf.WriteString(slice.Index(i).String()) + case reflect.Int, reflect.Int64: + buf.WriteString(fmt.Sprint(slice.Index(i).Int())) + case reflect.Uint, reflect.Uint64: + buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) + case reflect.Float64: + buf.WriteString(fmt.Sprint(slice.Index(i).Float())) + case reflectTime: + buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) + default: + return fmt.Errorf("unsupported type '[]%s'", sliceOf) + } + buf.WriteString(delim) + } + key.SetValue(buf.String()[:buf.Len()-1]) + return nil +} + +// reflectWithProperType does the opposite thing as setWithProperType. +func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { + switch t.Kind() { + case reflect.String: + key.SetValue(field.String()) + case reflect.Bool: + key.SetValue(fmt.Sprint(field.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + key.SetValue(fmt.Sprint(field.Int())) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + key.SetValue(fmt.Sprint(field.Uint())) + case reflect.Float32, reflect.Float64: + key.SetValue(fmt.Sprint(field.Float())) + case reflectTime: + key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) + case reflect.Slice: + return reflectSliceWithProperType(key, field, delim) + default: + return fmt.Errorf("unsupported type '%s'", t) + } + return nil +} + +// CR: copied from encoding/json/encode.go with modifications of time.Time support. +// TODO: add more test coverage. 
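+//
+// It backs the ",omitempty" tag option handled in reflectFrom below: a field
+// declared as (illustrative)
+//
+//	Comment string `ini:"comment,omitempty"`
+//
+// is skipped when its value is the zero string.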
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflectTime: + return v.Interface().(time.Time).IsZero() + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func (s *Section) reflectFrom(val reflect.Value) error { + if val.Kind() == reflect.Ptr { + val = val.Elem() + } + typ := val.Type() + + for i := 0; i < typ.NumField(); i++ { + field := val.Field(i) + tpField := typ.Field(i) + + tag := tpField.Tag.Get("ini") + if tag == "-" { + continue + } + + opts := strings.SplitN(tag, ",", 2) + if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { + continue + } + + fieldName := s.parseFieldName(tpField.Name, opts[0]) + if len(fieldName) == 0 || !field.CanSet() { + continue + } + + if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || + (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { + // Note: The only error here is section doesn't exist. + sec, err := s.f.GetSection(fieldName) + if err != nil { + // Note: fieldName can never be empty here, ignore error. + sec, _ = s.f.NewSection(fieldName) + } + if err = sec.reflectFrom(field); err != nil { + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) + } + continue + } + + // Note: Same reason as secion. + key, err := s.GetKey(fieldName) + if err != nil { + key, _ = s.NewKey(fieldName, "") + } + if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { + return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) + } + + } + return nil +} + +// ReflectFrom reflects secion from given struct. +func (s *Section) ReflectFrom(v interface{}) error { + typ := reflect.TypeOf(v) + val := reflect.ValueOf(v) + if typ.Kind() == reflect.Ptr { + typ = typ.Elem() + val = val.Elem() + } else { + return errors.New("cannot reflect from non-pointer struct") + } + + return s.reflectFrom(val) +} + +// ReflectFrom reflects file from given struct. +func (f *File) ReflectFrom(v interface{}) error { + return f.Section("").ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct with name mapper. +func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { + cfg.NameMapper = mapper + return cfg.ReflectFrom(v) +} + +// ReflectFrom reflects data sources from given struct. 
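+//
+// A hedged usage sketch (Empty is defined elsewhere in this package):
+//
+//	type Config struct {
+//		Host string `ini:"host"`
+//		Port int    `ini:"port"`
+//	}
+//
+//	cfg := ini.Empty()
+//	err := ini.ReflectFrom(cfg, &Config{Host: "localhost", Port: 8080})
+//
+// Plain fields land in the default section; anonymous pointer or struct
+// fields are written to sections named after the field.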
+func ReflectFrom(cfg *File, v interface{}) error { + return ReflectFromWithMapper(cfg, v, nil) +} diff --git a/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/LICENSE b/vendor/github.com/inconshreveable/mousetrap/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/inconshreveable/mousetrap/LICENSE rename to vendor/github.com/inconshreveable/mousetrap/LICENSE diff --git a/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/README.md b/vendor/github.com/inconshreveable/mousetrap/README.md similarity index 100% rename from Godeps/_workspace/src/github.com/inconshreveable/mousetrap/README.md rename to vendor/github.com/inconshreveable/mousetrap/README.md diff --git a/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_others.go b/vendor/github.com/inconshreveable/mousetrap/trap_others.go similarity index 100% rename from Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_others.go rename to vendor/github.com/inconshreveable/mousetrap/trap_others.go diff --git a/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_windows.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows.go similarity index 100% rename from Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_windows.go rename to vendor/github.com/inconshreveable/mousetrap/trap_windows.go diff --git a/Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_windows_1.4.go b/vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go similarity index 100% rename from Godeps/_workspace/src/github.com/inconshreveable/mousetrap/trap_windows_1.4.go rename to vendor/github.com/inconshreveable/mousetrap/trap_windows_1.4.go diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore new file mode 100644 index 0000000..5091fb0 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.gitignore @@ -0,0 +1,4 @@ +/jpgo +jmespath-fuzz.zip +cpu.out +go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml new file mode 100644 index 0000000..1f98077 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/.travis.yml @@ -0,0 +1,9 @@ +language: go + +sudo: false + +go: + - 1.4 + +install: go get -v -t ./... +script: make test diff --git a/vendor/github.com/jmespath/go-jmespath/LICENSE b/vendor/github.com/jmespath/go-jmespath/LICENSE new file mode 100644 index 0000000..b03310a --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/LICENSE @@ -0,0 +1,13 @@ +Copyright 2015 James Saryerwinnie + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile new file mode 100644 index 0000000..a828d28 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/Makefile @@ -0,0 +1,44 @@ + +CMD = jpgo + +help: + @echo "Please use \`make ' where is one of" + @echo " test to run all the tests" + @echo " build to build the library and jp executable" + @echo " generate to run codegen" + + +generate: + go generate ./... + +build: + rm -f $(CMD) + go build ./... + rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./... + mv cmd/$(CMD)/$(CMD) . + +test: + go test -v ./... + +check: + go vet ./... + @echo "golint ./..." + @lint=`golint ./...`; \ + lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \ + echo "$$lint"; \ + if [ "$$lint" != "" ]; then exit 1; fi + +htmlc: + go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov + +buildfuzz: + go-fuzz-build github.com/jmespath/go-jmespath/fuzz + +fuzz: buildfuzz + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata + +bench: + go test -bench . -cpuprofile cpu.out + +pprof-cpu: + go tool pprof ./go-jmespath.test ./cpu.out diff --git a/vendor/github.com/jmespath/go-jmespath/README.md b/vendor/github.com/jmespath/go-jmespath/README.md new file mode 100644 index 0000000..187ef67 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/README.md @@ -0,0 +1,7 @@ +# go-jmespath - A JMESPath implementation in Go + +[![Build Status](https://img.shields.io/travis/jmespath/go-jmespath.svg)](https://travis-ci.org/jmespath/go-jmespath) + + + +See http://jmespath.org for more info. diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go new file mode 100644 index 0000000..9cfa988 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -0,0 +1,49 @@ +package jmespath + +import "strconv" + +// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is +// safe for concurrent use by multiple goroutines. +type JMESPath struct { + ast ASTNode + intr *treeInterpreter +} + +// Compile parses a JMESPath expression and returns, if successful, a JMESPath +// object that can be used to match against data. +func Compile(expression string) (*JMESPath, error) { + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + jmespath := &JMESPath{ast: ast, intr: newInterpreter()} + return jmespath, nil +} + +// MustCompile is like Compile but panics if the expression cannot be parsed. +// It simplifies safe initialization of global variables holding compiled +// JMESPaths. +func MustCompile(expression string) *JMESPath { + jmespath, err := Compile(expression) + if err != nil { + panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) + } + return jmespath +} + +// Search evaluates a JMESPath expression against input data and returns the result. +func (jp *JMESPath) Search(data interface{}) (interface{}, error) { + return jp.intr.Execute(jp.ast, data) +} + +// Search evaluates a JMESPath expression against input data and returns the result. 
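+//
+// For example (illustrative; the input would normally come from
+// encoding/json):
+//
+//	var data interface{}
+//	_ = json.Unmarshal([]byte(`{"foo": {"bar": "baz"}}`), &data)
+//	result, _ := Search("foo.bar", data) // result == "baz"
+//
+// For repeated evaluation of one expression, Compile or MustCompile above
+// avoid re-parsing it on every call.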
+func Search(expression string, data interface{}) (interface{}, error) { + intr := newInterpreter() + parser := NewParser() + ast, err := parser.Parse(expression) + if err != nil { + return nil, err + } + return intr.Execute(ast, data) +} diff --git a/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go new file mode 100644 index 0000000..1cd2d23 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/astnodetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type astNodeType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection" + +var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307} + +func (i astNodeType) String() string { + if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) { + return fmt.Sprintf("astNodeType(%d)", i) + } + return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go new file mode 100644 index 0000000..9b7cd89 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -0,0 +1,842 @@ +package jmespath + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "unicode/utf8" +) + +type jpFunction func(arguments []interface{}) (interface{}, error) + +type jpType string + +const ( + jpUnknown jpType = "unknown" + jpNumber jpType = "number" + jpString jpType = "string" + jpArray jpType = "array" + jpObject jpType = "object" + jpArrayNumber jpType = "array[number]" + jpArrayString jpType = "array[string]" + jpExpref jpType = "expref" + jpAny jpType = "any" +) + +type functionEntry struct { + name string + arguments []argSpec + handler jpFunction + hasExpRef bool +} + +type argSpec struct { + types []jpType + variadic bool +} + +type byExprString struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprString) Len() int { + return len(a.items) +} +func (a *byExprString) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprString) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + ith, ok := first.(string) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(string) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type byExprFloat struct { + intr *treeInterpreter + node ASTNode + items []interface{} + hasError bool +} + +func (a *byExprFloat) Len() int { + return len(a.items) +} +func (a *byExprFloat) Swap(i, j int) { + a.items[i], a.items[j] = a.items[j], a.items[i] +} +func (a *byExprFloat) Less(i, j int) bool { + first, err := a.intr.Execute(a.node, a.items[i]) + if err != nil { + a.hasError = true + // Return a dummy value. 
+ return true + } + ith, ok := first.(float64) + if !ok { + a.hasError = true + return true + } + second, err := a.intr.Execute(a.node, a.items[j]) + if err != nil { + a.hasError = true + // Return a dummy value. + return true + } + jth, ok := second.(float64) + if !ok { + a.hasError = true + return true + } + return ith < jth +} + +type functionCaller struct { + functionTable map[string]functionEntry +} + +func newFunctionCaller() *functionCaller { + caller := &functionCaller{} + caller.functionTable = map[string]functionEntry{ + "length": { + name: "length", + arguments: []argSpec{ + {types: []jpType{jpString, jpArray, jpObject}}, + }, + handler: jpfLength, + }, + "starts_with": { + name: "starts_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfStartsWith, + }, + "abs": { + name: "abs", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfAbs, + }, + "avg": { + name: "avg", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfAvg, + }, + "ceil": { + name: "ceil", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfCeil, + }, + "contains": { + name: "contains", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + {types: []jpType{jpAny}}, + }, + handler: jpfContains, + }, + "ends_with": { + name: "ends_with", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpString}}, + }, + handler: jpfEndsWith, + }, + "floor": { + name: "floor", + arguments: []argSpec{ + {types: []jpType{jpNumber}}, + }, + handler: jpfFloor, + }, + "map": { + name: "amp", + arguments: []argSpec{ + {types: []jpType{jpExpref}}, + {types: []jpType{jpArray}}, + }, + handler: jpfMap, + hasExpRef: true, + }, + "max": { + name: "max", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMax, + }, + "merge": { + name: "merge", + arguments: []argSpec{ + {types: []jpType{jpObject}, variadic: true}, + }, + handler: jpfMerge, + }, + "max_by": { + name: "max_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMaxBy, + hasExpRef: true, + }, + "sum": { + name: "sum", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber}}, + }, + handler: jpfSum, + }, + "min": { + name: "min", + arguments: []argSpec{ + {types: []jpType{jpArrayNumber, jpArrayString}}, + }, + handler: jpfMin, + }, + "min_by": { + name: "min_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfMinBy, + hasExpRef: true, + }, + "type": { + name: "type", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfType, + }, + "keys": { + name: "keys", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfKeys, + }, + "values": { + name: "values", + arguments: []argSpec{ + {types: []jpType{jpObject}}, + }, + handler: jpfValues, + }, + "sort": { + name: "sort", + arguments: []argSpec{ + {types: []jpType{jpArrayString, jpArrayNumber}}, + }, + handler: jpfSort, + }, + "sort_by": { + name: "sort_by", + arguments: []argSpec{ + {types: []jpType{jpArray}}, + {types: []jpType{jpExpref}}, + }, + handler: jpfSortBy, + hasExpRef: true, + }, + "join": { + name: "join", + arguments: []argSpec{ + {types: []jpType{jpString}}, + {types: []jpType{jpArrayString}}, + }, + handler: jpfJoin, + }, + "reverse": { + name: "reverse", + arguments: []argSpec{ + {types: []jpType{jpArray, jpString}}, + }, + handler: jpfReverse, + }, + "to_array": { + 
name: "to_array", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToArray, + }, + "to_string": { + name: "to_string", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToString, + }, + "to_number": { + name: "to_number", + arguments: []argSpec{ + {types: []jpType{jpAny}}, + }, + handler: jpfToNumber, + }, + "not_null": { + name: "not_null", + arguments: []argSpec{ + {types: []jpType{jpAny}, variadic: true}, + }, + handler: jpfNotNull, + }, + } + return caller +} + +func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) { + if len(e.arguments) == 0 { + return arguments, nil + } + if !e.arguments[len(e.arguments)-1].variadic { + if len(e.arguments) != len(arguments) { + return nil, errors.New("incorrect number of args") + } + for i, spec := range e.arguments { + userArg := arguments[i] + err := spec.typeCheck(userArg) + if err != nil { + return nil, err + } + } + return arguments, nil + } + if len(arguments) < len(e.arguments) { + return nil, errors.New("Invalid arity.") + } + return arguments, nil +} + +func (a *argSpec) typeCheck(arg interface{}) error { + for _, t := range a.types { + switch t { + case jpNumber: + if _, ok := arg.(float64); ok { + return nil + } + case jpString: + if _, ok := arg.(string); ok { + return nil + } + case jpArray: + if isSliceType(arg) { + return nil + } + case jpObject: + if _, ok := arg.(map[string]interface{}); ok { + return nil + } + case jpArrayNumber: + if _, ok := toArrayNum(arg); ok { + return nil + } + case jpArrayString: + if _, ok := toArrayStr(arg); ok { + return nil + } + case jpAny: + return nil + case jpExpref: + if _, ok := arg.(expRef); ok { + return nil + } + } + } + return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types) +} + +func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) { + entry, ok := f.functionTable[name] + if !ok { + return nil, errors.New("unknown function: " + name) + } + resolvedArgs, err := entry.resolveArgs(arguments) + if err != nil { + return nil, err + } + if entry.hasExpRef { + var extra []interface{} + extra = append(extra, intr) + resolvedArgs = append(extra, resolvedArgs...) + } + return entry.handler(resolvedArgs) +} + +func jpfAbs(arguments []interface{}) (interface{}, error) { + num := arguments[0].(float64) + return math.Abs(num), nil +} + +func jpfLength(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if c, ok := arg.(string); ok { + return float64(utf8.RuneCountInString(c)), nil + } else if isSliceType(arg) { + v := reflect.ValueOf(arg) + return float64(v.Len()), nil + } else if c, ok := arg.(map[string]interface{}); ok { + return float64(len(c)), nil + } + return nil, errors.New("could not compute length()") +} + +func jpfStartsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + prefix := arguments[1].(string) + return strings.HasPrefix(search, prefix), nil +} + +func jpfAvg(arguments []interface{}) (interface{}, error) { + // We've already type checked the value so we can safely use + // type assertions. 
+ args := arguments[0].([]interface{}) + length := float64(len(args)) + numerator := 0.0 + for _, n := range args { + numerator += n.(float64) + } + return numerator / length, nil +} +func jpfCeil(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Ceil(val), nil +} +func jpfContains(arguments []interface{}) (interface{}, error) { + search := arguments[0] + el := arguments[1] + if searchStr, ok := search.(string); ok { + if elStr, ok := el.(string); ok { + return strings.Index(searchStr, elStr) != -1, nil + } + return false, nil + } + // Otherwise this is a generic contains for []interface{} + general := search.([]interface{}) + for _, item := range general { + if item == el { + return true, nil + } + } + return false, nil +} +func jpfEndsWith(arguments []interface{}) (interface{}, error) { + search := arguments[0].(string) + suffix := arguments[1].(string) + return strings.HasSuffix(search, suffix), nil +} +func jpfFloor(arguments []interface{}) (interface{}, error) { + val := arguments[0].(float64) + return math.Floor(val), nil +} +func jpfMap(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + exp := arguments[1].(expRef) + node := exp.ref + arr := arguments[2].([]interface{}) + mapped := make([]interface{}, 0, len(arr)) + for _, value := range arr { + current, err := intr.Execute(node, value) + if err != nil { + return nil, err + } + mapped = append(mapped, current) + } + return mapped, nil +} +func jpfMax(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil + } + // Otherwise we're dealing with a max() of strings. 
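+	// e.g. the max of ["a", "c", "b"] is "c" (illustrative; plain
+	// lexicographic comparison of Go strings).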
+ items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item > best { + best = item + } + } + return best, nil +} +func jpfMerge(arguments []interface{}) (interface{}, error) { + final := make(map[string]interface{}) + for _, m := range arguments { + mapped := m.(map[string]interface{}) + for key, value := range mapped { + final[key] = value + } + } + return final, nil +} +func jpfMaxBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + switch t := start.(type) { + case float64: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + case string: + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current > bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + default: + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfSum(arguments []interface{}) (interface{}, error) { + items, _ := toArrayNum(arguments[0]) + sum := 0.0 + for _, item := range items { + sum += item + } + return sum, nil +} + +func jpfMin(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil + } + items, _ := toArrayStr(arguments[0]) + if len(items) == 0 { + return nil, nil + } + if len(items) == 1 { + return items[0], nil + } + best := items[0] + for _, item := range items[1:] { + if item < best { + best = item + } + } + return best, nil +} + +func jpfMinBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return nil, nil + } else if len(arr) == 1 { + return arr[0], nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if t, ok := start.(float64); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(float64) + if !ok { + return nil, errors.New("invalid type, must be number") + } + if current < bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else if t, ok := start.(string); ok { + bestVal := t + bestItem := arr[0] + for _, item := range arr[1:] { + result, err := intr.Execute(node, item) + if err != nil { + return nil, err + } + current, ok := result.(string) + if !ok { + return nil, errors.New("invalid type, must be string") + } + if current < 
bestVal { + bestVal = current + bestItem = item + } + } + return bestItem, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfType(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if _, ok := arg.(float64); ok { + return "number", nil + } + if _, ok := arg.(string); ok { + return "string", nil + } + if _, ok := arg.([]interface{}); ok { + return "array", nil + } + if _, ok := arg.(map[string]interface{}); ok { + return "object", nil + } + if arg == nil { + return "null", nil + } + if arg == true || arg == false { + return "boolean", nil + } + return nil, errors.New("unknown type") +} +func jpfKeys(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for key := range arg { + collected = append(collected, key) + } + return collected, nil +} +func jpfValues(arguments []interface{}) (interface{}, error) { + arg := arguments[0].(map[string]interface{}) + collected := make([]interface{}, 0, len(arg)) + for _, value := range arg { + collected = append(collected, value) + } + return collected, nil +} +func jpfSort(arguments []interface{}) (interface{}, error) { + if items, ok := toArrayNum(arguments[0]); ok { + d := sort.Float64Slice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil + } + // Otherwise we're dealing with sort()'ing strings. + items, _ := toArrayStr(arguments[0]) + d := sort.StringSlice(items) + sort.Stable(d) + final := make([]interface{}, len(d)) + for i, val := range d { + final[i] = val + } + return final, nil +} +func jpfSortBy(arguments []interface{}) (interface{}, error) { + intr := arguments[0].(*treeInterpreter) + arr := arguments[1].([]interface{}) + exp := arguments[2].(expRef) + node := exp.ref + if len(arr) == 0 { + return arr, nil + } else if len(arr) == 1 { + return arr, nil + } + start, err := intr.Execute(node, arr[0]) + if err != nil { + return nil, err + } + if _, ok := start.(float64); ok { + sortable := &byExprFloat{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else if _, ok := start.(string); ok { + sortable := &byExprString{intr, node, arr, false} + sort.Stable(sortable) + if sortable.hasError { + return nil, errors.New("error in sort_by comparison") + } + return arr, nil + } else { + return nil, errors.New("invalid type, must be number of string") + } +} +func jpfJoin(arguments []interface{}) (interface{}, error) { + sep := arguments[0].(string) + // We can't just do arguments[1].([]string), we have to + // manually convert each item to a string. 
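+	// (A decoded JSON array is []interface{}, so a direct []string type
+	// assertion would fail even when every element is a string.)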
+ arrayStr := []string{} + for _, item := range arguments[1].([]interface{}) { + arrayStr = append(arrayStr, item.(string)) + } + return strings.Join(arrayStr, sep), nil +} +func jpfReverse(arguments []interface{}) (interface{}, error) { + if s, ok := arguments[0].(string); ok { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r), nil + } + items := arguments[0].([]interface{}) + length := len(items) + reversed := make([]interface{}, length) + for i, item := range items { + reversed[length-(i+1)] = item + } + return reversed, nil +} +func jpfToArray(arguments []interface{}) (interface{}, error) { + if _, ok := arguments[0].([]interface{}); ok { + return arguments[0], nil + } + return arguments[:1:1], nil +} +func jpfToString(arguments []interface{}) (interface{}, error) { + if v, ok := arguments[0].(string); ok { + return v, nil + } + result, err := json.Marshal(arguments[0]) + if err != nil { + return nil, err + } + return string(result), nil +} +func jpfToNumber(arguments []interface{}) (interface{}, error) { + arg := arguments[0] + if v, ok := arg.(float64); ok { + return v, nil + } + if v, ok := arg.(string); ok { + conv, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, nil + } + return conv, nil + } + if _, ok := arg.([]interface{}); ok { + return nil, nil + } + if _, ok := arg.(map[string]interface{}); ok { + return nil, nil + } + if arg == nil { + return nil, nil + } + if arg == true || arg == false { + return nil, nil + } + return nil, errors.New("unknown type") +} +func jpfNotNull(arguments []interface{}) (interface{}, error) { + for _, arg := range arguments { + if arg != nil { + return arg, nil + } + } + return nil, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter.go b/vendor/github.com/jmespath/go-jmespath/interpreter.go new file mode 100644 index 0000000..13c7460 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/interpreter.go @@ -0,0 +1,418 @@ +package jmespath + +import ( + "errors" + "reflect" + "unicode" + "unicode/utf8" +) + +/* This is a tree based interpreter. It walks the AST and directly + interprets the AST to search through a JSON document. +*/ + +type treeInterpreter struct { + fCall *functionCaller +} + +func newInterpreter() *treeInterpreter { + interpreter := treeInterpreter{} + interpreter.fCall = newFunctionCaller() + return &interpreter +} + +type expRef struct { + ref ASTNode +} + +// Execute takes an ASTNode and input data and interprets the AST directly. +// It will produce the result of applying the JMESPath expression associated +// with the ASTNode to the input data "value". 
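+//
+// A minimal usage sketch, using only names defined in this package (typical
+// callers go through the package's exported helpers instead):
+//
+//	parser := NewParser()
+//	ast, err := parser.Parse("foo.bar[0]")
+//	if err != nil {
+//		// handle the syntax error
+//	}
+//	intr := newInterpreter()
+//	result, err := intr.Execute(ast, data) // data: e.g. a decoded map[string]interface{}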
+func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) { + switch node.nodeType { + case ASTComparator: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + right, err := intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + switch node.value { + case tEQ: + return objsEqual(left, right), nil + case tNE: + return !objsEqual(left, right), nil + } + leftNum, ok := left.(float64) + if !ok { + return nil, nil + } + rightNum, ok := right.(float64) + if !ok { + return nil, nil + } + switch node.value { + case tGT: + return leftNum > rightNum, nil + case tGTE: + return leftNum >= rightNum, nil + case tLT: + return leftNum < rightNum, nil + case tLTE: + return leftNum <= rightNum, nil + } + case ASTExpRef: + return expRef{ref: node.children[0]}, nil + case ASTFunctionExpression: + resolvedArgs := []interface{}{} + for _, arg := range node.children { + current, err := intr.Execute(arg, value) + if err != nil { + return nil, err + } + resolvedArgs = append(resolvedArgs, current) + } + return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr) + case ASTField: + if m, ok := value.(map[string]interface{}); ok { + key := node.value.(string) + return m[key], nil + } + return intr.fieldFromStruct(node.value.(string), value) + case ASTFilterProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.filterProjectionWithReflection(node, left) + } + return nil, nil + } + compareNode := node.children[2] + collected := []interface{}{} + for _, element := range sliceType { + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil + case ASTFlatten: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + sliceType, ok := left.([]interface{}) + if !ok { + // If we can't type convert to []interface{}, there's + // a chance this could still work via reflection if we're + // dealing with user provided types. + if isSliceType(left) { + return intr.flattenWithReflection(left) + } + return nil, nil + } + flattened := []interface{}{} + for _, element := range sliceType { + if elementSlice, ok := element.([]interface{}); ok { + flattened = append(flattened, elementSlice...) + } else if isSliceType(element) { + reflectFlat := []interface{}{} + v := reflect.ValueOf(element) + for i := 0; i < v.Len(); i++ { + reflectFlat = append(reflectFlat, v.Index(i).Interface()) + } + flattened = append(flattened, reflectFlat...) + } else { + flattened = append(flattened, element) + } + } + return flattened, nil + case ASTIdentity, ASTCurrentNode: + return value, nil + case ASTIndex: + if sliceType, ok := value.([]interface{}); ok { + index := node.value.(int) + if index < 0 { + index += len(sliceType) + } + if index < len(sliceType) && index >= 0 { + return sliceType[index], nil + } + return nil, nil + } + // Otherwise try via reflection. 
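+		// (e.g. the caller passed a typed slice such as []string or
+		// []SomeStruct instead of a []interface{})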
+ rv := reflect.ValueOf(value) + if rv.Kind() == reflect.Slice { + index := node.value.(int) + if index < 0 { + index += rv.Len() + } + if index < rv.Len() && index >= 0 { + v := rv.Index(index) + return v.Interface(), nil + } + } + return nil, nil + case ASTKeyValPair: + return intr.Execute(node.children[0], value) + case ASTLiteral: + return node.value, nil + case ASTMultiSelectHash: + if value == nil { + return nil, nil + } + collected := make(map[string]interface{}) + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + key := child.value.(string) + collected[key] = current + } + return collected, nil + case ASTMultiSelectList: + if value == nil { + return nil, nil + } + collected := []interface{}{} + for _, child := range node.children { + current, err := intr.Execute(child, value) + if err != nil { + return nil, err + } + collected = append(collected, current) + } + return collected, nil + case ASTOrExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + matched, err = intr.Execute(node.children[1], value) + if err != nil { + return nil, err + } + } + return matched, nil + case ASTAndExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return matched, nil + } + return intr.Execute(node.children[1], value) + case ASTNotExpression: + matched, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + if isFalse(matched) { + return true, nil + } + return false, nil + case ASTPipe: + result := value + var err error + for _, child := range node.children { + result, err = intr.Execute(child, result) + if err != nil { + return nil, err + } + } + return result, nil + case ASTProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + sliceType, ok := left.([]interface{}) + if !ok { + if isSliceType(left) { + return intr.projectWithReflection(node, left) + } + return nil, nil + } + collected := []interface{}{} + var current interface{} + for _, element := range sliceType { + current, err = intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, nil + case ASTSubexpression, ASTIndexExpression: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, err + } + return intr.Execute(node.children[1], left) + case ASTSlice: + sliceType, ok := value.([]interface{}) + if !ok { + if isSliceType(value) { + return intr.sliceWithReflection(node, value) + } + return nil, nil + } + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + return slice(sliceType, sliceParams) + case ASTValueProjection: + left, err := intr.Execute(node.children[0], value) + if err != nil { + return nil, nil + } + mapType, ok := left.(map[string]interface{}) + if !ok { + return nil, nil + } + values := make([]interface{}, len(mapType)) + for _, value := range mapType { + values = append(values, value) + } + collected := []interface{}{} + for _, element := range values { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + return collected, 
nil + } + return nil, errors.New("Unknown AST node: " + node.nodeType.String()) +} + +func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) { + rv := reflect.ValueOf(value) + first, n := utf8.DecodeRuneInString(key) + fieldName := string(unicode.ToUpper(first)) + key[n:] + if rv.Kind() == reflect.Struct { + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } else if rv.Kind() == reflect.Ptr { + // Handle multiple levels of indirection? + if rv.IsNil() { + return nil, nil + } + rv = rv.Elem() + v := rv.FieldByName(fieldName) + if !v.IsValid() { + return nil, nil + } + return v.Interface(), nil + } + return nil, nil +} + +func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + flattened := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + if reflect.TypeOf(element).Kind() == reflect.Slice { + // Then insert the contents of the element + // slice into the flattened slice, + // i.e flattened = append(flattened, mySlice...) + elementV := reflect.ValueOf(element) + for j := 0; j < elementV.Len(); j++ { + flattened = append( + flattened, elementV.Index(j).Interface()) + } + } else { + flattened = append(flattened, element) + } + } + return flattened, nil +} + +func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) { + v := reflect.ValueOf(value) + parts := node.value.([]*int) + sliceParams := make([]sliceParam, 3) + for i, part := range parts { + if part != nil { + sliceParams[i].Specified = true + sliceParams[i].N = *part + } + } + final := []interface{}{} + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + final = append(final, element) + } + return slice(final, sliceParams) +} + +func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) { + compareNode := node.children[2] + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(compareNode, element) + if err != nil { + return nil, err + } + if !isFalse(result) { + current, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if current != nil { + collected = append(collected, current) + } + } + } + return collected, nil +} + +func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) { + collected := []interface{}{} + v := reflect.ValueOf(value) + for i := 0; i < v.Len(); i++ { + element := v.Index(i).Interface() + result, err := intr.Execute(node.children[1], element) + if err != nil { + return nil, err + } + if result != nil { + collected = append(collected, result) + } + } + return collected, nil +} diff --git a/vendor/github.com/jmespath/go-jmespath/lexer.go b/vendor/github.com/jmespath/go-jmespath/lexer.go new file mode 100644 index 0000000..817900c --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/lexer.go @@ -0,0 +1,420 @@ +package jmespath + +import ( + "bytes" + "encoding/json" + "fmt" + "strconv" + "strings" + "unicode/utf8" +) + +type token struct { + tokenType tokType + value string + position int + length int +} + +type tokType int + +const eof = -1 + +// Lexer contains information about the expression being tokenized. +type Lexer struct { + expression string // The expression provided by the user. 
+ currentPos int // The current position in the string. + lastWidth int // The width of the current rune. This + buf bytes.Buffer // Internal buffer used for building up values. +} + +// SyntaxError is the main error used whenever a lexing or parsing error occurs. +type SyntaxError struct { + msg string // Error message displayed to user + Expression string // Expression that generated a SyntaxError + Offset int // The location in the string where the error occurred +} + +func (e SyntaxError) Error() string { + // In the future, it would be good to underline the specific + // location where the error occurred. + return "SyntaxError: " + e.msg +} + +// HighlightLocation will show where the syntax error occurred. +// It will place a "^" character on a line below the expression +// at the point where the syntax error occurred. +func (e SyntaxError) HighlightLocation() string { + return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^" +} + +//go:generate stringer -type=tokType +const ( + tUnknown tokType = iota + tStar + tDot + tFilter + tFlatten + tLparen + tRparen + tLbracket + tRbracket + tLbrace + tRbrace + tOr + tPipe + tNumber + tUnquotedIdentifier + tQuotedIdentifier + tComma + tColon + tLT + tLTE + tGT + tGTE + tEQ + tNE + tJSONLiteral + tStringLiteral + tCurrent + tExpref + tAnd + tNot + tEOF +) + +var basicTokens = map[rune]tokType{ + '.': tDot, + '*': tStar, + ',': tComma, + ':': tColon, + '{': tLbrace, + '}': tRbrace, + ']': tRbracket, // tLbracket not included because it could be "[]" + '(': tLparen, + ')': tRparen, + '@': tCurrent, +} + +// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64. +// When using this bitmask just be sure to shift the rune down 64 bits +// before checking against identifierStartBits. +const identifierStartBits uint64 = 576460745995190270 + +// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s. +var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270} + +var whiteSpace = map[rune]bool{ + ' ': true, '\t': true, '\n': true, '\r': true, +} + +func (t token) String() string { + return fmt.Sprintf("Token{%+v, %s, %d, %d}", + t.tokenType, t.value, t.position, t.length) +} + +// NewLexer creates a new JMESPath lexer. +func NewLexer() *Lexer { + lexer := Lexer{} + return &lexer +} + +func (lexer *Lexer) next() rune { + if lexer.currentPos >= len(lexer.expression) { + lexer.lastWidth = 0 + return eof + } + r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:]) + lexer.lastWidth = w + lexer.currentPos += w + return r +} + +func (lexer *Lexer) back() { + lexer.currentPos -= lexer.lastWidth +} + +func (lexer *Lexer) peek() rune { + t := lexer.next() + lexer.back() + return t +} + +// tokenize takes an expression and returns corresponding tokens. +func (lexer *Lexer) tokenize(expression string) ([]token, error) { + var tokens []token + lexer.expression = expression + lexer.currentPos = 0 + lexer.lastWidth = 0 +loop: + for { + r := lexer.next() + if identifierStartBits&(1<<(uint64(r)-64)) > 0 { + t := lexer.consumeUnquotedIdentifier() + tokens = append(tokens, t) + } else if val, ok := basicTokens[r]; ok { + // Basic single char token. 
+ t := token{ + tokenType: val, + value: string(r), + position: lexer.currentPos - lexer.lastWidth, + length: 1, + } + tokens = append(tokens, t) + } else if r == '-' || (r >= '0' && r <= '9') { + t := lexer.consumeNumber() + tokens = append(tokens, t) + } else if r == '[' { + t := lexer.consumeLBracket() + tokens = append(tokens, t) + } else if r == '"' { + t, err := lexer.consumeQuotedIdentifier() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '\'' { + t, err := lexer.consumeRawStringLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '`' { + t, err := lexer.consumeLiteral() + if err != nil { + return tokens, err + } + tokens = append(tokens, t) + } else if r == '|' { + t := lexer.matchOrElse(r, '|', tOr, tPipe) + tokens = append(tokens, t) + } else if r == '<' { + t := lexer.matchOrElse(r, '=', tLTE, tLT) + tokens = append(tokens, t) + } else if r == '>' { + t := lexer.matchOrElse(r, '=', tGTE, tGT) + tokens = append(tokens, t) + } else if r == '!' { + t := lexer.matchOrElse(r, '=', tNE, tNot) + tokens = append(tokens, t) + } else if r == '=' { + t := lexer.matchOrElse(r, '=', tEQ, tUnknown) + tokens = append(tokens, t) + } else if r == '&' { + t := lexer.matchOrElse(r, '&', tAnd, tExpref) + tokens = append(tokens, t) + } else if r == eof { + break loop + } else if _, ok := whiteSpace[r]; ok { + // Ignore whitespace + } else { + return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r))) + } + } + tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0}) + return tokens, nil +} + +// Consume characters until the ending rune "r" is reached. +// If the end of the expression is reached before seeing the +// terminating rune "r", then an error is returned. +// If no error occurs then the matching substring is returned. +// The returned string will not include the ending rune. +func (lexer *Lexer) consumeUntil(end rune) (string, error) { + start := lexer.currentPos + current := lexer.next() + for current != end && current != eof { + if current == '\\' && lexer.peek() != eof { + lexer.next() + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. + return "", SyntaxError{ + msg: "Unclosed delimiter: " + string(end), + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil +} + +func (lexer *Lexer) consumeLiteral() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('`') + if err != nil { + return token{}, err + } + value = strings.Replace(value, "\\`", "`", -1) + return token{ + tokenType: tJSONLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) consumeRawStringLiteral() (token, error) { + start := lexer.currentPos + currentIndex := start + current := lexer.next() + for current != '\'' && lexer.peek() != eof { + if current == '\\' && lexer.peek() == '\'' { + chunk := lexer.expression[currentIndex : lexer.currentPos-1] + lexer.buf.WriteString(chunk) + lexer.buf.WriteString("'") + lexer.next() + currentIndex = lexer.currentPos + } + current = lexer.next() + } + if lexer.lastWidth == 0 { + // Then we hit an EOF so we never reached the closing + // delimiter. 
+ return token{}, SyntaxError{ + msg: "Unclosed delimiter: '", + Expression: lexer.expression, + Offset: len(lexer.expression), + } + } + if currentIndex < lexer.currentPos { + lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1]) + } + value := lexer.buf.String() + // Reset the buffer so it can reused again. + lexer.buf.Reset() + return token{ + tokenType: tStringLiteral, + value: value, + position: start, + length: len(value), + }, nil +} + +func (lexer *Lexer) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: lexer.expression, + Offset: lexer.currentPos - 1, + } +} + +// Checks for a two char token, otherwise matches a single character +// token. This is used whenever a two char token overlaps a single +// char token, e.g. "||" -> tPipe, "|" -> tOr. +func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token { + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == second { + t = token{ + tokenType: matchedType, + value: string(first) + string(second), + position: start, + length: 2, + } + } else { + lexer.back() + t = token{ + tokenType: singleCharType, + value: string(first), + position: start, + length: 1, + } + } + return t +} + +func (lexer *Lexer) consumeLBracket() token { + // There's three options here: + // 1. A filter expression "[?" + // 2. A flatten operator "[]" + // 3. A bare rbracket "[" + start := lexer.currentPos - lexer.lastWidth + nextRune := lexer.next() + var t token + if nextRune == '?' { + t = token{ + tokenType: tFilter, + value: "[?", + position: start, + length: 2, + } + } else if nextRune == ']' { + t = token{ + tokenType: tFlatten, + value: "[]", + position: start, + length: 2, + } + } else { + t = token{ + tokenType: tLbracket, + value: "[", + position: start, + length: 1, + } + lexer.back() + } + return t +} + +func (lexer *Lexer) consumeQuotedIdentifier() (token, error) { + start := lexer.currentPos + value, err := lexer.consumeUntil('"') + if err != nil { + return token{}, err + } + var decoded string + asJSON := []byte("\"" + value + "\"") + if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil { + return token{}, err + } + return token{ + tokenType: tQuotedIdentifier, + value: decoded, + position: start - 1, + length: len(decoded), + }, nil +} + +func (lexer *Lexer) consumeUnquotedIdentifier() token { + // Consume runes until we reach the end of an unquoted + // identifier. + start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tUnquotedIdentifier, + value: value, + position: start, + length: lexer.currentPos - start, + } +} + +func (lexer *Lexer) consumeNumber() token { + // Consume runes until we reach something that's not a number. 
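+	// The leading '-' or first digit was already consumed by tokenize(), so
+	// back up by its width so it is included in the token's value.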
+ start := lexer.currentPos - lexer.lastWidth + for { + r := lexer.next() + if r < '0' || r > '9' { + lexer.back() + break + } + } + value := lexer.expression[start:lexer.currentPos] + return token{ + tokenType: tNumber, + value: value, + position: start, + length: lexer.currentPos - start, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/parser.go b/vendor/github.com/jmespath/go-jmespath/parser.go new file mode 100644 index 0000000..1240a17 --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/parser.go @@ -0,0 +1,603 @@ +package jmespath + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" +) + +type astNodeType int + +//go:generate stringer -type astNodeType +const ( + ASTEmpty astNodeType = iota + ASTComparator + ASTCurrentNode + ASTExpRef + ASTFunctionExpression + ASTField + ASTFilterProjection + ASTFlatten + ASTIdentity + ASTIndex + ASTIndexExpression + ASTKeyValPair + ASTLiteral + ASTMultiSelectHash + ASTMultiSelectList + ASTOrExpression + ASTAndExpression + ASTNotExpression + ASTPipe + ASTProjection + ASTSubexpression + ASTSlice + ASTValueProjection +) + +// ASTNode represents the abstract syntax tree of a JMESPath expression. +type ASTNode struct { + nodeType astNodeType + value interface{} + children []ASTNode +} + +func (node ASTNode) String() string { + return node.PrettyPrint(0) +} + +// PrettyPrint will pretty print the parsed AST. +// The AST is an implementation detail and this pretty print +// function is provided as a convenience method to help with +// debugging. You should not rely on its output as the internal +// structure of the AST may change at any time. +func (node ASTNode) PrettyPrint(indent int) string { + spaces := strings.Repeat(" ", indent) + output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType) + nextIndent := indent + 2 + if node.value != nil { + if converted, ok := node.value.(fmt.Stringer); ok { + // Account for things like comparator nodes + // that are enums with a String() method. + output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String()) + } else { + output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value) + } + } + lastIndex := len(node.children) + if lastIndex > 0 { + output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent)) + childIndent := nextIndent + 2 + for _, elem := range node.children { + output += elem.PrettyPrint(childIndent) + } + } + output += fmt.Sprintf("%s}\n", spaces) + return output +} + +var bindingPowers = map[tokType]int{ + tEOF: 0, + tUnquotedIdentifier: 0, + tQuotedIdentifier: 0, + tRbracket: 0, + tRparen: 0, + tComma: 0, + tRbrace: 0, + tNumber: 0, + tCurrent: 0, + tExpref: 0, + tColon: 0, + tPipe: 1, + tOr: 2, + tAnd: 3, + tEQ: 5, + tLT: 5, + tLTE: 5, + tGT: 5, + tGTE: 5, + tNE: 5, + tFlatten: 9, + tStar: 20, + tFilter: 21, + tDot: 40, + tNot: 45, + tLbrace: 50, + tLbracket: 55, + tLparen: 60, +} + +// Parser holds state about the current expression being parsed. +type Parser struct { + expression string + tokens []token + index int +} + +// NewParser creates a new JMESPath parser. +func NewParser() *Parser { + p := Parser{} + return &p +} + +// Parse will compile a JMESPath expression. 
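+//
+// Syntax problems surface as SyntaxError values, whose HighlightLocation
+// method marks the offending offset. A small sketch:
+//
+//	p := NewParser()
+//	if _, err := p.Parse("foo."); err != nil {
+//		if syntaxErr, ok := err.(SyntaxError); ok {
+//			fmt.Println(syntaxErr.HighlightLocation())
+//		}
+//	}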
+func (p *Parser) Parse(expression string) (ASTNode, error) { + lexer := NewLexer() + p.expression = expression + p.index = 0 + tokens, err := lexer.tokenize(expression) + if err != nil { + return ASTNode{}, err + } + p.tokens = tokens + parsed, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() != tEOF { + return ASTNode{}, p.syntaxError(fmt.Sprintf( + "Unexpected token at the end of the expresssion: %s", p.current())) + } + return parsed, nil +} + +func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) { + var err error + leftToken := p.lookaheadToken(0) + p.advance() + leftNode, err := p.nud(leftToken) + if err != nil { + return ASTNode{}, err + } + currentToken := p.current() + for bindingPower < bindingPowers[currentToken] { + p.advance() + leftNode, err = p.led(currentToken, leftNode) + if err != nil { + return ASTNode{}, err + } + currentToken = p.current() + } + return leftNode, nil +} + +func (p *Parser) parseIndexExpression() (ASTNode, error) { + if p.lookahead(0) == tColon || p.lookahead(1) == tColon { + return p.parseSliceExpression() + } + indexStr := p.lookaheadToken(0).value + parsedInt, err := strconv.Atoi(indexStr) + if err != nil { + return ASTNode{}, err + } + indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt} + p.advance() + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return indexNode, nil +} + +func (p *Parser) parseSliceExpression() (ASTNode, error) { + parts := []*int{nil, nil, nil} + index := 0 + current := p.current() + for current != tRbracket && index < 3 { + if current == tColon { + index++ + p.advance() + } else if current == tNumber { + parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value) + if err != nil { + return ASTNode{}, err + } + parts[index] = &parsedInt + p.advance() + } else { + return ASTNode{}, p.syntaxError( + "Expected tColon or tNumber" + ", received: " + p.current().String()) + } + current = p.current() + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTSlice, + value: parts, + }, nil +} + +func (p *Parser) match(tokenType tokType) error { + if p.current() == tokenType { + p.advance() + return nil + } + return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String()) +} + +func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) { + switch tokenType { + case tDot: + if p.current() != tStar { + right, err := p.parseDotRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTSubexpression, + children: []ASTNode{node, right}, + }, err + } + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tDot]) + return ASTNode{ + nodeType: ASTValueProjection, + children: []ASTNode{node, right}, + }, err + case tPipe: + right, err := p.parseExpression(bindingPowers[tPipe]) + return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err + case tOr: + right, err := p.parseExpression(bindingPowers[tOr]) + return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err + case tAnd: + right, err := p.parseExpression(bindingPowers[tAnd]) + return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err + case tLparen: + name := node.value + var args []ASTNode + for p.current() != tRparen { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if p.current() == tComma { + if err := p.match(tComma); err != nil { + return ASTNode{}, err + } + } + args = append(args, expression) + } + 
if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTFunctionExpression, + value: name, + children: args, + }, nil + case tFilter: + return p.parseFilter(node) + case tFlatten: + left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}} + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{left, right}, + }, err + case tEQ, tNE, tGT, tGTE, tLT, tLTE: + right, err := p.parseExpression(bindingPowers[tokenType]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTComparator, + value: tokenType, + children: []ASTNode{node, right}, + }, nil + case tLbracket: + tokenType := p.current() + var right ASTNode + var err error + if tokenType == tNumber || tokenType == tColon { + right, err = p.parseIndexExpression() + if err != nil { + return ASTNode{}, err + } + return p.projectIfSlice(node, right) + } + // Otherwise this is a projection. + if err := p.match(tStar); err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{node, right}, + }, nil + } + return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String()) +} + +func (p *Parser) nud(token token) (ASTNode, error) { + switch token.tokenType { + case tJSONLiteral: + var parsed interface{} + err := json.Unmarshal([]byte(token.value), &parsed) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTLiteral, value: parsed}, nil + case tStringLiteral: + return ASTNode{nodeType: ASTLiteral, value: token.value}, nil + case tUnquotedIdentifier: + return ASTNode{ + nodeType: ASTField, + value: token.value, + }, nil + case tQuotedIdentifier: + node := ASTNode{nodeType: ASTField, value: token.value} + if p.current() == tLparen { + return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token) + } + return node, nil + case tStar: + left := ASTNode{nodeType: ASTIdentity} + var right ASTNode + var err error + if p.current() == tRbracket { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tStar]) + } + return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err + case tFilter: + return p.parseFilter(ASTNode{nodeType: ASTIdentity}) + case tLbrace: + return p.parseMultiSelectHash() + case tFlatten: + left := ASTNode{ + nodeType: ASTFlatten, + children: []ASTNode{{nodeType: ASTIdentity}}, + } + right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil + case tLbracket: + tokenType := p.current() + //var right ASTNode + if tokenType == tNumber || tokenType == tColon { + right, err := p.parseIndexExpression() + if err != nil { + return ASTNode{}, nil + } + return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right) + } else if tokenType == tStar && p.lookahead(1) == tRbracket { + p.advance() + p.advance() + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{{nodeType: ASTIdentity}, right}, + }, nil + } else { + return p.parseMultiSelectList() + } + case tCurrent: + return ASTNode{nodeType: 
ASTCurrentNode}, nil + case tExpref: + expression, err := p.parseExpression(bindingPowers[tExpref]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil + case tNot: + expression, err := p.parseExpression(bindingPowers[tNot]) + if err != nil { + return ASTNode{}, err + } + return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil + case tLparen: + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRparen); err != nil { + return ASTNode{}, err + } + return expression, nil + case tEOF: + return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token) + } + + return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token) +} + +func (p *Parser) parseMultiSelectList() (ASTNode, error) { + var expressions []ASTNode + for { + expression, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + expressions = append(expressions, expression) + if p.current() == tRbracket { + break + } + err = p.match(tComma) + if err != nil { + return ASTNode{}, err + } + } + err := p.match(tRbracket) + if err != nil { + return ASTNode{}, err + } + return ASTNode{ + nodeType: ASTMultiSelectList, + children: expressions, + }, nil +} + +func (p *Parser) parseMultiSelectHash() (ASTNode, error) { + var children []ASTNode + for { + keyToken := p.lookaheadToken(0) + if err := p.match(tUnquotedIdentifier); err != nil { + if err := p.match(tQuotedIdentifier); err != nil { + return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier") + } + } + keyName := keyToken.value + err := p.match(tColon) + if err != nil { + return ASTNode{}, err + } + value, err := p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + node := ASTNode{ + nodeType: ASTKeyValPair, + value: keyName, + children: []ASTNode{value}, + } + children = append(children, node) + if p.current() == tComma { + err := p.match(tComma) + if err != nil { + return ASTNode{}, nil + } + } else if p.current() == tRbrace { + err := p.match(tRbrace) + if err != nil { + return ASTNode{}, nil + } + break + } + } + return ASTNode{ + nodeType: ASTMultiSelectHash, + children: children, + }, nil +} + +func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) { + indexExpr := ASTNode{ + nodeType: ASTIndexExpression, + children: []ASTNode{left, right}, + } + if right.nodeType == ASTSlice { + right, err := p.parseProjectionRHS(bindingPowers[tStar]) + return ASTNode{ + nodeType: ASTProjection, + children: []ASTNode{indexExpr, right}, + }, err + } + return indexExpr, nil +} +func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) { + var right, condition ASTNode + var err error + condition, err = p.parseExpression(0) + if err != nil { + return ASTNode{}, err + } + if err := p.match(tRbracket); err != nil { + return ASTNode{}, err + } + if p.current() == tFlatten { + right = ASTNode{nodeType: ASTIdentity} + } else { + right, err = p.parseProjectionRHS(bindingPowers[tFilter]) + if err != nil { + return ASTNode{}, err + } + } + + return ASTNode{ + nodeType: ASTFilterProjection, + children: []ASTNode{node, right, condition}, + }, nil +} + +func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) { + lookahead := p.current() + if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) { + return p.parseExpression(bindingPower) + } else if lookahead == tLbracket { + if err := p.match(tLbracket); err != nil 
{ + return ASTNode{}, err + } + return p.parseMultiSelectList() + } else if lookahead == tLbrace { + if err := p.match(tLbrace); err != nil { + return ASTNode{}, err + } + return p.parseMultiSelectHash() + } + return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace") +} + +func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) { + current := p.current() + if bindingPowers[current] < 10 { + return ASTNode{nodeType: ASTIdentity}, nil + } else if current == tLbracket { + return p.parseExpression(bindingPower) + } else if current == tFilter { + return p.parseExpression(bindingPower) + } else if current == tDot { + err := p.match(tDot) + if err != nil { + return ASTNode{}, err + } + return p.parseDotRHS(bindingPower) + } else { + return ASTNode{}, p.syntaxError("Error") + } +} + +func (p *Parser) lookahead(number int) tokType { + return p.lookaheadToken(number).tokenType +} + +func (p *Parser) current() tokType { + return p.lookahead(0) +} + +func (p *Parser) lookaheadToken(number int) token { + return p.tokens[p.index+number] +} + +func (p *Parser) advance() { + p.index++ +} + +func tokensOneOf(elements []tokType, token tokType) bool { + for _, elem := range elements { + if elem == token { + return true + } + } + return false +} + +func (p *Parser) syntaxError(msg string) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: p.lookaheadToken(0).position, + } +} + +// Create a SyntaxError based on the provided token. +// This differs from syntaxError() which creates a SyntaxError +// based on the current lookahead token. +func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError { + return SyntaxError{ + msg: msg, + Expression: p.expression, + Offset: t.position, + } +} diff --git a/vendor/github.com/jmespath/go-jmespath/toktype_string.go b/vendor/github.com/jmespath/go-jmespath/toktype_string.go new file mode 100644 index 0000000..dae79cb --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/toktype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=tokType; DO NOT EDIT + +package jmespath + +import "fmt" + +const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF" + +var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214} + +func (i tokType) String() string { + if i < 0 || i >= tokType(len(_tokType_index)-1) { + return fmt.Sprintf("tokType(%d)", i) + } + return _tokType_name[_tokType_index[i]:_tokType_index[i+1]] +} diff --git a/vendor/github.com/jmespath/go-jmespath/util.go b/vendor/github.com/jmespath/go-jmespath/util.go new file mode 100644 index 0000000..ddc1b7d --- /dev/null +++ b/vendor/github.com/jmespath/go-jmespath/util.go @@ -0,0 +1,185 @@ +package jmespath + +import ( + "errors" + "reflect" +) + +// IsFalse determines if an object is false based on the JMESPath spec. +// JMESPath defines false values to be any of: +// - An empty string array, or hash. +// - The boolean value false. +// - nil +func isFalse(value interface{}) bool { + switch v := value.(type) { + case bool: + return !v + case []interface{}: + return len(v) == 0 + case map[string]interface{}: + return len(v) == 0 + case string: + return len(v) == 0 + case nil: + return true + } + // Try the reflection cases before returning false. 
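+	// These handle user-provided Go values (structs, typed slices or maps,
+	// pointers) rather than plain JSON-decoded map[string]interface{} data.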
+ rv := reflect.ValueOf(value) + switch rv.Kind() { + case reflect.Struct: + // A struct type will never be false, even if + // all of its values are the zero type. + return false + case reflect.Slice, reflect.Map: + return rv.Len() == 0 + case reflect.Ptr: + if rv.IsNil() { + return true + } + // If it's a pointer type, we'll try to deref the pointer + // and evaluate the pointer value for isFalse. + element := rv.Elem() + return isFalse(element.Interface()) + } + return false +} + +// ObjsEqual is a generic object equality check. +// It will take two arbitrary objects and recursively determine +// if they are equal. +func objsEqual(left interface{}, right interface{}) bool { + return reflect.DeepEqual(left, right) +} + +// SliceParam refers to a single part of a slice. +// A slice consists of a start, a stop, and a step, similar to +// python slices. +type sliceParam struct { + N int + Specified bool +} + +// Slice supports [start:stop:step] style slicing that's supported in JMESPath. +func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) { + computed, err := computeSliceParams(len(slice), parts) + if err != nil { + return nil, err + } + start, stop, step := computed[0], computed[1], computed[2] + result := []interface{}{} + if step > 0 { + for i := start; i < stop; i += step { + result = append(result, slice[i]) + } + } else { + for i := start; i > stop; i += step { + result = append(result, slice[i]) + } + } + return result, nil +} + +func computeSliceParams(length int, parts []sliceParam) ([]int, error) { + var start, stop, step int + if !parts[2].Specified { + step = 1 + } else if parts[2].N == 0 { + return nil, errors.New("Invalid slice, step cannot be 0") + } else { + step = parts[2].N + } + var stepValueNegative bool + if step < 0 { + stepValueNegative = true + } else { + stepValueNegative = false + } + + if !parts[0].Specified { + if stepValueNegative { + start = length - 1 + } else { + start = 0 + } + } else { + start = capSlice(length, parts[0].N, step) + } + + if !parts[1].Specified { + if stepValueNegative { + stop = -1 + } else { + stop = length + } + } else { + stop = capSlice(length, parts[1].N, step) + } + return []int{start, stop, step}, nil +} + +func capSlice(length int, actual int, step int) int { + if actual < 0 { + actual += length + if actual < 0 { + if step < 0 { + actual = -1 + } else { + actual = 0 + } + } + } else if actual >= length { + if step < 0 { + actual = length - 1 + } else { + actual = length + } + } + return actual +} + +// ToArrayNum converts an empty interface type to a slice of float64. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. +func toArrayNum(data interface{}) ([]float64, bool) { + // Is there a better way to do this with reflect? + if d, ok := data.([]interface{}); ok { + result := make([]float64, len(d)) + for i, el := range d { + item, ok := el.(float64) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +// ToArrayStr converts an empty interface type to a slice of strings. +// If any element in the array cannot be converted, then nil is returned +// along with a second value of false. If the input data could be entirely +// converted, then the converted data, along with a second value of true, +// will be returned. +func toArrayStr(data interface{}) ([]string, bool) { + // Is there a better way to do this with reflect? 
+ if d, ok := data.([]interface{}); ok { + result := make([]string, len(d)) + for i, el := range d { + item, ok := el.(string) + if !ok { + return nil, false + } + result[i] = item + } + return result, true + } + return nil, false +} + +func isSliceType(v interface{}) bool { + if v == nil { + return false + } + return reflect.TypeOf(v).Kind() == reflect.Slice +} diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/.gitignore b/vendor/github.com/spf13/cobra/.gitignore similarity index 56% rename from Godeps/_workspace/src/github.com/spf13/cobra/.gitignore rename to vendor/github.com/spf13/cobra/.gitignore index 36d1a84..1b8c7c2 100644 --- a/Godeps/_workspace/src/github.com/spf13/cobra/.gitignore +++ b/vendor/github.com/spf13/cobra/.gitignore @@ -19,6 +19,18 @@ _cgo_export.* _testmain.go +# Vim files https://github.com/github/gitignore/blob/master/Global/Vim.gitignore +# swap +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +# session +Session.vim +# temporary +.netrwhist +*~ +# auto-generated tag files +tags + *.exe cobra.test diff --git a/vendor/github.com/spf13/cobra/.mailmap b/vendor/github.com/spf13/cobra/.mailmap new file mode 100644 index 0000000..94ec530 --- /dev/null +++ b/vendor/github.com/spf13/cobra/.mailmap @@ -0,0 +1,3 @@ +Steve Francia +Bjørn Erik Pedersen +Fabiano Franz diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml new file mode 100644 index 0000000..bd72adf --- /dev/null +++ b/vendor/github.com/spf13/cobra/.travis.yml @@ -0,0 +1,24 @@ +language: go + +matrix: + include: + - go: 1.4.3 + env: NOVET=true # No bundled vet. + - go: 1.5.4 + - go: 1.6.3 + - go: 1.7 + - go: tip + allow_failures: + - go: tip + +before_install: + - mkdir -p bin + - curl -Lso bin/shellcheck https://github.com/caarlos0/shellcheck-docker/releases/download/v0.4.3/shellcheck + - chmod +x bin/shellcheck +script: + - PATH=$PATH:$PWD/bin go test -v ./... + - go build + - diff -u <(echo -n) <(gofmt -d -s .) + - if [ -z $NOVET ]; then + diff -u <(echo -n) <(go tool vet . 2>&1 | grep -vE 'ExampleCommand|bash_completions.*Fprint'); + fi diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/LICENSE.txt b/vendor/github.com/spf13/cobra/LICENSE.txt similarity index 100% rename from Godeps/_workspace/src/github.com/spf13/cobra/LICENSE.txt rename to vendor/github.com/spf13/cobra/LICENSE.txt diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md new file mode 100644 index 0000000..5d2504b --- /dev/null +++ b/vendor/github.com/spf13/cobra/README.md @@ -0,0 +1,898 @@ +![cobra logo](https://cloud.githubusercontent.com/assets/173412/10886352/ad566232-814f-11e5-9cd0-aa101788c117.png) + +Cobra is both a library for creating powerful modern CLI applications as well as a program to generate applications and command files. 
+ +Many of the most widely used Go projects are built using Cobra including: + +* [Kubernetes](http://kubernetes.io/) +* [Hugo](http://gohugo.io) +* [rkt](https://github.com/coreos/rkt) +* [etcd](https://github.com/coreos/etcd) +* [Docker (distribution)](https://github.com/docker/distribution) +* [OpenShift](https://www.openshift.com/) +* [Delve](https://github.com/derekparker/delve) +* [GopherJS](http://www.gopherjs.org/) +* [CockroachDB](http://www.cockroachlabs.com/) +* [Bleve](http://www.blevesearch.com/) +* [ProjectAtomic (enterprise)](http://www.projectatomic.io/) +* [Parse (CLI)](https://parse.com/) +* [GiantSwarm's swarm](https://github.com/giantswarm/cli) +* [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) + + +[![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) +[![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) +[![GoDoc](https://godoc.org/github.com/spf13/cobra?status.svg)](https://godoc.org/github.com/spf13/cobra) + +![cobra](https://cloud.githubusercontent.com/assets/173412/10911369/84832a8e-8212-11e5-9f82-cc96660a4794.gif) + +# Overview + +Cobra is a library providing a simple interface to create powerful modern CLI +interfaces similar to git & go tools. + +Cobra is also an application that will generate your application scaffolding to rapidly +develop a Cobra-based application. + +Cobra provides: +* Easy subcommand-based CLIs: `app server`, `app fetch`, etc. +* Fully POSIX-compliant flags (including short & long versions) +* Nested subcommands +* Global, local and cascading flags +* Easy generation of applications & commands with `cobra create appname` & `cobra add cmdname` +* Intelligent suggestions (`app srver`... did you mean `app server`?) +* Automatic help generation for commands and flags +* Automatic detailed help for `app help [command]` +* Automatic help flag recognition of `-h`, `--help`, etc. +* Automatically generated bash autocomplete for your application +* Automatically generated man pages for your application +* Command aliases so you can change things without breaking them +* The flexibilty to define your own help, usage, etc. +* Optional tight integration with [viper](http://github.com/spf13/viper) for 12-factor apps + +Cobra has an exceptionally clean interface and simple design without needless +constructors or initialization methods. + +Applications built with Cobra commands are designed to be as user-friendly as +possible. Flags can be placed before or after the command (as long as a +confusing space isn’t provided). Both short and long flags can be used. A +command need not even be fully typed. Help is automatically generated and +available for the application or for a specific command using either the help +command or the `--help` flag. + +# Concepts + +Cobra is built on a structure of commands, arguments & flags. + +**Commands** represent actions, **Args** are things and **Flags** are modifiers for those actions. + +The best applications will read like sentences when used. Users will know how +to use the application because they will natively understand how to use it. + +The pattern to follow is +`APPNAME VERB NOUN --ADJECTIVE.` + or +`APPNAME COMMAND ARG --FLAG` + +A few good real world examples may better illustrate this point. 
+ +In the following example, 'server' is a command, and 'port' is a flag: + + > hugo server --port=1313 + +In this command we are telling Git to clone the url bare. + + > git clone URL --bare + +## Commands + +Command is the central point of the application. Each interaction that +the application supports will be contained in a Command. A command can +have children commands and optionally run an action. + +In the example above, 'server' is the command. + +A Command has the following structure: + +```go +type Command struct { + Use string // The one-line usage message. + Short string // The short description shown in the 'help' output. + Long string // The long message shown in the 'help ' output. + Run func(cmd *Command, args []string) // Run runs the command. +} +``` + +## Flags + +A Flag is a way to modify the behavior of a command. Cobra supports +fully POSIX-compliant flags as well as the Go [flag package](https://golang.org/pkg/flag/). +A Cobra command can define flags that persist through to children commands +and flags that are only available to that command. + +In the example above, 'port' is the flag. + +Flag functionality is provided by the [pflag +library](https://github.com/ogier/pflag), a fork of the flag standard library +which maintains the same interface while adding POSIX compliance. + +## Usage + +Cobra works by creating a set of commands and then organizing them into a tree. +The tree defines the structure of the application. + +Once each command is defined with its corresponding flags, then the +tree is assigned to the commander which is finally executed. + +# Installing +Using Cobra is easy. First, use `go get` to install the latest version +of the library. This command will install the `cobra` generator executible +along with the library: + + > go get -v github.com/spf13/cobra/cobra + +Next, include Cobra in your application: + +```go +import "github.com/spf13/cobra" +``` + +# Getting Started + +While you are welcome to provide your own organization, typically a Cobra based +application will follow the following organizational structure. + +``` + ▾ appName/ + ▾ cmd/ + add.go + your.go + commands.go + here.go + main.go +``` + +In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. + +```go +package main + +import "{pathToYourApp}/cmd" + +func main() { + if err := cmd.RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(-1) + } +} +``` + +## Using the Cobra Generator + +Cobra provides its own program that will create your application and add any +commands you want. It's the easiest way to incorporate Cobra into your application. + +In order to use the cobra command, compile it using the following command: + + > go install github.com/spf13/cobra/cobra + +This will create the cobra executable under your go path bin directory! + +### cobra init + +The `cobra init [yourApp]` command will create your initial application code +for you. It is a very powerful application that will populate your program with +the right structure so you can immediately enjoy all the benefits of Cobra. It +will also automatically apply the license you specify to your application. + +Cobra init is pretty smart. You can provide it a full path, or simply a path +similar to what is expected in the import. + +``` +cobra init github.com/spf13/newAppName +``` + +### cobra add + +Once an application is initialized Cobra can create additional commands for you. 
+Let's say you created an app and you wanted the following commands for it: + +* app serve +* app config +* app config create + +In your project directory (where your main.go file is) you would run the following: + +``` +cobra add serve +cobra add config +cobra add create -p 'configCmd' +``` + +Once you have run these three commands you would have an app structure that would look like: + +``` + ▾ app/ + ▾ cmd/ + serve.go + config.go + create.go + main.go +``` + +at this point you can run `go run main.go` and it would run your app. `go run +main.go serve`, `go run main.go config`, `go run main.go config create` along +with `go run main.go help serve`, etc would all work. + +Obviously you haven't added your own code to these yet, the commands are ready +for you to give them their tasks. Have fun. + +### Configuring the cobra generator + +The cobra generator will be easier to use if you provide a simple configuration +file which will help you eliminate providing a bunch of repeated information in +flags over and over. + +An example ~/.cobra.yaml file: + +```yaml +author: Steve Francia +license: MIT +``` + +You can specify no license by setting `license` to `none` or you can specify +a custom license: + +```yaml +license: + header: This file is part of {{ .appName }}. + text: | + {{ .copyright }} + + This is my license. There are many like it, but this one is mine. + My license is my best friend. It is my life. I must master it as I must + master my life. +``` + +## Manually implementing Cobra + +To manually implement cobra you need to create a bare main.go file and a RootCmd file. +You will optionally provide additional commands as you see fit. + +### Create the root command + +The root command represents your binary itself. + + +#### Manually create rootCmd + +Cobra doesn't require any special constructors. Simply create your commands. + +Ideally you place this in app/cmd/root.go: + +```go +var RootCmd = &cobra.Command{ + Use: "hugo", + Short: "Hugo is a very fast static site generator", + Long: `A Fast and Flexible Static Site Generator built with + love by spf13 and friends in Go. + Complete documentation is available at http://hugo.spf13.com`, + Run: func(cmd *cobra.Command, args []string) { + // Do Stuff Here + }, +} +``` + +You will additionally define flags and handle configuration in your init() function. + +for example cmd/root.go: + +```go +func init() { + cobra.OnInitialize(initConfig) + RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/") + RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") + RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") + RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") + viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase")) + viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper")) + viper.SetDefault("author", "NAME HERE ") + viper.SetDefault("license", "apache") +} +``` + +### Create your main.go + +With the root command you need to have your main function execute it. +Execute should be run on the root for clarity, though it can be called on any command. 
+ +In a Cobra app, typically the main.go file is very bare. It serves, one purpose, to initialize Cobra. + +```go +package main + +import "{pathToYourApp}/cmd" + +func main() { + if err := cmd.RootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(-1) + } +} +``` + + +### Create additional commands + +Additional commands can be defined and typically are each given their own file +inside of the cmd/ directory. + +If you wanted to create a version command you would create cmd/version.go and +populate it with the following: + +```go +package cmd + +import ( + "github.com/spf13/cobra" +) + +func init() { + RootCmd.AddCommand(versionCmd) +} + +var versionCmd = &cobra.Command{ + Use: "version", + Short: "Print the version number of Hugo", + Long: `All software has versions. This is Hugo's`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Hugo Static Site Generator v0.9 -- HEAD") + }, +} +``` + +### Attach command to its parent + + +If you notice in the above example we attach the command to its parent. In +this case the parent is the rootCmd. In this example we are attaching it to the +root, but commands can be attached at any level. + +```go +RootCmd.AddCommand(versionCmd) +``` + +### Remove a command from its parent + +Removing a command is not a common action in simple programs, but it allows 3rd +parties to customize an existing command tree. + +In this example, we remove the existing `VersionCmd` command of an existing +root command, and we replace it with our own version: + +```go +mainlib.RootCmd.RemoveCommand(mainlib.VersionCmd) +mainlib.RootCmd.AddCommand(versionCmd) +``` + +## Working with Flags + +Flags provide modifiers to control how the action command operates. + +### Assign flags to a command + +Since the flags are defined and used in different locations, we need to +define a variable outside with the correct scope to assign the flag to +work with. + +```go +var Verbose bool +var Source string +``` + +There are two different approaches to assign a flag. + +### Persistent Flags + +A flag can be 'persistent' meaning that this flag will be available to the +command it's assigned to as well as every command under that command. For +global flags, assign a flag as a persistent flag on the root. + +```go +RootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") +``` + +### Local Flags + +A flag can also be assigned locally which will only apply to that specific command. + +```go +RootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") +``` + + +## Example + +In the example below, we have defined three commands. Two are at the top level +and one (cmdTimes) is a child of one of the top commands. In this case the root +is not executable meaning that a subcommand is required. This is accomplished +by not providing a 'Run' for the 'rootCmd'. + +We have only defined one flag for a single command. + +More documentation about flags is available at https://github.com/spf13/pflag + +```go +package main + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +func main() { + + var echoTimes int + + var cmdPrint = &cobra.Command{ + Use: "print [string to print]", + Short: "Print anything to the screen", + Long: `print is for printing anything back to the screen. + For many years people have printed back to the screen. 
+ `, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdEcho = &cobra.Command{ + Use: "echo [string to echo]", + Short: "Echo anything to the screen", + Long: `echo is for echoing anything back. + Echo works a lot like print, except it has a child command. + `, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("Print: " + strings.Join(args, " ")) + }, + } + + var cmdTimes = &cobra.Command{ + Use: "times [# times] [string to echo]", + Short: "Echo anything to the screen more times", + Long: `echo things multiple times back to the user by providing + a count and a string.`, + Run: func(cmd *cobra.Command, args []string) { + for i := 0; i < echoTimes; i++ { + fmt.Println("Echo: " + strings.Join(args, " ")) + } + }, + } + + cmdTimes.Flags().IntVarP(&echoTimes, "times", "t", 1, "times to echo the input") + + var rootCmd = &cobra.Command{Use: "app"} + rootCmd.AddCommand(cmdPrint, cmdEcho) + cmdEcho.AddCommand(cmdTimes) + rootCmd.Execute() +} +``` + +For a more complete example of a larger application, please checkout [Hugo](http://gohugo.io/). + +## The Help Command + +Cobra automatically adds a help command to your application when you have subcommands. +This will be called when a user runs 'app help'. Additionally, help will also +support all other commands as input. Say, for instance, you have a command called +'create' without any additional configuration; Cobra will work when 'app help +create' is called. Every command will automatically have the '--help' flag added. + +### Example + +The following output is automatically generated by Cobra. Nothing beyond the +command and flag definitions are needed. + + > hugo help + + hugo is the main command, used to build your Hugo site. + + Hugo is a Fast and Flexible Static Site Generator + built with love by spf13 and friends in Go. + + Complete documentation is available at http://gohugo.io/. + + Usage: + hugo [flags] + hugo [command] + + Available Commands: + server Hugo runs its own webserver to render the files + version Print the version number of Hugo + config Print the site configuration + check Check content in the source directory + benchmark Benchmark hugo by building a site a number of times. + convert Convert your content to different formats + new Create new content for your site + list Listing out various types of content + undraft Undraft changes the content's draft status from 'True' to 'False' + genautocomplete Generate shell autocompletion script for Hugo + gendoc Generate Markdown documentation for the Hugo CLI. + genman Generate man page for Hugo + import Import your site from others. + + Flags: + -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ + -D, --buildDrafts[=false]: include content marked as draft + -F, --buildFuture[=false]: include content with publishdate in the future + --cacheDir="": filesystem path to cache directory. 
Defaults: $TMPDIR/hugo_cache/ + --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL + --config="": config file (default is path/config.yaml|json|toml) + -d, --destination="": filesystem path to write files to + --disableRSS[=false]: Do not build RSS files + --disableSitemap[=false]: Do not build Sitemap file + --editor="": edit new content with this editor, if provided + --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it + --log[=false]: Enable Logging + --logFile="": Log File path (if set, logging enabled automatically) + --noTimes[=false]: Don't sync modification time of files + --pluralizeListTitles[=true]: Pluralize titles in lists using inflect + --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") + -s, --source="": filesystem path to read files relative from + --stepAnalysis[=false]: display memory and timing of different steps of the program + -t, --theme="": theme to use (located in /themes/THEMENAME/) + --uglyURLs[=false]: if true, use /filename.html instead of /filename/ + -v, --verbose[=false]: verbose output + --verboseLog[=false]: verbose logging + -w, --watch[=false]: watch filesystem for changes and recreate as needed + + Use "hugo [command] --help" for more information about a command. + + +Help is just a command like any other. There is no special logic or behavior +around it. In fact, you can provide your own if you want. + +### Defining your own help + +You can provide your own Help command or your own template for the default command to use. + +The default help command is + +```go +func (c *Command) initHelp() { + if c.helpCommand == nil { + c.helpCommand = &Command{ + Use: "help [command]", + Short: "Help about any command", + Long: `Help provides help for any command in the application. + Simply type ` + c.Name() + ` help [path to command] for full details.`, + Run: c.HelpFunc(), + } + } + c.AddCommand(c.helpCommand) +} +``` + +You can provide your own command, function or template through the following methods: + +```go +command.SetHelpCommand(cmd *Command) + +command.SetHelpFunc(f func(*Command, []string)) + +command.SetHelpTemplate(s string) +``` + +The latter two will also apply to any children commands. + +## Usage + +When the user provides an invalid flag or invalid command, Cobra responds by +showing the user the 'usage'. + +### Example +You may recognize this from the help above. That's because the default help +embeds the usage as part of its output. + + Usage: + hugo [flags] + hugo [command] + + Available Commands: + server Hugo runs its own webserver to render the files + version Print the version number of Hugo + config Print the site configuration + check Check content in the source directory + benchmark Benchmark hugo by building a site a number of times. + convert Convert your content to different formats + new Create new content for your site + list Listing out various types of content + undraft Undraft changes the content's draft status from 'True' to 'False' + genautocomplete Generate shell autocompletion script for Hugo + gendoc Generate Markdown documentation for the Hugo CLI. + genman Generate man page for Hugo + import Import your site from others. + + Flags: + -b, --baseURL="": hostname (and path) to the root, e.g. http://spf13.com/ + -D, --buildDrafts[=false]: include content marked as draft + -F, --buildFuture[=false]: include content with publishdate in the future + --cacheDir="": filesystem path to cache directory. 
Defaults: $TMPDIR/hugo_cache/ + --canonifyURLs[=false]: if true, all relative URLs will be canonicalized using baseURL + --config="": config file (default is path/config.yaml|json|toml) + -d, --destination="": filesystem path to write files to + --disableRSS[=false]: Do not build RSS files + --disableSitemap[=false]: Do not build Sitemap file + --editor="": edit new content with this editor, if provided + --ignoreCache[=false]: Ignores the cache directory for reading but still writes to it + --log[=false]: Enable Logging + --logFile="": Log File path (if set, logging enabled automatically) + --noTimes[=false]: Don't sync modification time of files + --pluralizeListTitles[=true]: Pluralize titles in lists using inflect + --preserveTaxonomyNames[=false]: Preserve taxonomy names as written ("Gérard Depardieu" vs "gerard-depardieu") + -s, --source="": filesystem path to read files relative from + --stepAnalysis[=false]: display memory and timing of different steps of the program + -t, --theme="": theme to use (located in /themes/THEMENAME/) + --uglyURLs[=false]: if true, use /filename.html instead of /filename/ + -v, --verbose[=false]: verbose output + --verboseLog[=false]: verbose logging + -w, --watch[=false]: watch filesystem for changes and recreate as needed + +### Defining your own usage +You can provide your own usage function or template for Cobra to use. + +The default usage function is: + +```go +return func(c *Command) error { + err := tmpl(c.Out(), c.UsageTemplate(), c) + return err +} +``` + +Like help, the function and template are overridable through public methods: + +```go +command.SetUsageFunc(f func(*Command) error) + +command.SetUsageTemplate(s string) +``` + +## PreRun or PostRun Hooks + +It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: + +- `PersistentPreRun` +- `PreRun` +- `Run` +- `PostRun` +- `PersistentPostRun` + +An example of two commands which use all of these features is below. 
When the subcommand is executed, it will run the root command's `PersistentPreRun` but not the root command's `PersistentPostRun`:
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+
+	var rootCmd = &cobra.Command{
+		Use:   "root [sub]",
+		Short: "My root command",
+		PersistentPreRun: func(cmd *cobra.Command, args []string) {
+			fmt.Printf("Inside rootCmd PersistentPreRun with args: %v\n", args)
+		},
+		PreRun: func(cmd *cobra.Command, args []string) {
+			fmt.Printf("Inside rootCmd PreRun with args: %v\n", args)
+		},
+		Run: func(cmd *cobra.Command, args []string) {
+			fmt.Printf("Inside rootCmd Run with args: %v\n", args)
+		},
+		PostRun: func(cmd *cobra.Command, args []string) {
+			fmt.Printf("Inside rootCmd PostRun with args: %v\n", args)
+		},
+		PersistentPostRun: func(cmd *cobra.Command, args []string) {
+			fmt.Printf("Inside rootCmd PersistentPostRun with args: %v\n", args)
+		},
+	}
+
+	var subCmd = &cobra.Command{
+		Use:   "sub [no options!]",
+		Short: "My subcommand",
+		PreRun: func(cmd *cobra.Command, args []string) {
+			fmt.Printf("Inside subCmd PreRun with args: %v\n", args)
+		},
+		Run: func(cmd *cobra.Command, args []string) {
+			fmt.Printf("Inside subCmd Run with args: %v\n", args)
+		},
+		PostRun: func(cmd *cobra.Command, args []string) {
+			fmt.Printf("Inside subCmd PostRun with args: %v\n", args)
+		},
+		PersistentPostRun: func(cmd *cobra.Command, args []string) {
+			fmt.Printf("Inside subCmd PersistentPostRun with args: %v\n", args)
+		},
+	}
+
+	rootCmd.AddCommand(subCmd)
+
+	rootCmd.SetArgs([]string{""})
+	_ = rootCmd.Execute()
+	fmt.Print("\n")
+	rootCmd.SetArgs([]string{"sub", "arg1", "arg2"})
+	_ = rootCmd.Execute()
+}
+```
+
+## Alternative Error Handling
+
+Cobra also has functions where the return signature is an error. This allows errors to bubble up to the top,
+providing a way to handle them in one location. The current list of functions that return an error is:
+
+* PersistentPreRunE
+* PreRunE
+* RunE
+* PostRunE
+* PersistentPostRunE
+
+If you would like to silence the default `error` and `usage` output in favor of your own, you can set `SilenceUsage`
+and `SilenceErrors` to `true` on the command. A child command respects these flags if they are set on the parent
+command.
+
+**Example using RunE:**
+
+```go
+package main
+
+import (
+	"errors"
+	"log"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	var rootCmd = &cobra.Command{
+		Use:   "hugo",
+		Short: "Hugo is a very fast static site generator",
+		Long: `A Fast and Flexible Static Site Generator built with
+                love by spf13 and friends in Go.
+                Complete documentation is available at http://hugo.spf13.com`,
+		RunE: func(cmd *cobra.Command, args []string) error {
+			// Do Stuff Here
+			return errors.New("some random error")
+		},
+	}
+
+	if err := rootCmd.Execute(); err != nil {
+		log.Fatal(err)
+	}
+}
+```
+
+## Suggestions when "unknown command" happens
+
+Cobra will print automatic suggestions when "unknown command" errors happen. This allows Cobra to behave similarly to the `git` command when a typo happens. For example:
+
+```
+$ hugo srever
+Error: unknown command "srever" for "hugo"
+
+Did you mean this?
+        server
+
+Run 'hugo --help' for usage.
+```
+
+Suggestions are generated automatically based on every registered subcommand and use an implementation of [Levenshtein distance](http://en.wikipedia.org/wiki/Levenshtein_distance). Every registered command that matches a minimum distance of 2 (ignoring case) will be displayed as a suggestion.
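+
+As a minimal, hypothetical sketch of the setup behind the output above (the command names are illustrative, not taken from Hugo itself), registering a `server` subcommand on a root command is all that is needed for the suggestion to appear:
+
+```go
+package main
+
+import (
+	"os"
+
+	"github.com/spf13/cobra"
+)
+
+func main() {
+	// The binary name reported in the error message comes from Use.
+	rootCmd := &cobra.Command{Use: "hugo"}
+
+	// A registered subcommand within edit distance 2 of the typo "srever".
+	serverCmd := &cobra.Command{
+		Use:   "server",
+		Short: "Run the webserver",
+		Run:   func(cmd *cobra.Command, args []string) {},
+	}
+	rootCmd.AddCommand(serverCmd)
+
+	// Invoking the resulting binary as `hugo srever` prints the "unknown command"
+	// error together with the "Did you mean this?" hint shown above.
+	if err := rootCmd.Execute(); err != nil {
+		os.Exit(1)
+	}
+}
+```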
+
+If you need to disable suggestions or tweak the string distance in your command, use:
+
+```go
+command.DisableSuggestions = true
+```
+
+or
+
+```go
+command.SuggestionsMinimumDistance = 1
+```
+
+You can also explicitly set names for which a given command will be suggested using the `SuggestFor` attribute. This allows suggestions for strings that are not close in terms of string distance, but that make sense in your set of commands and for which you don't want aliases. Example:
+
+```
+$ kubectl remove
+Error: unknown command "remove" for "kubectl"
+
+Did you mean this?
+        delete
+
+Run 'kubectl help' for usage.
+```
+
+## Generating Markdown-formatted documentation for your command
+
+Cobra can generate a Markdown-formatted document based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Markdown Docs](doc/md_docs.md).
+
+## Generating man pages for your command
+
+Cobra can generate a man page based on the subcommands, flags, etc. A simple example of how to do this for your command can be found in [Man Docs](doc/man_docs.md).
+
+## Generating bash completions for your command
+
+Cobra can generate a bash-completion file. If you add more information to your command, these completions can be amazingly powerful and flexible. Read more about it in [Bash Completions](bash_completions.md).
+
+## Debugging
+
+Cobra provides a `DebugFlags` method on a command which, when called, will print
+out everything Cobra knows about the flags for each command.
+
+### Example
+
+```go
+command.DebugFlags()
+```
+
+## Release Notes
+* **0.9.0** June 17, 2014
+  * flags can appear anywhere in the args (provided they are unambiguous)
+  * --help prints usage screen for app or command
+  * Prefix matching for commands
+  * Cleaner looking help and usage output
+  * Extensive test suite
+* **0.8.0** Nov 5, 2013
+  * Reworked interface to remove commander completely
+  * Command now primary structure
+  * No initialization needed
+  * Usage & Help templates & functions definable at any level
+  * Updated Readme
+* **0.7.0** Sept 24, 2013
+  * Needs more eyes
+  * Test suite
+  * Support for automatic error messages
+  * Support for help command
+  * Support for printing to any io.Writer instead of os.Stderr
+  * Support for persistent flags which cascade down tree
+  * Ready for integration into Hugo
+* **0.1.0** Sept 3, 2013
+  * Implement first draft
+
+## Extensions
+
+Libraries for extending Cobra:
+
+* [cmdns](https://github.com/gosuri/cmdns): Enables name spacing a command's immediate children. It provides an alternative way to structure subcommands, similar to `heroku apps:create` and `ovrclk clusters:launch`.
+
+## ToDo
+* Launch proper documentation site
+
+## Contributing
+
+1. Fork it
+2. Create your feature branch (`git checkout -b my-new-feature`)
+3. Commit your changes (`git commit -am 'Add some feature'`)
+4. Push to the branch (`git push origin my-new-feature`)
+5. Create new Pull Request
+
+## Contributors
+
+Names in no particular order:
+
+* [spf13](https://github.com/spf13),
+[eparis](https://github.com/eparis),
+[bep](https://github.com/bep), and many more!
+
+## License
+
+Cobra is released under the Apache 2.0 license.
See [LICENSE.txt](https://github.com/spf13/cobra/blob/master/LICENSE.txt) + + +[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/spf13/cobra/trend.png)](https://bitdeli.com/free "Bitdeli Badge") diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go new file mode 100644 index 0000000..7a5bd4d --- /dev/null +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -0,0 +1,641 @@ +package cobra + +import ( + "fmt" + "io" + "os" + "sort" + "strings" + + "github.com/spf13/pflag" +) + +const ( + BashCompFilenameExt = "cobra_annotation_bash_completion_filename_extensions" + BashCompCustom = "cobra_annotation_bash_completion_custom" + BashCompOneRequiredFlag = "cobra_annotation_bash_completion_one_required_flag" + BashCompSubdirsInDir = "cobra_annotation_bash_completion_subdirs_in_dir" +) + +func preamble(out io.Writer, name string) error { + _, err := fmt.Fprintf(out, "# bash completion for %-36s -*- shell-script -*-\n", name) + if err != nil { + return err + } + _, err = fmt.Fprint(out, ` +__debug() +{ + if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then + echo "$*" >> "${BASH_COMP_DEBUG_FILE}" + fi +} + +# Homebrew on Macs have version 1.3 of bash-completion which doesn't include +# _init_completion. This is a very minimal version of that function. +__my_init_completion() +{ + COMPREPLY=() + _get_comp_words_by_ref "$@" cur prev words cword +} + +__index_of_word() +{ + local w word=$1 + shift + index=0 + for w in "$@"; do + [[ $w = "$word" ]] && return + index=$((index+1)) + done + index=-1 +} + +__contains_word() +{ + local w word=$1; shift + for w in "$@"; do + [[ $w = "$word" ]] && return + done + return 1 +} + +__handle_reply() +{ + __debug "${FUNCNAME[0]}" + case $cur in + -*) + if [[ $(type -t compopt) = "builtin" ]]; then + compopt -o nospace + fi + local allflags + if [ ${#must_have_one_flag[@]} -ne 0 ]; then + allflags=("${must_have_one_flag[@]}") + else + allflags=("${flags[*]} ${two_word_flags[*]}") + fi + COMPREPLY=( $(compgen -W "${allflags[*]}" -- "$cur") ) + if [[ $(type -t compopt) = "builtin" ]]; then + [[ "${COMPREPLY[0]}" == *= ]] || compopt +o nospace + fi + + # complete after --flag=abc + if [[ $cur == *=* ]]; then + if [[ $(type -t compopt) = "builtin" ]]; then + compopt +o nospace + fi + + local index flag + flag="${cur%%=*}" + __index_of_word "${flag}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + COMPREPLY=() + PREFIX="" + cur="${cur#*=}" + ${flags_completion[${index}]} + if [ -n "${ZSH_VERSION}" ]; then + # zfs completion needs --flag= prefix + eval "COMPREPLY=( \"\${COMPREPLY[@]/#/${flag}=}\" )" + fi + fi + fi + return 0; + ;; + esac + + # check if we are handling a flag with special work handling + local index + __index_of_word "${prev}" "${flags_with_completion[@]}" + if [[ ${index} -ge 0 ]]; then + ${flags_completion[${index}]} + return + fi + + # we are parsing a flag and don't have a special handler, no completion + if [[ ${cur} != "${words[cword]}" ]]; then + return + fi + + local completions + completions=("${commands[@]}") + if [[ ${#must_have_one_noun[@]} -ne 0 ]]; then + completions=("${must_have_one_noun[@]}") + fi + if [[ ${#must_have_one_flag[@]} -ne 0 ]]; then + completions+=("${must_have_one_flag[@]}") + fi + COMPREPLY=( $(compgen -W "${completions[*]}" -- "$cur") ) + + if [[ ${#COMPREPLY[@]} -eq 0 && ${#noun_aliases[@]} -gt 0 && ${#must_have_one_noun[@]} -ne 0 ]]; then + COMPREPLY=( $(compgen -W "${noun_aliases[*]}" -- "$cur") ) + fi + + if [[ ${#COMPREPLY[@]} -eq 0 ]]; then + 
declare -F __custom_func >/dev/null && __custom_func + fi + + __ltrim_colon_completions "$cur" +} + +# The arguments should be in the form "ext1|ext2|extn" +__handle_filename_extension_flag() +{ + local ext="$1" + _filedir "@(${ext})" +} + +__handle_subdirs_in_dir_flag() +{ + local dir="$1" + pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 +} + +__handle_flag() +{ + __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + # if a command required a flag, and we found it, unset must_have_one_flag() + local flagname=${words[c]} + local flagvalue + # if the word contained an = + if [[ ${words[c]} == *"="* ]]; then + flagvalue=${flagname#*=} # take in as flagvalue after the = + flagname=${flagname%%=*} # strip everything after the = + flagname="${flagname}=" # but put the = back + fi + __debug "${FUNCNAME[0]}: looking for ${flagname}" + if __contains_word "${flagname}" "${must_have_one_flag[@]}"; then + must_have_one_flag=() + fi + + # if you set a flag which only applies to this command, don't show subcommands + if __contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then + commands=() + fi + + # keep flag value with flagname as flaghash + if [ -n "${flagvalue}" ] ; then + flaghash[${flagname}]=${flagvalue} + elif [ -n "${words[ $((c+1)) ]}" ] ; then + flaghash[${flagname}]=${words[ $((c+1)) ]} + else + flaghash[${flagname}]="true" # pad "true" for bool flag + fi + + # skip the argument to a two word flag + if __contains_word "${words[c]}" "${two_word_flags[@]}"; then + c=$((c+1)) + # if we are looking for a flags value, don't show commands + if [[ $c -eq $cword ]]; then + commands=() + fi + fi + + c=$((c+1)) + +} + +__handle_noun() +{ + __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then + must_have_one_noun=() + elif __contains_word "${words[c]}" "${noun_aliases[@]}"; then + must_have_one_noun=() + fi + + nouns+=("${words[c]}") + c=$((c+1)) +} + +__handle_command() +{ + __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + + local next_command + if [[ -n ${last_command} ]]; then + next_command="_${last_command}_${words[c]//:/__}" + else + if [[ $c -eq 0 ]]; then + next_command="_$(basename "${words[c]//:/__}")" + else + next_command="_${words[c]//:/__}" + fi + fi + c=$((c+1)) + __debug "${FUNCNAME[0]}: looking for ${next_command}" + declare -F $next_command >/dev/null && $next_command +} + +__handle_word() +{ + if [[ $c -ge $cword ]]; then + __handle_reply + return + fi + __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + if [[ "${words[c]}" == -* ]]; then + __handle_flag + elif __contains_word "${words[c]}" "${commands[@]}"; then + __handle_command + elif [[ $c -eq 0 ]] && __contains_word "$(basename "${words[c]}")" "${commands[@]}"; then + __handle_command + else + __handle_noun + fi + __handle_word +} + +`) + return err +} + +func postscript(w io.Writer, name string) error { + name = strings.Replace(name, ":", "__", -1) + _, err := fmt.Fprintf(w, "__start_%s()\n", name) + if err != nil { + return err + } + _, err = fmt.Fprintf(w, `{ + local cur prev words cword + declare -A flaghash 2>/dev/null || : + if declare -F _init_completion >/dev/null 2>&1; then + _init_completion -s || return + else + __my_init_completion -n "=" || return + fi + + local c=0 + local flags=() + local two_word_flags=() + local local_nonpersistent_flags=() + local flags_with_completion=() + local flags_completion=() + local commands=("%s") + local must_have_one_flag=() + local 
must_have_one_noun=() + local last_command + local nouns=() + + __handle_word +} + +`, name) + if err != nil { + return err + } + _, err = fmt.Fprintf(w, `if [[ $(type -t compopt) = "builtin" ]]; then + complete -o default -F __start_%s %s +else + complete -o default -o nospace -F __start_%s %s +fi + +`, name, name, name, name) + if err != nil { + return err + } + _, err = fmt.Fprintf(w, "# ex: ts=4 sw=4 et filetype=sh\n") + return err +} + +func writeCommands(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " commands=()\n"); err != nil { + return err + } + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c == cmd.helpCommand { + continue + } + if _, err := fmt.Fprintf(w, " commands+=(%q)\n", c.Name()); err != nil { + return err + } + } + _, err := fmt.Fprintf(w, "\n") + return err +} + +func writeFlagHandler(name string, annotations map[string][]string, w io.Writer) error { + for key, value := range annotations { + switch key { + case BashCompFilenameExt: + _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) + if err != nil { + return err + } + + if len(value) > 0 { + ext := "__handle_filename_extension_flag " + strings.Join(value, "|") + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } else { + ext := "_filedir" + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } + if err != nil { + return err + } + case BashCompCustom: + _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) + if err != nil { + return err + } + if len(value) > 0 { + handlers := strings.Join(value, "; ") + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", handlers) + } else { + _, err = fmt.Fprintf(w, " flags_completion+=(:)\n") + } + if err != nil { + return err + } + case BashCompSubdirsInDir: + _, err := fmt.Fprintf(w, " flags_with_completion+=(%q)\n", name) + + if len(value) == 1 { + ext := "__handle_subdirs_in_dir_flag " + value[0] + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } else { + ext := "_filedir -d" + _, err = fmt.Fprintf(w, " flags_completion+=(%q)\n", ext) + } + if err != nil { + return err + } + } + } + return nil +} + +func writeShortFlag(flag *pflag.Flag, w io.Writer) error { + b := (len(flag.NoOptDefVal) > 0) + name := flag.Shorthand + format := " " + if !b { + format += "two_word_" + } + format += "flags+=(\"-%s\")\n" + if _, err := fmt.Fprintf(w, format, name); err != nil { + return err + } + return writeFlagHandler("-"+name, flag.Annotations, w) +} + +func writeFlag(flag *pflag.Flag, w io.Writer) error { + b := (len(flag.NoOptDefVal) > 0) + name := flag.Name + format := " flags+=(\"--%s" + if !b { + format += "=" + } + format += "\")\n" + if _, err := fmt.Fprintf(w, format, name); err != nil { + return err + } + return writeFlagHandler("--"+name, flag.Annotations, w) +} + +func writeLocalNonPersistentFlag(flag *pflag.Flag, w io.Writer) error { + b := (len(flag.NoOptDefVal) > 0) + name := flag.Name + format := " local_nonpersistent_flags+=(\"--%s" + if !b { + format += "=" + } + format += "\")\n" + _, err := fmt.Fprintf(w, format, name) + return err +} + +func writeFlags(cmd *Command, w io.Writer) error { + _, err := fmt.Fprintf(w, ` flags=() + two_word_flags=() + local_nonpersistent_flags=() + flags_with_completion=() + flags_completion=() + +`) + if err != nil { + return err + } + localNonPersistentFlags := cmd.LocalNonPersistentFlags() + var visitErr error + cmd.NonInheritedFlags().VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + if err := writeFlag(flag, w); err 
!= nil { + visitErr = err + return + } + if len(flag.Shorthand) > 0 { + if err := writeShortFlag(flag, w); err != nil { + visitErr = err + return + } + } + if localNonPersistentFlags.Lookup(flag.Name) != nil { + if err := writeLocalNonPersistentFlag(flag, w); err != nil { + visitErr = err + return + } + } + }) + if visitErr != nil { + return visitErr + } + cmd.InheritedFlags().VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + if err := writeFlag(flag, w); err != nil { + visitErr = err + return + } + if len(flag.Shorthand) > 0 { + if err := writeShortFlag(flag, w); err != nil { + visitErr = err + return + } + } + }) + if visitErr != nil { + return visitErr + } + + _, err = fmt.Fprintf(w, "\n") + return err +} + +func writeRequiredFlag(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " must_have_one_flag=()\n"); err != nil { + return err + } + flags := cmd.NonInheritedFlags() + var visitErr error + flags.VisitAll(func(flag *pflag.Flag) { + if nonCompletableFlag(flag) { + return + } + for key := range flag.Annotations { + switch key { + case BashCompOneRequiredFlag: + format := " must_have_one_flag+=(\"--%s" + b := (flag.Value.Type() == "bool") + if !b { + format += "=" + } + format += "\")\n" + if _, err := fmt.Fprintf(w, format, flag.Name); err != nil { + visitErr = err + return + } + + if len(flag.Shorthand) > 0 { + if _, err := fmt.Fprintf(w, " must_have_one_flag+=(\"-%s\")\n", flag.Shorthand); err != nil { + visitErr = err + return + } + } + } + } + }) + return visitErr +} + +func writeRequiredNouns(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " must_have_one_noun=()\n"); err != nil { + return err + } + sort.Sort(sort.StringSlice(cmd.ValidArgs)) + for _, value := range cmd.ValidArgs { + if _, err := fmt.Fprintf(w, " must_have_one_noun+=(%q)\n", value); err != nil { + return err + } + } + return nil +} + +func writeArgAliases(cmd *Command, w io.Writer) error { + if _, err := fmt.Fprintf(w, " noun_aliases=()\n"); err != nil { + return err + } + sort.Sort(sort.StringSlice(cmd.ArgAliases)) + for _, value := range cmd.ArgAliases { + if _, err := fmt.Fprintf(w, " noun_aliases+=(%q)\n", value); err != nil { + return err + } + } + return nil +} + +func gen(cmd *Command, w io.Writer) error { + for _, c := range cmd.Commands() { + if !c.IsAvailableCommand() || c == cmd.helpCommand { + continue + } + if err := gen(c, w); err != nil { + return err + } + } + commandName := cmd.CommandPath() + commandName = strings.Replace(commandName, " ", "_", -1) + commandName = strings.Replace(commandName, ":", "__", -1) + if _, err := fmt.Fprintf(w, "_%s()\n{\n", commandName); err != nil { + return err + } + if _, err := fmt.Fprintf(w, " last_command=%q\n", commandName); err != nil { + return err + } + if err := writeCommands(cmd, w); err != nil { + return err + } + if err := writeFlags(cmd, w); err != nil { + return err + } + if err := writeRequiredFlag(cmd, w); err != nil { + return err + } + if err := writeRequiredNouns(cmd, w); err != nil { + return err + } + if err := writeArgAliases(cmd, w); err != nil { + return err + } + if _, err := fmt.Fprintf(w, "}\n\n"); err != nil { + return err + } + return nil +} + +func (cmd *Command) GenBashCompletion(w io.Writer) error { + if err := preamble(w, cmd.Name()); err != nil { + return err + } + if len(cmd.BashCompletionFunction) > 0 { + if _, err := fmt.Fprintf(w, "%s\n", cmd.BashCompletionFunction); err != nil { + return err + } + } + if err := gen(cmd, w); err != nil { + return err + } + return 
postscript(w, cmd.Name()) +} + +func nonCompletableFlag(flag *pflag.Flag) bool { + return flag.Hidden || len(flag.Deprecated) > 0 +} + +func (cmd *Command) GenBashCompletionFile(filename string) error { + outFile, err := os.Create(filename) + if err != nil { + return err + } + defer outFile.Close() + + return cmd.GenBashCompletion(outFile) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists. +func (cmd *Command) MarkFlagRequired(name string) error { + return MarkFlagRequired(cmd.Flags(), name) +} + +// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag, if it exists. +func (cmd *Command) MarkPersistentFlagRequired(name string) error { + return MarkFlagRequired(cmd.PersistentFlags(), name) +} + +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists. +func MarkFlagRequired(flags *pflag.FlagSet, name string) error { + return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (cmd *Command) MarkFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(cmd.Flags(), name, extensions...) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag, if it exists. +// Generated bash autocompletion will call the bash function f for the flag. +func (cmd *Command) MarkFlagCustom(name string, f string) error { + return MarkFlagCustom(cmd.Flags(), name, f) +} + +// MarkPersistentFlagFilename adds the BashCompFilenameExt annotation to the named persistent flag, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func (cmd *Command) MarkPersistentFlagFilename(name string, extensions ...string) error { + return MarkFlagFilename(cmd.PersistentFlags(), name, extensions...) +} + +// MarkFlagFilename adds the BashCompFilenameExt annotation to the named flag in the flag set, if it exists. +// Generated bash autocompletion will select filenames for the flag, limiting to named extensions if provided. +func MarkFlagFilename(flags *pflag.FlagSet, name string, extensions ...string) error { + return flags.SetAnnotation(name, BashCompFilenameExt, extensions) +} + +// MarkFlagCustom adds the BashCompCustom annotation to the named flag in the flag set, if it exists. +// Generated bash autocompletion will call the bash function f for the flag. +func MarkFlagCustom(flags *pflag.FlagSet, name string, f string) error { + return flags.SetAnnotation(name, BashCompCustom, []string{f}) +} diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md similarity index 73% rename from Godeps/_workspace/src/github.com/spf13/cobra/bash_completions.md rename to vendor/github.com/spf13/cobra/bash_completions.md index 204704e..6e3b71f 100644 --- a/Godeps/_workspace/src/github.com/spf13/cobra/bash_completions.md +++ b/vendor/github.com/spf13/cobra/bash_completions.md @@ -80,7 +80,7 @@ The `BashCompletionFunction` option is really only valid/useful on the root comm In the above example "pod" was assumed to already be typed. But if you want `kubectl get [tab][tab]` to show a list of valid "nouns" you have to set them. 
Simplified code from `kubectl get` looks like:
 
 ```go
-validArgs []string = { "pods", "nodes", "services", "replicationControllers" }
+validArgs []string = { "pod", "node", "service", "replicationcontroller" }
 
 cmd := &cobra.Command{
 	Use:     "get [(-o|--output=)json|yaml|template|...] (RESOURCE [NAME] | RESOURCE/NAME ...)",
@@ -99,9 +99,34 @@ Notice we put the "ValidArgs" on the "get" subcommand. Doing so will give result
 
 ```bash
 # kubectl get [tab][tab]
-nodes pods replicationControllers services
+node pod replicationcontroller service
 ```
 
+## Plural form and shortcuts for nouns
+
+If your nouns have a number of aliases, you can define them alongside `ValidArgs` using `ArgAliases`:
+
+```go
+argAliases []string = { "pods", "nodes", "services", "svc", "replicationcontrollers", "rc" }
+
+cmd := &cobra.Command{
+    ...
+	ValidArgs:  validArgs,
+	ArgAliases: argAliases,
+}
+```
+
+The aliases are not shown to the user on tab completion, but they are accepted as valid nouns by
+the completion algorithm if entered manually, e.g. in:
+
+```bash
+# kubectl get rc [tab][tab]
+backend        frontend       database
+```
+
+Note that without declaring `rc` as an alias, the completion algorithm would only show the list of
+nouns from the previous example, not the replication controllers.
+
 ## Mark flags as required
 
 Most of the time completions will only show subcommands. But if a flag is required to make a subcommand work, you probably want it to show up when the user types [tab][tab]. Marking a flag as 'Required' is incredibly easy.
@@ -147,3 +172,35 @@ hello.yml  test.json
 ```
 
 So while there are many other files in the CWD it only shows me subdirs and those with valid extensions.
+
+# Specify custom flag completion
+
+Similar to the filename completion and filtering using `cobra.BashCompFilenameExt`, you can specify
+a custom flag completion function with `cobra.BashCompCustom`:
+
+```go
+	annotation := make(map[string][]string)
+	annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"}
+
+	flag := &pflag.Flag{
+		Name:        "namespace",
+		Usage:       usage,
+		Annotations: annotation,
+	}
+	cmd.Flags().AddFlag(flag)
+```
+
+In addition, add the `__kubectl_get_namespaces` implementation in the `BashCompletionFunction`
+value, e.g.:
+
+```bash
+__kubectl_get_namespaces()
+{
+    local template
+    template="{{ range .items  }}{{ .metadata.name }} {{ end }}"
+    local kubectl_out
+    if kubectl_out=$(kubectl get -o template --template="${template}" namespace 2>/dev/null); then
+        COMPREPLY=( $( compgen -W "${kubectl_out[*]}" -- "$cur" ) )
+    fi
+}
+```
diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go
new file mode 100644
index 0000000..b39c715
--- /dev/null
+++ b/vendor/github.com/spf13/cobra/cobra.go
@@ -0,0 +1,173 @@
+// Copyright © 2013 Steve Francia .
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +// Commands similar to git, go tools and other modern CLI tools +// inspired by go, go-Commander, gh and subcommand + +package cobra + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + "text/template" + "unicode" +) + +var templateFuncs = template.FuncMap{ + "trim": strings.TrimSpace, + "trimRightSpace": trimRightSpace, + "appendIfNotPresent": appendIfNotPresent, + "rpad": rpad, + "gt": Gt, + "eq": Eq, +} + +var initializers []func() + +// Automatic prefix matching can be a dangerous thing to automatically enable in CLI tools. +// Set this to true to enable it. +var EnablePrefixMatching = false + +// EnableCommandSorting controls sorting of the slice of commands, which is turned on by default. +// To disable sorting, set it to false. +var EnableCommandSorting = true + +// AddTemplateFunc adds a template function that's available to Usage and Help +// template generation. +func AddTemplateFunc(name string, tmplFunc interface{}) { + templateFuncs[name] = tmplFunc +} + +// AddTemplateFuncs adds multiple template functions availalble to Usage and +// Help template generation. +func AddTemplateFuncs(tmplFuncs template.FuncMap) { + for k, v := range tmplFuncs { + templateFuncs[k] = v + } +} + +// OnInitialize takes a series of func() arguments and appends them to a slice of func(). +func OnInitialize(y ...func()) { + initializers = append(initializers, y...) +} + +// Gt takes two types and checks whether the first type is greater than the second. In case of types Arrays, Chans, +// Maps and Slices, Gt will compare their lengths. Ints are compared directly while strings are first parsed as +// ints and then compared. +func Gt(a interface{}, b interface{}) bool { + var left, right int64 + av := reflect.ValueOf(a) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + left = int64(av.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + left = av.Int() + case reflect.String: + left, _ = strconv.ParseInt(av.String(), 10, 64) + } + + bv := reflect.ValueOf(b) + + switch bv.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + right = int64(bv.Len()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + right = bv.Int() + case reflect.String: + right, _ = strconv.ParseInt(bv.String(), 10, 64) + } + + return left > right +} + +// Eq takes two types and checks whether they are equal. Supported types are int and string. Unsupported types will panic. +func Eq(a interface{}, b interface{}) bool { + av := reflect.ValueOf(a) + bv := reflect.ValueOf(b) + + switch av.Kind() { + case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice: + panic("Eq called on unsupported type") + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return av.Int() == bv.Int() + case reflect.String: + return av.String() == bv.String() + } + return false +} + +func trimRightSpace(s string) string { + return strings.TrimRightFunc(s, unicode.IsSpace) +} + +// appendIfNotPresent will append stringToAppend to the end of s, but only if it's not yet present in s. +func appendIfNotPresent(s, stringToAppend string) string { + if strings.Contains(s, stringToAppend) { + return s + } + return s + " " + stringToAppend +} + +// rpad adds padding to the right of a string. +func rpad(s string, padding int) string { + template := fmt.Sprintf("%%-%ds", padding) + return fmt.Sprintf(template, s) +} + +// tmpl executes the given template text on data, writing the result to w. 
+func tmpl(w io.Writer, text string, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) +} + +// ld compares two strings and returns the levenshtein distance between them. +func ld(s, t string, ignoreCase bool) int { + if ignoreCase { + s = strings.ToLower(s) + t = strings.ToLower(t) + } + d := make([][]int, len(s)+1) + for i := range d { + d[i] = make([]int, len(t)+1) + } + for i := range d { + d[i][0] = i + } + for j := range d[0] { + d[0][j] = j + } + for j := 1; j <= len(t); j++ { + for i := 1; i <= len(s); i++ { + if s[i-1] == t[j-1] { + d[i][j] = d[i-1][j-1] + } else { + min := d[i-1][j] + if d[i][j-1] < min { + min = d[i][j-1] + } + if d[i-1][j-1] < min { + min = d[i-1][j-1] + } + d[i][j] = min + 1 + } + } + + } + return d[len(s)][len(t)] +} diff --git a/Godeps/_workspace/src/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go similarity index 59% rename from Godeps/_workspace/src/github.com/spf13/cobra/command.go rename to vendor/github.com/spf13/cobra/command.go index 74565c2..9ae9836 100644 --- a/Godeps/_workspace/src/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -20,11 +20,10 @@ import ( "fmt" "io" "os" - "runtime" + "path/filepath" + "sort" "strings" - "time" - "github.com/inconshreveable/mousetrap" flag "github.com/spf13/pflag" ) @@ -39,24 +38,35 @@ type Command struct { Use string // An array of aliases that can be used instead of the first word in Use. Aliases []string + // An array of command names for which this command will be suggested - similar to aliases but only suggests. + SuggestFor []string // The short description shown in the 'help' output. Short string // The long message shown in the 'help ' output. Long string // Examples of how to use the command Example string - // List of all valid non-flag arguments, used for bash completions *TODO* actually validate these + // List of all valid non-flag arguments that are accepted in bash completions ValidArgs []string + // List of aliases for ValidArgs. These are not suggested to the user in the bash + // completion, but accepted if entered manually. + ArgAliases []string // Custom functions used by the bash autocompletion generator BashCompletionFunction string // Is this command deprecated and should print this string when used? Deprecated string + // Is this command hidden and should NOT show up in the list of available commands? + Hidden bool // Full set of flags flags *flag.FlagSet // Set of flags childrens of this command will inherit pflags *flag.FlagSet // Flags that are declared specifically by this command (not inherited). lflags *flag.FlagSet + // SilenceErrors is an option to quiet errors down stream + SilenceErrors bool + // Silence Usage is an option to silence usage when an error occurs. + SilenceUsage bool // The *Run functions are executed in the following order: // * PersistentPreRun() // * PreRun() @@ -66,14 +76,26 @@ type Command struct { // All functions get the same args, the arguments after the command name // PersistentPreRun: children of this command will inherit and execute PersistentPreRun func(cmd *Command, args []string) + // PersistentPreRunE: PersistentPreRun but returns an error + PersistentPreRunE func(cmd *Command, args []string) error // PreRun: children of this command will not inherit. 
PreRun func(cmd *Command, args []string) + // PreRunE: PreRun but returns an error + PreRunE func(cmd *Command, args []string) error // Run: Typically the actual work function. Most commands will only implement this Run func(cmd *Command, args []string) + // RunE: Run but returns an error + RunE func(cmd *Command, args []string) error // PostRun: run after the Run command. PostRun func(cmd *Command, args []string) + // PostRunE: PostRun but returns an error + PostRunE func(cmd *Command, args []string) error // PersistentPostRun: children of this command will inherit and execute after PostRun PersistentPostRun func(cmd *Command, args []string) + // PersistentPostRunE: PersistentPostRun but returns an error + PersistentPostRunE func(cmd *Command, args []string) error + // DisableAutoGenTag remove + DisableAutoGenTag bool // Commands is the list of commands supported by this program. commands []*Command // Parent Command for this command @@ -82,19 +104,28 @@ type Command struct { commandsMaxUseLen int commandsMaxCommandPathLen int commandsMaxNameLen int + // is commands slice are sorted or not + commandsAreSorted bool flagErrorBuf *bytes.Buffer args []string // actual args parsed from flags - output *io.Writer // nil means stderr; use Out() method instead + output *io.Writer // out writer if set in SetOutput(w) usageFunc func(*Command) error // Usage can be defined by application usageTemplate string // Can be defined by Application helpTemplate string // Can be defined by Application helpFunc func(*Command, []string) // Help can be defined by application helpCommand *Command // The help command - helpFlagVal bool // The global normalization function that we can use on every pFlag set and children commands globNormFunc func(f *flag.FlagSet, name string) flag.NormalizedName + + // Disable the suggestions based on Levenshtein distance that go along with 'unknown command' messages + DisableSuggestions bool + // If displaying suggestions, allows to set the minimum levenshtein distance to display, must be > 0 + SuggestionsMinimumDistance int + + // Disable the flag parsing. If this is true all flags will be passed to the command as arguments. + DisableFlagParsing bool } // os.Args[1:] by default, if desired, can be overridden @@ -103,43 +134,23 @@ func (c *Command) SetArgs(a []string) { c.args = a } -func (c *Command) getOut(def io.Writer) io.Writer { - if c.output != nil { - return *c.output - } - - if c.HasParent() { - return c.parent.Out() - } else { - return def - } -} - -func (c *Command) Out() io.Writer { - return c.getOut(os.Stderr) -} - -func (c *Command) getOutOrStdout() io.Writer { - return c.getOut(os.Stdout) -} - // SetOutput sets the destination for usage and error messages. // If output is nil, os.Stderr is used. func (c *Command) SetOutput(output io.Writer) { c.output = &output } -// Usage can be defined by application +// Usage can be defined by application. func (c *Command) SetUsageFunc(f func(*Command) error) { c.usageFunc = f } -// Can be defined by Application +// Can be defined by Application. func (c *Command) SetUsageTemplate(s string) { c.usageTemplate = s } -// Can be defined by Application +// Can be defined by Application. func (c *Command) SetHelpFunc(f func(*Command, []string)) { c.helpFunc = f } @@ -148,7 +159,7 @@ func (c *Command) SetHelpCommand(cmd *Command) { c.helpCommand = cmd } -// Can be defined by Application +// Can be defined by Application. 
func (c *Command) SetHelpTemplate(s string) { c.helpTemplate = s } @@ -158,7 +169,6 @@ func (c *Command) SetHelpTemplate(s string) { func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { c.Flags().SetNormalizeFunc(n) c.PersistentFlags().SetNormalizeFunc(n) - c.LocalFlags().SetNormalizeFunc(n) c.globNormFunc = n for _, command := range c.commands { @@ -166,6 +176,26 @@ func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string } } +func (c *Command) OutOrStdout() io.Writer { + return c.getOut(os.Stdout) +} + +func (c *Command) OutOrStderr() io.Writer { + return c.getOut(os.Stderr) +} + +func (c *Command) getOut(def io.Writer) io.Writer { + if c.output != nil { + return *c.output + } + if c.HasParent() { + return c.parent.getOut(def) + } + return def +} + +// UsageFunc returns either the function set by SetUsageFunc for this command +// or a parent, or it returns a default usage function. func (c *Command) UsageFunc() (f func(*Command) error) { if c.usageFunc != nil { return c.usageFunc @@ -173,72 +203,86 @@ func (c *Command) UsageFunc() (f func(*Command) error) { if c.HasParent() { return c.parent.UsageFunc() - } else { - return func(c *Command) error { - err := tmpl(c.Out(), c.UsageTemplate(), c) - return err + } + return func(c *Command) error { + c.mergePersistentFlags() + err := tmpl(c.OutOrStderr(), c.UsageTemplate(), c) + if err != nil { + c.Println(err) } + return err } } + +// Usage puts out the usage for the command. +// Used when a user provides invalid input. +// Can be defined by user by overriding UsageFunc. +func (c *Command) Usage() error { + return c.UsageFunc()(c) +} + +// HelpFunc returns either the function set by SetHelpFunc for this command +// or a parent, or it returns a function with default help behavior. func (c *Command) HelpFunc() func(*Command, []string) { - if c.helpFunc != nil { - return c.helpFunc + cmd := c + for cmd != nil { + if cmd.helpFunc != nil { + return cmd.helpFunc + } + cmd = cmd.parent } - - if c.HasParent() { - return c.parent.HelpFunc() - } else { - return func(c *Command, args []string) { - if len(args) == 0 { - // Help called without any topic, calling on root - c.Root().Help() - return - } - - cmd, _, e := c.Root().Find(args) - if cmd == nil || e != nil { - c.Printf("Unknown help topic %#q.", args) - - c.Root().Usage() - } else { - err := cmd.Help() - if err != nil { - c.Println(err) - } - } + return func(*Command, []string) { + c.mergePersistentFlags() + err := tmpl(c.OutOrStdout(), c.HelpTemplate(), c) + if err != nil { + c.Println(err) } } } -var minUsagePadding int = 25 +// Help puts out the help for the command. +// Used when a user calls help [command]. +// Can be defined by user by overriding HelpFunc. 
+func (c *Command) Help() error { + c.HelpFunc()(c, []string{}) + return nil +} + +func (c *Command) UsageString() string { + tmpOutput := c.output + bb := new(bytes.Buffer) + c.SetOutput(bb) + c.Usage() + c.output = tmpOutput + return bb.String() +} + +var minUsagePadding = 25 func (c *Command) UsagePadding() int { if c.parent == nil || minUsagePadding > c.parent.commandsMaxUseLen { return minUsagePadding - } else { - return c.parent.commandsMaxUseLen } + return c.parent.commandsMaxUseLen } -var minCommandPathPadding int = 11 +var minCommandPathPadding = 11 // func (c *Command) CommandPathPadding() int { if c.parent == nil || minCommandPathPadding > c.parent.commandsMaxCommandPathLen { return minCommandPathPadding - } else { - return c.parent.commandsMaxCommandPathLen } + return c.parent.commandsMaxCommandPathLen } -var minNamePadding int = 11 +var minNamePadding = 11 func (c *Command) NamePadding() int { if c.parent == nil || minNamePadding > c.parent.commandsMaxNameLen { return minNamePadding - } else { - return c.parent.commandsMaxNameLen } + return c.parent.commandsMaxNameLen } func (c *Command) UsageTemplate() string { @@ -248,10 +292,9 @@ func (c *Command) UsageTemplate() string { if c.HasParent() { return c.parent.UsageTemplate() - } else { - return `{{ $cmd := . }} -Usage: {{if .Runnable}} - {{.UseLine}}{{if .HasFlags}} [flags]{{end}}{{end}}{{if .HasSubCommands}} + } + return `Usage:{{if .Runnable}} + {{if .HasAvailableFlags}}{{appendIfNotPresent .UseLine "[flags]"}}{{else}}{{.UseLine}}{{end}}{{end}}{{if .HasAvailableSubCommands}} {{ .CommandPath}} [command]{{end}}{{if gt .Aliases 0}} Aliases: @@ -259,23 +302,22 @@ Aliases: {{end}}{{if .HasExample}} Examples: -{{ .Example }}{{end}}{{ if .HasNonHelpSubCommands}} +{{ .Example }}{{end}}{{ if .HasAvailableSubCommands}} -Available Commands: {{range .Commands}}{{if (not .IsHelpCommand)}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasLocalFlags}} +Available Commands:{{range .Commands}}{{if .IsAvailableCommand}} + {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableLocalFlags}} Flags: -{{.LocalFlags.FlagUsages}}{{end}}{{ if .HasInheritedFlags}} +{{.LocalFlags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableInheritedFlags}} Global Flags: -{{.InheritedFlags.FlagUsages}}{{end}}{{if .HasHelpSubCommands}} +{{.InheritedFlags.FlagUsages | trimRightSpace}}{{end}}{{if .HasHelpSubCommands}} -Additional help topics: {{range .Commands}}{{if .IsHelpCommand}} - {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}}{{end}}{{end}}{{ if .HasSubCommands }} +Additional help topics:{{range .Commands}}{{if .IsHelpCommand}} + {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasAvailableSubCommands }} -Use "{{.CommandPath}} [command] --help" for more information about a command. -{{end}}` - } +Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}} +` } func (c *Command) HelpTemplate() string { @@ -285,14 +327,13 @@ func (c *Command) HelpTemplate() string { if c.HasParent() { return c.parent.HelpTemplate() - } else { - return `{{with or .Long .Short }}{{. | trim}}{{end}} -{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}} -` } + return `{{with or .Long .Short }}{{. | trim}} + +{{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` } -// Really only used when casting a command to a commander +// Really only used when casting a command to a commander. 
func (c *Command) resetChildrensParents() { for _, x := range c.commands { x.parent = c @@ -420,28 +461,83 @@ func (c *Command) Find(args []string) (*Command, []string, error) { if !commandFound.HasSubCommands() { return commandFound, a, nil } + // root command with subcommands, do subcommand checking if commandFound == c && len(argsWOflags) > 0 { - return commandFound, a, fmt.Errorf("unknown command %q for %q", argsWOflags[0], commandFound.CommandPath()) + suggestionsString := "" + if !c.DisableSuggestions { + if c.SuggestionsMinimumDistance <= 0 { + c.SuggestionsMinimumDistance = 2 + } + if suggestions := c.SuggestionsFor(argsWOflags[0]); len(suggestions) > 0 { + suggestionsString += "\n\nDid you mean this?\n" + for _, s := range suggestions { + suggestionsString += fmt.Sprintf("\t%v\n", s) + } + } + } + return commandFound, a, fmt.Errorf("unknown command %q for %q%s", argsWOflags[0], commandFound.CommandPath(), suggestionsString) } return commandFound, a, nil } +func (c *Command) SuggestionsFor(typedName string) []string { + suggestions := []string{} + for _, cmd := range c.commands { + if cmd.IsAvailableCommand() { + levenshteinDistance := ld(typedName, cmd.Name(), true) + suggestByLevenshtein := levenshteinDistance <= c.SuggestionsMinimumDistance + suggestByPrefix := strings.HasPrefix(strings.ToLower(cmd.Name()), strings.ToLower(typedName)) + if suggestByLevenshtein || suggestByPrefix { + suggestions = append(suggestions, cmd.Name()) + } + for _, explicitSuggestion := range cmd.SuggestFor { + if strings.EqualFold(typedName, explicitSuggestion) { + suggestions = append(suggestions, cmd.Name()) + } + } + } + } + return suggestions +} + +func (c *Command) VisitParents(fn func(*Command)) { + var traverse func(*Command) *Command + + traverse = func(x *Command) *Command { + if x != c { + fn(x) + } + if x.HasParent() { + return traverse(x.parent) + } + return x + } + traverse(c) +} + func (c *Command) Root() *Command { var findRoot func(*Command) *Command findRoot = func(x *Command) *Command { if x.HasParent() { return findRoot(x.parent) - } else { - return x } + return x } return findRoot(c) } +// ArgsLenAtDash will return the length of f.Args at the moment when a -- was +// found during arg parsing. This allows your program to know which args were +// before the -- and which came after. (Description from +// https://godoc.org/github.com/spf13/pflag#FlagSet.ArgsLenAtDash). +func (c *Command) ArgsLenAtDash() int { + return c.Flags().ArgsLenAtDash() +} + func (c *Command) execute(a []string) (err error) { if c == nil { return fmt.Errorf("Called Execute() on a nil Command") @@ -451,36 +547,75 @@ func (c *Command) execute(a []string) (err error) { c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) } + // initialize help flag as the last point possible to allow for user + // overriding + c.initHelpFlag() + err = c.ParseFlags(a) if err != nil { return err } // If help is called, regardless of other flags, return we want help - // Also say we need help if c.Run is nil. - if c.helpFlagVal || !c.Runnable() { + // Also say we need help if the command isn't runnable. + helpVal, err := c.Flags().GetBool("help") + if err != nil { + // should be impossible to get here as we always declare a help + // flag in initHelpFlag() + c.Println("\"help\" flag declared as non-bool. 
Please correct your code") + return err + } + + if helpVal || !c.Runnable() { return flag.ErrHelp } c.preRun() + argWoFlags := c.Flags().Args() + if c.DisableFlagParsing { + argWoFlags = a + } for p := c; p != nil; p = p.Parent() { - if p.PersistentPreRun != nil { + if p.PersistentPreRunE != nil { + if err := p.PersistentPreRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPreRun != nil { p.PersistentPreRun(c, argWoFlags) break } } - if c.PreRun != nil { + if c.PreRunE != nil { + if err := c.PreRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PreRun != nil { c.PreRun(c, argWoFlags) } - c.Run(c, argWoFlags) - - if c.PostRun != nil { + if c.RunE != nil { + if err := c.RunE(c, argWoFlags); err != nil { + return err + } + } else { + c.Run(c, argWoFlags) + } + if c.PostRunE != nil { + if err := c.PostRunE(c, argWoFlags); err != nil { + return err + } + } else if c.PostRun != nil { c.PostRun(c, argWoFlags) } for p := c; p != nil; p = p.Parent() { - if p.PersistentPostRun != nil { + if p.PersistentPostRunE != nil { + if err := p.PersistentPostRunE(c, argWoFlags); err != nil { + return err + } + break + } else if p.PersistentPostRun != nil { p.PersistentPostRun(c, argWoFlags) break } @@ -502,36 +637,38 @@ func (c *Command) errorMsgFromParse() string { if len(x) > 0 { return x[0] - } else { - return "" } + return "" } // Call execute to use the args (os.Args[1:] by default) // and run through the command tree finding appropriate matches // for commands and then corresponding flags. -func (c *Command) Execute() (err error) { +func (c *Command) Execute() error { + _, err := c.ExecuteC() + return err +} + +func (c *Command) ExecuteC() (cmd *Command, err error) { // Regardless of what command execute is called on, run on Root only if c.HasParent() { - return c.Root().Execute() + return c.Root().ExecuteC() } - if EnableWindowsMouseTrap && runtime.GOOS == "windows" { - if mousetrap.StartedByExplorer() { - c.Print(MousetrapHelpText) - time.Sleep(5 * time.Second) - os.Exit(1) - } + // windows hook + if preExecHookFn != nil { + preExecHookFn(c) } // initialize help as the last point possible to allow for user // overriding - c.initHelp() + c.initHelpCmd() var args []string - if len(c.args) == 0 { + // Workaround FAIL with "go test -v" or "cobra.test -test.v", see #155 + if c.args == nil && filepath.Base(os.Args[0]) != "cobra.test" { args = os.Args[1:] } else { args = c.args @@ -543,25 +680,44 @@ func (c *Command) Execute() (err error) { if cmd != nil { c = cmd } - c.Println("Error:", err.Error()) - c.Printf("Run '%v --help' for usage.\n", c.CommandPath()) - return err + if !c.SilenceErrors { + c.Println("Error:", err.Error()) + c.Printf("Run '%v --help' for usage.\n", c.CommandPath()) + } + return c, err } - err = cmd.execute(flags) if err != nil { + // Always show help if requested, even if SilenceErrors is in + // effect if err == flag.ErrHelp { - cmd.Help() - return nil + cmd.HelpFunc()(cmd, args) + return cmd, nil } - c.Println(cmd.UsageString()) - c.Println("Error:", err.Error()) - } - return + // If root command has SilentErrors flagged, + // all subcommands should respect it + if !cmd.SilenceErrors && !c.SilenceErrors { + c.Println("Error:", err.Error()) + } + + // If root command has SilentUsage flagged, + // all subcommands should respect it + if !cmd.SilenceUsage && !c.SilenceUsage { + c.Println(cmd.UsageString()) + } + return cmd, err + } + return cmd, nil } -func (c *Command) initHelp() { +func (c *Command) initHelpFlag() { + if 
c.Flags().Lookup("help") == nil { + c.Flags().BoolP("help", "h", false, "help for "+c.Name()) + } +} + +func (c *Command) initHelpCmd() { if c.helpCommand == nil { if !c.HasSubCommands() { return @@ -572,22 +728,43 @@ func (c *Command) initHelp() { Short: "Help about any command", Long: `Help provides help for any command in the application. Simply type ` + c.Name() + ` help [path to command] for full details.`, - Run: c.HelpFunc(), PersistentPreRun: func(cmd *Command, args []string) {}, PersistentPostRun: func(cmd *Command, args []string) {}, + + Run: func(c *Command, args []string) { + cmd, _, e := c.Root().Find(args) + if cmd == nil || e != nil { + c.Printf("Unknown help topic %#q.", args) + c.Root().Usage() + } else { + cmd.Help() + } + }, } } c.AddCommand(c.helpCommand) } -// Used for testing +// Used for testing. func (c *Command) ResetCommands() { c.commands = nil c.helpCommand = nil } -//Commands returns a slice of child commands. +// Sorts commands by their names. +type commandSorterByName []*Command + +func (c commandSorterByName) Len() int { return len(c) } +func (c commandSorterByName) Swap(i, j int) { c[i], c[j] = c[j], c[i] } +func (c commandSorterByName) Less(i, j int) bool { return c[i].Name() < c[j].Name() } + +// Commands returns a sorted slice of child commands. func (c *Command) Commands() []*Command { + // do not sort commands if it already sorted or sorting was disabled + if EnableCommandSorting && !c.commandsAreSorted { + sort.Sort(commandSorterByName(c.commands)) + c.commandsAreSorted = true + } return c.commands } @@ -611,15 +788,16 @@ func (c *Command) AddCommand(cmds ...*Command) { if nameLen > c.commandsMaxNameLen { c.commandsMaxNameLen = nameLen } - // If glabal normalization function exists, update all children + // If global normalization function exists, update all children if c.globNormFunc != nil { x.SetGlobalNormalizationFunc(c.globNormFunc) } c.commands = append(c.commands, x) + c.commandsAreSorted = false } } -// AddCommand removes one or more commands from a parent command. +// RemoveCommand removes one or more commands from a parent command. func (c *Command) RemoveCommand(cmds ...*Command) { commands := []*Command{} main: @@ -653,50 +831,23 @@ main: } } -// Convenience method to Print to the defined output +// Print is a convenience method to Print to the defined output, fallback to Stderr if not set. func (c *Command) Print(i ...interface{}) { - fmt.Fprint(c.Out(), i...) + fmt.Fprint(c.OutOrStderr(), i...) } -// Convenience method to Println to the defined output +// Println is a convenience method to Println to the defined output, fallback to Stderr if not set. func (c *Command) Println(i ...interface{}) { str := fmt.Sprintln(i...) c.Print(str) } -// Convenience method to Printf to the defined output +// Printf is a convenience method to Printf to the defined output, fallback to Stderr if not set. func (c *Command) Printf(format string, i ...interface{}) { str := fmt.Sprintf(format, i...) 
c.Print(str) } -// Output the usage for the command -// Used when a user provides invalid input -// Can be defined by user by overriding UsageFunc -func (c *Command) Usage() error { - c.mergePersistentFlags() - err := c.UsageFunc()(c) - return err -} - -// Output the help for the command -// Used when a user calls help [command] -// by the default HelpFunc in the commander -func (c *Command) Help() error { - c.mergePersistentFlags() - err := tmpl(c.getOutOrStdout(), c.HelpTemplate(), c) - return err -} - -func (c *Command) UsageString() string { - tmpOutput := c.output - bb := new(bytes.Buffer) - c.SetOutput(bb) - c.Usage() - c.output = tmpOutput - return bb.String() -} - // CommandPath returns the full path to this command. func (c *Command) CommandPath() string { str := c.Name() @@ -708,7 +859,7 @@ func (c *Command) CommandPath() string { return str } -//The full usage for a given command (including parents) +// UseLine puts out the full usage for a given command (including parents). func (c *Command) UseLine() string { str := "" if c.HasParent() { @@ -718,7 +869,7 @@ func (c *Command) UseLine() string { } // For use in determining which flags have been assigned to which commands -// and which persist +// and which persist. func (c *Command) DebugFlags() { c.Println("DebugFlags called on", c.Name()) var debugflags func(*Command) @@ -775,7 +926,7 @@ func (c *Command) Name() string { return name } -// Determine if a given string is an alias of the command. +// HasAlias determines if a given string is an alias of the command. func (c *Command) HasAlias(s string) bool { for _, a := range c.Aliases { if a == s { @@ -793,66 +944,100 @@ func (c *Command) HasExample() bool { return len(c.Example) > 0 } -// Determine if the command is itself runnable +// Runnable determines if the command is itself runnable. func (c *Command) Runnable() bool { - return c.Run != nil + return c.Run != nil || c.RunE != nil } -// Determine if the command has children commands +// HasSubCommands determines if the command has children commands. func (c *Command) HasSubCommands() bool { return len(c.commands) > 0 } -func (c *Command) IsHelpCommand() bool { - if c.Runnable() { +// IsAvailableCommand determines if a command is available as a non-help command +// (this includes all non deprecated/hidden commands). +func (c *Command) IsAvailableCommand() bool { + if len(c.Deprecated) != 0 || c.Hidden { return false } + + if c.HasParent() && c.Parent().helpCommand == c { + return false + } + + if c.Runnable() || c.HasAvailableSubCommands() { + return true + } + + return false +} + +// IsHelpCommand determines if a command is a 'help' command; a help command is +// determined by the fact that it is NOT runnable/hidden/deprecated, and has no +// sub commands that are runnable/hidden/deprecated. +func (c *Command) IsHelpCommand() bool { + + // if a command is runnable, deprecated, or hidden it is not a 'help' command + if c.Runnable() || len(c.Deprecated) != 0 || c.Hidden { + return false + } + + // if any non-help sub commands are found, the command is not a 'help' command for _, sub := range c.commands { - if len(sub.Deprecated) != 0 { - continue - } if !sub.IsHelpCommand() { return false } } + + // the command either has no sub commands, or no non-help sub commands return true } +// HasHelpSubCommands determines if a command has any available 'help' sub commands +// that need to be shown in the usage/help default template under 'additional help +// topics'. 
func (c *Command) HasHelpSubCommands() bool { + + // return true on the first found available 'help' sub command for _, sub := range c.commands { - if len(sub.Deprecated) != 0 { - continue - } if sub.IsHelpCommand() { return true } } + + // the command either has no sub commands, or no available 'help' sub commands return false } -func (c *Command) HasNonHelpSubCommands() bool { +// HasAvailableSubCommands determines if a command has available sub commands that +// need to be shown in the usage/help default template under 'available commands'. +func (c *Command) HasAvailableSubCommands() bool { + + // return true on the first found available (non deprecated/help/hidden) + // sub command for _, sub := range c.commands { - if len(sub.Deprecated) != 0 { - continue - } - if !sub.IsHelpCommand() { + if sub.IsAvailableCommand() { return true } } + + // the command either has no sub comamnds, or no available (non deprecated/help/hidden) + // sub commands return false } -// Determine if the command is a child command +// HasParent determines if the command is a child command. func (c *Command) HasParent() bool { return c.parent != nil } -// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists +// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists. func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { return c.globNormFunc } -// Get the complete FlagSet that applies to this command (local and persistent declared here and by all parents) +// Flage returns the complete FlagSet that applies +// to this command (local and persistent declared here and by all parents). func (c *Command) Flags() *flag.FlagSet { if c.flags == nil { c.flags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) @@ -860,12 +1045,24 @@ func (c *Command) Flags() *flag.FlagSet { c.flagErrorBuf = new(bytes.Buffer) } c.flags.SetOutput(c.flagErrorBuf) - c.PersistentFlags().BoolVarP(&c.helpFlagVal, "help", "h", false, "help for "+c.Name()) } return c.flags } -// Get the local FlagSet specifically set in the current command +// LocalNonPersistentFlags are flags specific to this command which will NOT persist to subcommands. +func (c *Command) LocalNonPersistentFlags() *flag.FlagSet { + persistentFlags := c.PersistentFlags() + + out := flag.NewFlagSet(c.Name(), flag.ContinueOnError) + c.LocalFlags().VisitAll(func(f *flag.Flag) { + if persistentFlags.Lookup(f.Name) == nil { + out.AddFlag(f) + } + }) + return out +} + +// LocalFlags returns the local FlagSet specifically set in the current command. func (c *Command) LocalFlags() *flag.FlagSet { c.mergePersistentFlags() @@ -873,10 +1070,17 @@ func (c *Command) LocalFlags() *flag.FlagSet { c.lflags.VisitAll(func(f *flag.Flag) { local.AddFlag(f) }) + if !c.HasParent() { + flag.CommandLine.VisitAll(func(f *flag.Flag) { + if local.Lookup(f.Name) == nil { + local.AddFlag(f) + } + }) + } return local } -// All Flags which were inherited from parents commands +// InheritedFlags returns all flags which were inherited from parents commands. func (c *Command) InheritedFlags() *flag.FlagSet { c.mergePersistentFlags() @@ -905,12 +1109,12 @@ func (c *Command) InheritedFlags() *flag.FlagSet { return inherited } -// All Flags which were not inherited from parent commands +// NonInheritedFlags returns all flags which were not inherited from parent commands. 
func (c *Command) NonInheritedFlags() *flag.FlagSet { return c.LocalFlags() } -// Get the Persistent FlagSet specifically set in the current command +// PersistentFlags returns the persistent FlagSet specifically set in the current command. func (c *Command) PersistentFlags() *flag.FlagSet { if c.pflags == nil { c.pflags = flag.NewFlagSet(c.Name(), flag.ContinueOnError) @@ -922,7 +1126,7 @@ func (c *Command) PersistentFlags() *flag.FlagSet { return c.pflags } -// For use in testing +// ResetFlags is used in testing. func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() @@ -932,26 +1136,50 @@ func (c *Command) ResetFlags() { c.pflags.SetOutput(c.flagErrorBuf) } -// Does the command contain any flags (local plus persistent from the entire structure) +// Does the command contain any flags (local plus persistent from the entire structure). func (c *Command) HasFlags() bool { return c.Flags().HasFlags() } -// Does the command contain persistent flags +// Does the command contain persistent flags. func (c *Command) HasPersistentFlags() bool { return c.PersistentFlags().HasFlags() } -// Does the command has flags specifically declared locally +// Does the command has flags specifically declared locally. func (c *Command) HasLocalFlags() bool { return c.LocalFlags().HasFlags() } +// Does the command have flags inherited from its parent command. func (c *Command) HasInheritedFlags() bool { return c.InheritedFlags().HasFlags() } -// Climbs up the command tree looking for matching flag +// Does the command contain any flags (local plus persistent from the entire +// structure) which are not hidden or deprecated. +func (c *Command) HasAvailableFlags() bool { + return c.Flags().HasAvailableFlags() +} + +// Does the command contain persistent flags which are not hidden or deprecated. +func (c *Command) HasAvailablePersistentFlags() bool { + return c.PersistentFlags().HasAvailableFlags() +} + +// Does the command has flags specifically declared locally which are not hidden +// or deprecated. +func (c *Command) HasAvailableLocalFlags() bool { + return c.LocalFlags().HasAvailableFlags() +} + +// Does the command have flags inherited from its parent command which are +// not hidden or deprecated. +func (c *Command) HasAvailableInheritedFlags() bool { + return c.InheritedFlags().HasAvailableFlags() +} + +// Flag climbs up the command tree looking for matching flag. func (c *Command) Flag(name string) (flag *flag.Flag) { flag = c.Flags().Lookup(name) @@ -962,7 +1190,7 @@ func (c *Command) Flag(name string) (flag *flag.Flag) { return } -// recursively find matching persistent flag +// Recursively find matching persistent flag. func (c *Command) persistentFlag(name string) (flag *flag.Flag) { if c.HasPersistentFlags() { flag = c.PersistentFlags().Lookup(name) @@ -974,13 +1202,17 @@ func (c *Command) persistentFlag(name string) (flag *flag.Flag) { return } -// Parses persistent flag tree & local flags +// ParseFlags parses persistent flag tree and local flags. func (c *Command) ParseFlags(args []string) (err error) { + if c.DisableFlagParsing { + return nil + } c.mergePersistentFlags() err = c.Flags().Parse(args) return } +// Parent returns a commands parent command. 
func (c *Command) Parent() *Command { return c.parent } diff --git a/vendor/github.com/spf13/cobra/command_notwin.go b/vendor/github.com/spf13/cobra/command_notwin.go new file mode 100644 index 0000000..6159c1c --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_notwin.go @@ -0,0 +1,5 @@ +// +build !windows + +package cobra + +var preExecHookFn func(*Command) diff --git a/vendor/github.com/spf13/cobra/command_win.go b/vendor/github.com/spf13/cobra/command_win.go new file mode 100644 index 0000000..4b0eaa1 --- /dev/null +++ b/vendor/github.com/spf13/cobra/command_win.go @@ -0,0 +1,26 @@ +// +build windows + +package cobra + +import ( + "os" + "time" + + "github.com/inconshreveable/mousetrap" +) + +var preExecHookFn = preExecHook + +// enables an information splash screen on Windows if the CLI is started from explorer.exe. +var MousetrapHelpText string = `This is a command line tool + +You need to open cmd.exe and run it from there. +` + +func preExecHook(c *Command) { + if mousetrap.StartedByExplorer() { + c.Print(MousetrapHelpText) + time.Sleep(5 * time.Second) + os.Exit(1) + } +} diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml new file mode 100644 index 0000000..0a7c136 --- /dev/null +++ b/vendor/github.com/spf13/pflag/.travis.yml @@ -0,0 +1,21 @@ +sudo: false + +language: go + +go: + - 1.5.4 + - 1.6.3 + - 1.7 + - tip + +matrix: + allow_failures: + - go: tip +install: + - go get github.com/golang/lint/golint + - export PATH=$GOPATH/bin:$PATH + - go install ./... + +script: + - verify/all.sh -v + - go test ./... diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/LICENSE b/vendor/github.com/spf13/pflag/LICENSE similarity index 100% rename from Godeps/_workspace/src/github.com/spf13/pflag/LICENSE rename to vendor/github.com/spf13/pflag/LICENSE diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/README.md b/vendor/github.com/spf13/pflag/README.md similarity index 76% rename from Godeps/_workspace/src/github.com/spf13/pflag/README.md rename to vendor/github.com/spf13/pflag/README.md index deee931..08ad945 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/README.md +++ b/vendor/github.com/spf13/pflag/README.md @@ -85,7 +85,7 @@ fmt.Println("flagvar has value ", flagvar) ``` There are helpers function to get values later if you have the FlagSet but -it was difficult to keep up with all of the the flag pointers in your code. +it was difficult to keep up with all of the flag pointers in your code. If you have a pflag.FlagSet with a flag called 'flagname' of type int you can use GetInt() to get the int value. But notice that 'flagname' must exist and it must be an int. GetString("flagname") will fail. @@ -216,6 +216,53 @@ func aliasNormalizeFunc(f *pflag.FlagSet, name string) pflag.NormalizedName { myFlagSet.SetNormalizeFunc(aliasNormalizeFunc) ``` +## Deprecating a flag or its shorthand +It is possible to deprecate a flag, or just its shorthand. Deprecating a flag/shorthand hides it from help text and prints a usage message when the deprecated flag/shorthand is used. + +**Example #1**: You want to deprecate a flag named "badflag" as well as inform the users what flag they should use instead. +```go +// deprecate a flag by specifying its name and a usage message +flags.MarkDeprecated("badflag", "please use --good-flag instead") +``` +This hides "badflag" from help text, and prints `Flag --badflag has been deprecated, please use --good-flag instead` when "badflag" is used. 
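The same call also works from a cobra command, since `Command.Flags()` returns a `pflag.FlagSet`. A minimal sketch (illustration only, using a hypothetical `rootCmd` that is not part of this patch):
```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	// Hypothetical root command, used only to show the call site.
	rootCmd := &cobra.Command{
		Use: "app",
		Run: func(cmd *cobra.Command, args []string) {},
	}

	// Declare the flag, then mark it deprecated: it keeps working, is hidden
	// from help text, and prints the usage message whenever it is used.
	rootCmd.Flags().Bool("badflag", false, "old behaviour toggle")
	if err := rootCmd.Flags().MarkDeprecated("badflag", "please use --good-flag instead"); err != nil {
		fmt.Println(err)
	}

	rootCmd.Execute()
}
```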
+ +**Example #2**: You want to keep a flag name "noshorthandflag" but deprecate its shortname "n". +```go +// deprecate a flag shorthand by specifying its flag name and a usage message +flags.MarkShorthandDeprecated("noshorthandflag", "please use --noshorthandflag only") +``` +This hides the shortname "n" from help text, and prints `Flag shorthand -n has been deprecated, please use --noshorthandflag only` when the shorthand "n" is used. + +Note that usage message is essential here, and it should not be empty. + +## Hidden flags +It is possible to mark a flag as hidden, meaning it will still function as normal, however will not show up in usage/help text. + +**Example**: You have a flag named "secretFlag" that you need for internal use only and don't want it showing up in help text, or for its usage text to be available. +```go +// hide a flag by specifying its name +flags.MarkHidden("secretFlag") +``` + +## Supporting Go flags when using pflag +In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary +to support flags defined by third-party dependencies (e.g. `golang/glog`). + +**Example**: You want to add the Go flags to the `CommandLine` flagset +```go +import ( + goflag "flag" + flag "github.com/spf13/pflag" +) + +var ip *int = flag.Int("flagname", 1234, "help message for flagname") + +func main() { + flag.CommandLine.AddGoFlagSet(goflag.CommandLine) + flag.Parse() +} +``` + ## More info You can see the full reference documentation of the pflag package diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/bool.go b/vendor/github.com/spf13/pflag/bool.go similarity index 84% rename from Godeps/_workspace/src/github.com/spf13/pflag/bool.go rename to vendor/github.com/spf13/pflag/bool.go index 04c9b5a..c4c5c0b 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/bool.go +++ b/vendor/github.com/spf13/pflag/bool.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // optional interface to indicate boolean flags that can be // supplied without "=value" text @@ -30,7 +27,7 @@ func (b *boolValue) Type() string { return "bool" } -func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } +func (b *boolValue) String() string { return strconv.FormatBool(bool(*b)) } func (b *boolValue) IsBoolFlag() bool { return true } @@ -53,7 +50,7 @@ func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { f.BoolVarP(p, name, "", value, usage) } -// Like BoolVar, but accepts a shorthand letter that can be used after a single dash. +// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) BoolVarP(p *bool, name, shorthand string, value bool, usage string) { flag := f.VarPF(newBoolValue(value, p), name, shorthand, usage) flag.NoOptDefVal = "true" @@ -65,7 +62,7 @@ func BoolVar(p *bool, name string, value bool, usage string) { BoolVarP(p, name, "", value, usage) } -// Like BoolVar, but accepts a shorthand letter that can be used after a single dash. +// BoolVarP is like BoolVar, but accepts a shorthand letter that can be used after a single dash. 
func BoolVarP(p *bool, name, shorthand string, value bool, usage string) { flag := CommandLine.VarPF(newBoolValue(value, p), name, shorthand, usage) flag.NoOptDefVal = "true" @@ -77,7 +74,7 @@ func (f *FlagSet) Bool(name string, value bool, usage string) *bool { return f.BoolP(name, "", value, usage) } -// Like Bool, but accepts a shorthand letter that can be used after a single dash. +// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) BoolP(name, shorthand string, value bool, usage string) *bool { p := new(bool) f.BoolVarP(p, name, shorthand, value, usage) @@ -90,7 +87,7 @@ func Bool(name string, value bool, usage string) *bool { return BoolP(name, "", value, usage) } -// Like Bool, but accepts a shorthand letter that can be used after a single dash. +// BoolP is like Bool, but accepts a shorthand letter that can be used after a single dash. func BoolP(name, shorthand string, value bool, usage string) *bool { b := CommandLine.BoolP(name, shorthand, value, usage) return b diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go new file mode 100644 index 0000000..d22be41 --- /dev/null +++ b/vendor/github.com/spf13/pflag/count.go @@ -0,0 +1,94 @@ +package pflag + +import "strconv" + +// -- count Value +type countValue int + +func newCountValue(val int, p *int) *countValue { + *p = val + return (*countValue)(p) +} + +func (i *countValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + // -1 means that no specific value was passed, so increment + if v == -1 { + *i = countValue(*i + 1) + } else { + *i = countValue(v) + } + return err +} + +func (i *countValue) Type() string { + return "count" +} + +func (i *countValue) String() string { return strconv.Itoa(int(*i)) } + +func countConv(sval string) (interface{}, error) { + i, err := strconv.Atoi(sval) + if err != nil { + return nil, err + } + return i, nil +} + +// GetCount return the int value of a flag with the given name +func (f *FlagSet) GetCount(name string) (int, error) { + val, err := f.getFlagType(name, "count", countConv) + if err != nil { + return 0, err + } + return val.(int), nil +} + +// CountVar defines a count flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +// A count flag will add 1 to its value evey time it is found on the command line +func (f *FlagSet) CountVar(p *int, name string, usage string) { + f.CountVarP(p, name, "", usage) +} + +// CountVarP is like CountVar only take a shorthand for the flag name. +func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) { + flag := f.VarPF(newCountValue(0, p), name, shorthand, usage) + flag.NoOptDefVal = "-1" +} + +// CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set +func CountVar(p *int, name string, usage string) { + CommandLine.CountVar(p, name, usage) +} + +// CountVarP is like CountVar only take a shorthand for the flag name. +func CountVarP(p *int, name, shorthand string, usage string) { + CommandLine.CountVarP(p, name, shorthand, usage) +} + +// Count defines a count flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. 
+// A count flag will add 1 to its value evey time it is found on the command line +func (f *FlagSet) Count(name string, usage string) *int { + p := new(int) + f.CountVarP(p, name, "", usage) + return p +} + +// CountP is like Count only takes a shorthand for the flag name. +func (f *FlagSet) CountP(name, shorthand string, usage string) *int { + p := new(int) + f.CountVarP(p, name, shorthand, usage) + return p +} + +// Count like Count only the flag is placed on the CommandLine isntead of a given flag set +func Count(name string, usage string) *int { + return CommandLine.CountP(name, "", usage) +} + +// CountP is like Count only takes a shorthand for the flag name. +func CountP(name, shorthand string, usage string) *int { + return CommandLine.CountP(name, shorthand, usage) +} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/duration.go b/vendor/github.com/spf13/pflag/duration.go similarity index 87% rename from Godeps/_workspace/src/github.com/spf13/pflag/duration.go rename to vendor/github.com/spf13/pflag/duration.go index 382ffd3..e9debef 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/duration.go +++ b/vendor/github.com/spf13/pflag/duration.go @@ -43,7 +43,7 @@ func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration f.VarP(newDurationValue(value, p), name, "", usage) } -// Like DurationVar, but accepts a shorthand letter that can be used after a single dash. +// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { f.VarP(newDurationValue(value, p), name, shorthand, usage) } @@ -54,7 +54,7 @@ func DurationVar(p *time.Duration, name string, value time.Duration, usage strin CommandLine.VarP(newDurationValue(value, p), name, "", usage) } -// Like DurationVar, but accepts a shorthand letter that can be used after a single dash. +// DurationVarP is like DurationVar, but accepts a shorthand letter that can be used after a single dash. func DurationVarP(p *time.Duration, name, shorthand string, value time.Duration, usage string) { CommandLine.VarP(newDurationValue(value, p), name, shorthand, usage) } @@ -67,7 +67,7 @@ func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time return p } -// Like Duration, but accepts a shorthand letter that can be used after a single dash. +// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { p := new(time.Duration) f.DurationVarP(p, name, shorthand, value, usage) @@ -80,7 +80,7 @@ func Duration(name string, value time.Duration, usage string) *time.Duration { return CommandLine.DurationP(name, "", value, usage) } -// Like Duration, but accepts a shorthand letter that can be used after a single dash. +// DurationP is like Duration, but accepts a shorthand letter that can be used after a single dash. 
func DurationP(name, shorthand string, value time.Duration, usage string) *time.Duration { return CommandLine.DurationP(name, shorthand, value, usage) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go similarity index 56% rename from Godeps/_workspace/src/github.com/spf13/pflag/flag.go rename to vendor/github.com/spf13/pflag/flag.go index 865259b..b0b0d46 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -3,98 +3,98 @@ // license that can be found in the LICENSE file. /* - pflag is a drop-in replacement for Go's flag package, implementing - POSIX/GNU-style --flags. +Package pflag is a drop-in replacement for Go's flag package, implementing +POSIX/GNU-style --flags. - pflag is compatible with the GNU extensions to the POSIX recommendations - for command-line options. See - http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html +pflag is compatible with the GNU extensions to the POSIX recommendations +for command-line options. See +http://www.gnu.org/software/libc/manual/html_node/Argument-Syntax.html - Usage: +Usage: - pflag is a drop-in replacement of Go's native flag package. If you import - pflag under the name "flag" then all code should continue to function - with no changes. +pflag is a drop-in replacement of Go's native flag package. If you import +pflag under the name "flag" then all code should continue to function +with no changes. - import flag "github.com/ogier/pflag" + import flag "github.com/ogier/pflag" There is one exception to this: if you directly instantiate the Flag struct - there is one more field "Shorthand" that you will need to set. - Most code never instantiates this struct directly, and instead uses - functions such as String(), BoolVar(), and Var(), and is therefore - unaffected. +there is one more field "Shorthand" that you will need to set. +Most code never instantiates this struct directly, and instead uses +functions such as String(), BoolVar(), and Var(), and is therefore +unaffected. - Define flags using flag.String(), Bool(), Int(), etc. +Define flags using flag.String(), Bool(), Int(), etc. - This declares an integer flag, -flagname, stored in the pointer ip, with type *int. - var ip = flag.Int("flagname", 1234, "help message for flagname") - If you like, you can bind the flag to a variable using the Var() functions. - var flagvar int - func init() { - flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") - } - Or you can create custom flags that satisfy the Value interface (with - pointer receivers) and couple them to flag parsing by - flag.Var(&flagVal, "name", "help message for flagname") - For such flags, the default value is just the initial value of the variable. +This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + var ip = flag.Int("flagname", 1234, "help message for flagname") +If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int + func init() { + flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") + } +Or you can create custom flags that satisfy the Value interface (with +pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") +For such flags, the default value is just the initial value of the variable. - After all flags are defined, call - flag.Parse() - to parse the command line into the defined flags. 
+After all flags are defined, call + flag.Parse() +to parse the command line into the defined flags. - Flags may then be used directly. If you're using the flags themselves, - they are all pointers; if you bind to variables, they're values. - fmt.Println("ip has value ", *ip) - fmt.Println("flagvar has value ", flagvar) +Flags may then be used directly. If you're using the flags themselves, +they are all pointers; if you bind to variables, they're values. + fmt.Println("ip has value ", *ip) + fmt.Println("flagvar has value ", flagvar) - After parsing, the arguments after the flag are available as the - slice flag.Args() or individually as flag.Arg(i). - The arguments are indexed from 0 through flag.NArg()-1. +After parsing, the arguments after the flag are available as the +slice flag.Args() or individually as flag.Arg(i). +The arguments are indexed from 0 through flag.NArg()-1. - The pflag package also defines some new functions that are not in flag, - that give one-letter shorthands for flags. You can use these by appending - 'P' to the name of any function that defines a flag. - var ip = flag.IntP("flagname", "f", 1234, "help message") - var flagvar bool - func init() { - flag.BoolVarP("boolname", "b", true, "help message") - } - flag.VarP(&flagVar, "varname", "v", 1234, "help message") - Shorthand letters can be used with single dashes on the command line. - Boolean shorthand flags can be combined with other shorthand flags. +The pflag package also defines some new functions that are not in flag, +that give one-letter shorthands for flags. You can use these by appending +'P' to the name of any function that defines a flag. + var ip = flag.IntP("flagname", "f", 1234, "help message") + var flagvar bool + func init() { + flag.BoolVarP("boolname", "b", true, "help message") + } + flag.VarP(&flagVar, "varname", "v", 1234, "help message") +Shorthand letters can be used with single dashes on the command line. +Boolean shorthand flags can be combined with other shorthand flags. - Command line flag syntax: - --flag // boolean flags only - --flag=x +Command line flag syntax: + --flag // boolean flags only + --flag=x - Unlike the flag package, a single dash before an option means something - different than a double dash. Single dashes signify a series of shorthand - letters for flags. All but the last shorthand letter must be boolean flags. - // boolean flags - -f - -abc - // non-boolean flags - -n 1234 - -Ifile - // mixed - -abcs "hello" - -abcn1234 +Unlike the flag package, a single dash before an option means something +different than a double dash. Single dashes signify a series of shorthand +letters for flags. All but the last shorthand letter must be boolean flags. + // boolean flags + -f + -abc + // non-boolean flags + -n 1234 + -Ifile + // mixed + -abcs "hello" + -abcn1234 - Flag parsing stops after the terminator "--". Unlike the flag package, - flags can be interspersed with arguments anywhere on the command line - before this terminator. +Flag parsing stops after the terminator "--". Unlike the flag package, +flags can be interspersed with arguments anywhere on the command line +before this terminator. - Integer flags accept 1234, 0664, 0x1234 and may be negative. - Boolean flags (in their long form) accept 1, 0, t, f, true, false, - TRUE, FALSE, True, False. - Duration flags accept any input valid for time.ParseDuration. +Integer flags accept 1234, 0664, 0x1234 and may be negative. +Boolean flags (in their long form) accept 1, 0, t, f, true, false, +TRUE, FALSE, True, False. 
+Duration flags accept any input valid for time.ParseDuration. - The default set of command-line flags is controlled by - top-level functions. The FlagSet type allows one to define - independent sets of flags, such as to implement subcommands - in a command-line interface. The methods of FlagSet are - analogous to the top-level functions for the command-line - flag set. +The default set of command-line flags is controlled by +top-level functions. The FlagSet type allows one to define +independent sets of flags, such as to implement subcommands +in a command-line interface. The methods of FlagSet are +analogous to the top-level functions for the command-line +flag set. */ package pflag @@ -115,8 +115,11 @@ var ErrHelp = errors.New("pflag: help requested") type ErrorHandling int const ( + // ContinueOnError will return an err from Parse() if an error is found ContinueOnError ErrorHandling = iota + // ExitOnError will call os.Exit(2) if an error is found when parsing ExitOnError + // PanicOnError will panic() if an error is found when parsing flags PanicOnError ) @@ -137,6 +140,7 @@ type FlagSet struct { formal map[NormalizedName]*Flag shorthands map[byte]*Flag args []string // arguments after flags + argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no -- exitOnError bool // does the program exit if there's an error? errorHandling ErrorHandling output io.Writer // nil means stderr; use out() accessor @@ -146,15 +150,17 @@ type FlagSet struct { // A Flag represents the state of a flag. type Flag struct { - Name string // name as it appears on command line - Shorthand string // one-letter abbreviated flag - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message - Changed bool // If the user set the value (or if left to default) - NoOptDefVal string //default value (as text); if the flag is on the command line without any options - Deprecated string // If this flag is deprecated, this string is the new or now thing to use - Annotations map[string][]string // used by cobra.Command bash autocomple code + Name string // name as it appears on command line + Shorthand string // one-letter abbreviated flag + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message + Changed bool // If the user set the value (or if left to default) + NoOptDefVal string //default value (as text); if the flag is on the command line without any options + Deprecated string // If this flag is deprecated, this string is the new or now thing to use + Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text + ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use + Annotations map[string][]string // used by cobra.Command bash autocomple code } // Value is the interface to the dynamic value stored in a flag. @@ -181,6 +187,11 @@ func sortFlags(flags map[NormalizedName]*Flag) []*Flag { return result } +// SetNormalizeFunc allows you to add a function which can translate flag names. +// Flags added to the FlagSet will be translated and then when anything tries to +// look up the flag that will also be translated. So it would be possible to create +// a flag named "getURL" and have it translated to "geturl". A user could then pass +// "--getUrl" which may also be translated to "geturl" and everything will work. 
func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { f.normalizeNameFunc = n for k, v := range f.formal { @@ -191,6 +202,8 @@ func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedNam } } +// GetNormalizeFunc returns the previously set NormalizeFunc of a function which +// does no translation, if not set previously. func (f *FlagSet) GetNormalizeFunc() func(f *FlagSet, name string) NormalizedName { if f.normalizeNameFunc != nil { return f.normalizeNameFunc @@ -224,10 +237,22 @@ func (f *FlagSet) VisitAll(fn func(*Flag)) { } } +// HasFlags returns a bool to indicate if the FlagSet has any flags definied. func (f *FlagSet) HasFlags() bool { return len(f.formal) > 0 } +// HasAvailableFlags returns a bool to indicate if the FlagSet has any flags +// definied that are not hidden or deprecated. +func (f *FlagSet) HasAvailableFlags() bool { + for _, flag := range f.formal { + if !flag.Hidden && len(flag.Deprecated) == 0 { + return true + } + } + return false +} + // VisitAll visits the command-line flags in lexicographical order, calling // fn for each. It visits all flags, even those not set. func VisitAll(fn func(*Flag)) { @@ -262,12 +287,12 @@ func (f *FlagSet) lookup(name NormalizedName) *Flag { func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval string) (interface{}, error)) (interface{}, error) { flag := f.Lookup(name) if flag == nil { - err := fmt.Errorf("flag accessed but not defined: %s\n", name) + err := fmt.Errorf("flag accessed but not defined: %s", name) return nil, err } if flag.Value.Type() != ftype { - err := fmt.Errorf("trying to get %s value of flag of type %s\n", ftype, flag.Value.Type()) + err := fmt.Errorf("trying to get %s value of flag of type %s", ftype, flag.Value.Type()) return nil, err } @@ -279,16 +304,54 @@ func (f *FlagSet) getFlagType(name string, ftype string, convFunc func(sval stri return result, nil } -// Mark a flag deprecated in your program +// ArgsLenAtDash will return the length of f.Args at the moment when a -- was +// found during arg parsing. This allows your program to know which args were +// before the -- and which came after. +func (f *FlagSet) ArgsLenAtDash() int { + return f.argsLenAtDash +} + +// MarkDeprecated indicated that a flag is deprecated in your program. It will +// continue to function but will not show up in help or usage messages. Using +// this flag will also print the given usageMessage. func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { flag := f.Lookup(name) if flag == nil { return fmt.Errorf("flag %q does not exist", name) } + if len(usageMessage) == 0 { + return fmt.Errorf("deprecated message for flag %q must be set", name) + } flag.Deprecated = usageMessage return nil } +// MarkShorthandDeprecated will mark the shorthand of a flag deprecated in your +// program. It will continue to function but will not show up in help or usage +// messages. Using this flag will also print the given usageMessage. +func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag %q does not exist", name) + } + if len(usageMessage) == 0 { + return fmt.Errorf("deprecated message for flag %q must be set", name) + } + flag.ShorthandDeprecated = usageMessage + return nil +} + +// MarkHidden sets a flag to 'hidden' in your program. It will continue to +// function but will not show up in help or usage messages. 
+func (f *FlagSet) MarkHidden(name string) error { + flag := f.Lookup(name) + if flag == nil { + return fmt.Errorf("flag %q does not exist", name) + } + flag.Hidden = true + return nil +} + // Lookup returns the Flag structure of the named command-line flag, // returning nil if none exists. func Lookup(name string) *Flag { @@ -317,6 +380,9 @@ func (f *FlagSet) Set(name, value string) error { return nil } +// SetAnnotation allows one to set arbitrary annotations on a flag in the FlagSet. +// This is sometimes used by spf13/cobra programs which want to generate additional +// bash completion information. func (f *FlagSet) SetAnnotation(name, key string, values []string) error { normalName := f.normalizeFlagName(name) flag, ok := f.formal[normalName] @@ -330,6 +396,17 @@ func (f *FlagSet) SetAnnotation(name, key string, values []string) error { return nil } +// Changed returns true if the flag was explicitly set during Parse() and false +// otherwise +func (f *FlagSet) Changed(name string) bool { + flag := f.Lookup(name) + // If a flag doesn't exist, it wasn't changed.... + if flag == nil { + return false + } + return flag.Changed +} + // Set sets the value of the named command-line flag. func Set(name, value string) error { return CommandLine.Set(name, value) @@ -338,53 +415,140 @@ func Set(name, value string) error { // PrintDefaults prints, to standard error unless configured // otherwise, the default values of all defined flags in the set. func (f *FlagSet) PrintDefaults() { - f.VisitAll(func(flag *Flag) { - if len(flag.Deprecated) > 0 { - return - } - format := "" - // ex: w/ option string argument '-%s, --%s[=%q]: %s\n' - if len(flag.Shorthand) > 0 { - format = " -%s, --%s" - } else { - format = " %s --%s" - } - if len(flag.NoOptDefVal) > 0 { - format = format + "[" - } - if _, ok := flag.Value.(*stringValue); ok { - format = format + "=%q" - } else { - format = format + "=%s" - } - if len(flag.NoOptDefVal) > 0 { - format = format + "]" - } - format = format + ": %s\n" - fmt.Fprintf(f.out(), format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage) - }) + usages := f.FlagUsages() + fmt.Fprintf(f.out(), "%s", usages) } +// defaultIsZeroValue returns true if the default value for this flag represents +// a zero value. +func (f *Flag) defaultIsZeroValue() bool { + switch f.Value.(type) { + case boolFlag: + return f.DefValue == "false" + case *durationValue: + // Beginning in Go 1.7, duration zero values are "0s" + return f.DefValue == "0" || f.DefValue == "0s" + case *intValue, *int8Value, *int32Value, *int64Value, *uintValue, *uint8Value, *uint16Value, *uint32Value, *uint64Value, *countValue, *float32Value, *float64Value: + return f.DefValue == "0" + case *stringValue: + return f.DefValue == "" + case *ipValue, *ipMaskValue, *ipNetValue: + return f.DefValue == "" + case *intSliceValue, *stringSliceValue, *stringArrayValue: + return f.DefValue == "[]" + default: + switch f.Value.String() { + case "false": + return true + case "": + return true + case "": + return true + case "0": + return true + } + return false + } +} + +// UnquoteUsage extracts a back-quoted name from the usage +// string for a flag and returns it and the un-quoted usage. +// Given "a `name` to show" it returns ("name", "a name to show"). +// If there are no back quotes, the name is an educated guess of the +// type of the flag's value, or the empty string if the flag is boolean. +func UnquoteUsage(flag *Flag) (name string, usage string) { + // Look for a back-quoted name, but avoid the strings package. 
+ usage = flag.Usage + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name = usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break // Only one back quote; use type name. + } + } + + name = flag.Value.Type() + switch name { + case "bool": + name = "" + case "float64": + name = "float" + case "int64": + name = "int" + case "uint64": + name = "uint" + } + + return +} + +// FlagUsages Returns a string containing the usage information for all flags in +// the FlagSet func (f *FlagSet) FlagUsages() string { x := new(bytes.Buffer) + lines := make([]string, 0, len(f.formal)) + + maxlen := 0 f.VisitAll(func(flag *Flag) { - if len(flag.Deprecated) > 0 { + if len(flag.Deprecated) > 0 || flag.Hidden { return } - format := "--%s=%s: %s\n" - if _, ok := flag.Value.(*stringValue); ok { - // put quotes on the value - format = "--%s=%q: %s\n" - } - if len(flag.Shorthand) > 0 { - format = " -%s, " + format + + line := "" + if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 { + line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name) } else { - format = " %s " + format + line = fmt.Sprintf(" --%s", flag.Name) } - fmt.Fprintf(x, format, flag.Shorthand, flag.Name, flag.DefValue, flag.Usage) + + varname, usage := UnquoteUsage(flag) + if len(varname) > 0 { + line += " " + varname + } + if len(flag.NoOptDefVal) > 0 { + switch flag.Value.Type() { + case "string": + line += fmt.Sprintf("[=%q]", flag.NoOptDefVal) + case "bool": + if flag.NoOptDefVal != "true" { + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + default: + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } + } + + // This special character will be replaced with spacing once the + // correct alignment is calculated + line += "\x00" + if len(line) > maxlen { + maxlen = len(line) + } + + line += usage + if !flag.defaultIsZeroValue() { + if flag.Value.Type() == "string" { + line += fmt.Sprintf(" (default %q)", flag.DefValue) + } else { + line += fmt.Sprintf(" (default %s)", flag.DefValue) + } + } + + lines = append(lines, line) }) + for _, line := range lines { + sidx := strings.Index(line, "\x00") + spacing := strings.Repeat(" ", maxlen-sidx) + fmt.Fprintln(x, line[:sidx], spacing, line[sidx+1:]) + } + return x.String() } @@ -405,6 +569,8 @@ func defaultUsage(f *FlagSet) { // Usage prints to standard error a usage message documenting all defined command-line flags. // The function is a variable that may be changed to point to a custom function. +// By default it prints a simple header and calls PrintDefaults; for details about the +// format of the output and how to control it, see the documentation for PrintDefaults. var Usage = func() { fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) PrintDefaults() @@ -453,7 +619,7 @@ func (f *FlagSet) Var(value Value, name string, usage string) { f.VarP(value, name, "", usage) } -// Like VarP, but returns the flag created +// VarPF is like VarP, but returns the flag created func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { // Remember the default value as a string; it won't change. flag := &Flag{ @@ -467,14 +633,15 @@ func (f *FlagSet) VarPF(value Value, name, shorthand, usage string) *Flag { return flag } -// Like Var, but accepts a shorthand letter that can be used after a single dash. +// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. 
func (f *FlagSet) VarP(value Value, name, shorthand, usage string) { _ = f.VarPF(value, name, shorthand, usage) } +// AddFlag will add the flag to the FlagSet func (f *FlagSet) AddFlag(flag *Flag) { // Call normalizeFlagName function only once - var normalizedFlagName NormalizedName = f.normalizeFlagName(flag.Name) + normalizedFlagName := f.normalizeFlagName(flag.Name) _, alreadythere := f.formal[normalizedFlagName] if alreadythere { @@ -508,6 +675,19 @@ func (f *FlagSet) AddFlag(flag *Flag) { f.shorthands[c] = flag } +// AddFlagSet adds one FlagSet to another. If a flag is already present in f +// the flag from newSet will be ignored +func (f *FlagSet) AddFlagSet(newSet *FlagSet) { + if newSet == nil { + return + } + newSet.VisitAll(func(flag *Flag) { + if f.Lookup(flag.Name) == nil { + f.AddFlag(flag) + } + }) +} + // Var defines a flag with the specified name and usage string. The type and // value of the flag are represented by the first argument, of type Value, which // typically holds a user-defined implementation of Value. For instance, the @@ -518,7 +698,7 @@ func Var(value Value, name string, usage string) { CommandLine.VarP(value, name, "", usage) } -// Like Var, but accepts a shorthand letter that can be used after a single dash. +// VarP is like Var, but accepts a shorthand letter that can be used after a single dash. func VarP(value Value, name, shorthand, usage string) { CommandLine.VarP(value, name, shorthand, usage) } @@ -557,9 +737,21 @@ func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error { if len(flag.Deprecated) > 0 { fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) } + if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) { + fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated) + } return nil } +func containsShorthand(arg, shorthand string) bool { + // filter out flags -- + if strings.HasPrefix(arg, "-") { + return false + } + arg = strings.SplitN(arg, "=", 2)[0] + return strings.Contains(arg, shorthand) +} + func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) { a = args name := s[2:] @@ -599,6 +791,9 @@ func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error) } func (f *FlagSet) parseSingleShortArg(shorthands string, args []string) (outShorts string, outArgs []string, err error) { + if strings.HasPrefix(shorthands, "test.") { + return + } outArgs = args outShorts = shorthands[1:] c := shorthands[0] @@ -664,6 +859,7 @@ func (f *FlagSet) parseArgs(args []string) (err error) { if s[1] == '-' { if len(s) == 2 { // "--" terminates the flags + f.argsLenAtDash = len(f.args) f.args = append(f.args, args...) break } @@ -711,7 +907,7 @@ func Parse() { CommandLine.Parse(os.Args[1:]) } -// Whether to support interspersed option/non-option arguments. +// SetInterspersed sets whether to support interspersed option/non-option arguments. func SetInterspersed(interspersed bool) { CommandLine.SetInterspersed(interspersed) } @@ -721,7 +917,7 @@ func Parsed() bool { return CommandLine.Parsed() } -// The default set of command-line flags, parsed from os.Args. +// CommandLine is the default set of command-line flags, parsed from os.Args. 
var CommandLine = NewFlagSet(os.Args[0], ExitOnError) // NewFlagSet returns a new, empty flag set with the specified name and @@ -730,12 +926,13 @@ func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { f := &FlagSet{ name: name, errorHandling: errorHandling, + argsLenAtDash: -1, interspersed: true, } return f } -// Whether to support interspersed option/non-option arguments. +// SetInterspersed sets whether to support interspersed option/non-option arguments. func (f *FlagSet) SetInterspersed(interspersed bool) { f.interspersed = interspersed } @@ -746,4 +943,5 @@ func (f *FlagSet) SetInterspersed(interspersed bool) { func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { f.name = name f.errorHandling = errorHandling + f.argsLenAtDash = -1 } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/float32.go b/vendor/github.com/spf13/pflag/float32.go similarity index 83% rename from Godeps/_workspace/src/github.com/spf13/pflag/float32.go rename to vendor/github.com/spf13/pflag/float32.go index 30174cb..a243f81 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/float32.go +++ b/vendor/github.com/spf13/pflag/float32.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- float32 Value type float32Value float32 @@ -23,7 +20,7 @@ func (f *float32Value) Type() string { return "float32" } -func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) } +func (f *float32Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 32) } func float32Conv(sval string) (interface{}, error) { v, err := strconv.ParseFloat(sval, 32) @@ -48,7 +45,7 @@ func (f *FlagSet) Float32Var(p *float32, name string, value float32, usage strin f.VarP(newFloat32Value(value, p), name, "", usage) } -// Like Float32Var, but accepts a shorthand letter that can be used after a single dash. +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Float32VarP(p *float32, name, shorthand string, value float32, usage string) { f.VarP(newFloat32Value(value, p), name, shorthand, usage) } @@ -59,7 +56,7 @@ func Float32Var(p *float32, name string, value float32, usage string) { CommandLine.VarP(newFloat32Value(value, p), name, "", usage) } -// Like Float32Var, but accepts a shorthand letter that can be used after a single dash. +// Float32VarP is like Float32Var, but accepts a shorthand letter that can be used after a single dash. func Float32VarP(p *float32, name, shorthand string, value float32, usage string) { CommandLine.VarP(newFloat32Value(value, p), name, shorthand, usage) } @@ -72,7 +69,7 @@ func (f *FlagSet) Float32(name string, value float32, usage string) *float32 { return p } -// Like Float32, but accepts a shorthand letter that can be used after a single dash. +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Float32P(name, shorthand string, value float32, usage string) *float32 { p := new(float32) f.Float32VarP(p, name, shorthand, value, usage) @@ -85,7 +82,7 @@ func Float32(name string, value float32, usage string) *float32 { return CommandLine.Float32P(name, "", value, usage) } -// Like Float32, but accepts a shorthand letter that can be used after a single dash. +// Float32P is like Float32, but accepts a shorthand letter that can be used after a single dash. 
func Float32P(name, shorthand string, value float32, usage string) *float32 { return CommandLine.Float32P(name, shorthand, value, usage) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/float64.go b/vendor/github.com/spf13/pflag/float64.go similarity index 83% rename from Godeps/_workspace/src/github.com/spf13/pflag/float64.go rename to vendor/github.com/spf13/pflag/float64.go index 10e17e4..04b5492 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/float64.go +++ b/vendor/github.com/spf13/pflag/float64.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- float64 Value type float64Value float64 @@ -23,7 +20,7 @@ func (f *float64Value) Type() string { return "float64" } -func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } +func (f *float64Value) String() string { return strconv.FormatFloat(float64(*f), 'g', -1, 64) } func float64Conv(sval string) (interface{}, error) { return strconv.ParseFloat(sval, 64) @@ -44,7 +41,7 @@ func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage strin f.VarP(newFloat64Value(value, p), name, "", usage) } -// Like Float64Var, but accepts a shorthand letter that can be used after a single dash. +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Float64VarP(p *float64, name, shorthand string, value float64, usage string) { f.VarP(newFloat64Value(value, p), name, shorthand, usage) } @@ -55,7 +52,7 @@ func Float64Var(p *float64, name string, value float64, usage string) { CommandLine.VarP(newFloat64Value(value, p), name, "", usage) } -// Like Float64Var, but accepts a shorthand letter that can be used after a single dash. +// Float64VarP is like Float64Var, but accepts a shorthand letter that can be used after a single dash. func Float64VarP(p *float64, name, shorthand string, value float64, usage string) { CommandLine.VarP(newFloat64Value(value, p), name, shorthand, usage) } @@ -68,7 +65,7 @@ func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { return p } -// Like Float64, but accepts a shorthand letter that can be used after a single dash. +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Float64P(name, shorthand string, value float64, usage string) *float64 { p := new(float64) f.Float64VarP(p, name, shorthand, value, usage) @@ -81,7 +78,7 @@ func Float64(name string, value float64, usage string) *float64 { return CommandLine.Float64P(name, "", value, usage) } -// Like Float64, but accepts a shorthand letter that can be used after a single dash. +// Float64P is like Float64, but accepts a shorthand letter that can be used after a single dash. func Float64P(name, shorthand string, value float64, usage string) *float64 { return CommandLine.Float64P(name, shorthand, value, usage) } diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go new file mode 100644 index 0000000..b056147 --- /dev/null +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -0,0 +1,104 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pflag + +import ( + goflag "flag" + "fmt" + "reflect" + "strings" +) + +var _ = fmt.Print + +// flagValueWrapper implements pflag.Value around a flag.Value. 
The main +// difference here is the addition of the Type method that returns a string +// name of the type. As this is generally unknown, we approximate that with +// reflection. +type flagValueWrapper struct { + inner goflag.Value + flagType string +} + +// We are just copying the boolFlag interface out of goflag as that is what +// they use to decide if a flag should get "true" when no arg is given. +type goBoolFlag interface { + goflag.Value + IsBoolFlag() bool +} + +func wrapFlagValue(v goflag.Value) Value { + // If the flag.Value happens to also be a pflag.Value, just use it directly. + if pv, ok := v.(Value); ok { + return pv + } + + pv := &flagValueWrapper{ + inner: v, + } + + t := reflect.TypeOf(v) + if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr { + t = t.Elem() + } + + pv.flagType = strings.TrimSuffix(t.Name(), "Value") + return pv +} + +func (v *flagValueWrapper) String() string { + return v.inner.String() +} + +func (v *flagValueWrapper) Set(s string) error { + return v.inner.Set(s) +} + +func (v *flagValueWrapper) Type() string { + return v.flagType +} + +// PFlagFromGoFlag will return a *pflag.Flag given a *flag.Flag +// If the *flag.Flag.Name was a single character (ex: `v`) it will be accessiblei +// with both `-v` and `--v` in flags. If the golang flag was more than a single +// character (ex: `verbose`) it will only be accessible via `--verbose` +func PFlagFromGoFlag(goflag *goflag.Flag) *Flag { + // Remember the default value as a string; it won't change. + flag := &Flag{ + Name: goflag.Name, + Usage: goflag.Usage, + Value: wrapFlagValue(goflag.Value), + // Looks like golang flags don't set DefValue correctly :-( + //DefValue: goflag.DefValue, + DefValue: goflag.Value.String(), + } + // Ex: if the golang flag was -v, allow both -v and --v to work + if len(flag.Name) == 1 { + flag.Shorthand = flag.Name + } + if fv, ok := goflag.Value.(goBoolFlag); ok && fv.IsBoolFlag() { + flag.NoOptDefVal = "true" + } + return flag +} + +// AddGoFlag will add the given *flag.Flag to the pflag.FlagSet +func (f *FlagSet) AddGoFlag(goflag *goflag.Flag) { + if f.Lookup(goflag.Name) != nil { + return + } + newflag := PFlagFromGoFlag(goflag) + f.AddFlag(newflag) +} + +// AddGoFlagSet will add the given *flag.FlagSet to the pflag.FlagSet +func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { + if newSet == nil { + return + } + newSet.VisitAll(func(goflag *goflag.Flag) { + f.AddGoFlag(goflag) + }) +} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int.go b/vendor/github.com/spf13/pflag/int.go similarity index 83% rename from Godeps/_workspace/src/github.com/spf13/pflag/int.go rename to vendor/github.com/spf13/pflag/int.go index 23f70dd..1474b89 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/int.go +++ b/vendor/github.com/spf13/pflag/int.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- int Value type intValue int @@ -23,7 +20,7 @@ func (i *intValue) Type() string { return "int" } -func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } +func (i *intValue) String() string { return strconv.Itoa(int(*i)) } func intConv(sval string) (interface{}, error) { return strconv.Atoi(sval) @@ -44,7 +41,7 @@ func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { f.VarP(newIntValue(value, p), name, "", usage) } -// Like IntVar, but accepts a shorthand letter that can be used after a single dash. +// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. 
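The golangflag.go file added above bridges flags registered with the standard library into pflag: wrapFlagValue adapts a flag.Value, PFlagFromGoFlag converts a single flag, and AddGoFlagSet imports a whole *flag.FlagSet. A hedged sketch of typical usage (the flag name is illustrative, not from this patch):

    package main

    import (
        goflag "flag"
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        // A flag registered with the standard library.
        region := goflag.String("region", "eu-west-1", "region to use")

        // Import every stdlib flag into pflag's CommandLine; single-letter
        // names additionally get a one-dash shorthand (see PFlagFromGoFlag).
        pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
        _ = pflag.CommandLine.Parse([]string{"--region", "us-east-1"})

        fmt.Println(*region) // us-east-1
    }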
func (f *FlagSet) IntVarP(p *int, name, shorthand string, value int, usage string) { f.VarP(newIntValue(value, p), name, shorthand, usage) } @@ -55,7 +52,7 @@ func IntVar(p *int, name string, value int, usage string) { CommandLine.VarP(newIntValue(value, p), name, "", usage) } -// Like IntVar, but accepts a shorthand letter that can be used after a single dash. +// IntVarP is like IntVar, but accepts a shorthand letter that can be used after a single dash. func IntVarP(p *int, name, shorthand string, value int, usage string) { CommandLine.VarP(newIntValue(value, p), name, shorthand, usage) } @@ -68,7 +65,7 @@ func (f *FlagSet) Int(name string, value int, usage string) *int { return p } -// Like Int, but accepts a shorthand letter that can be used after a single dash. +// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) IntP(name, shorthand string, value int, usage string) *int { p := new(int) f.IntVarP(p, name, shorthand, value, usage) @@ -81,7 +78,7 @@ func Int(name string, value int, usage string) *int { return CommandLine.IntP(name, "", value, usage) } -// Like Int, but accepts a shorthand letter that can be used after a single dash. +// IntP is like Int, but accepts a shorthand letter that can be used after a single dash. func IntP(name, shorthand string, value int, usage string) *int { return CommandLine.IntP(name, shorthand, value, usage) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int32.go b/vendor/github.com/spf13/pflag/int32.go similarity index 83% rename from Godeps/_workspace/src/github.com/spf13/pflag/int32.go rename to vendor/github.com/spf13/pflag/int32.go index 515f90b..9b95944 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/int32.go +++ b/vendor/github.com/spf13/pflag/int32.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- int32 Value type int32Value int32 @@ -23,7 +20,7 @@ func (i *int32Value) Type() string { return "int32" } -func (i *int32Value) String() string { return fmt.Sprintf("%v", *i) } +func (i *int32Value) String() string { return strconv.FormatInt(int64(*i), 10) } func int32Conv(sval string) (interface{}, error) { v, err := strconv.ParseInt(sval, 0, 32) @@ -48,7 +45,7 @@ func (f *FlagSet) Int32Var(p *int32, name string, value int32, usage string) { f.VarP(newInt32Value(value, p), name, "", usage) } -// Like Int32Var, but accepts a shorthand letter that can be used after a single dash. +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Int32VarP(p *int32, name, shorthand string, value int32, usage string) { f.VarP(newInt32Value(value, p), name, shorthand, usage) } @@ -59,7 +56,7 @@ func Int32Var(p *int32, name string, value int32, usage string) { CommandLine.VarP(newInt32Value(value, p), name, "", usage) } -// Like Int32Var, but accepts a shorthand letter that can be used after a single dash. +// Int32VarP is like Int32Var, but accepts a shorthand letter that can be used after a single dash. func Int32VarP(p *int32, name, shorthand string, value int32, usage string) { CommandLine.VarP(newInt32Value(value, p), name, shorthand, usage) } @@ -72,7 +69,7 @@ func (f *FlagSet) Int32(name string, value int32, usage string) *int32 { return p } -// Like Int32, but accepts a shorthand letter that can be used after a single dash. +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. 
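The comment renames above document the *P variants, which register a one-letter shorthand alongside the long flag name. A small illustrative sketch using IntVarP (flag names are made up):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)

        var workers int
        // Registers both --workers and the shorthand -w.
        fs.IntVarP(&workers, "workers", "w", 4, "number of workers")

        _ = fs.Parse([]string{"-w", "16"})
        fmt.Println(workers) // 16
    }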
func (f *FlagSet) Int32P(name, shorthand string, value int32, usage string) *int32 { p := new(int32) f.Int32VarP(p, name, shorthand, value, usage) @@ -85,7 +82,7 @@ func Int32(name string, value int32, usage string) *int32 { return CommandLine.Int32P(name, "", value, usage) } -// Like Int32, but accepts a shorthand letter that can be used after a single dash. +// Int32P is like Int32, but accepts a shorthand letter that can be used after a single dash. func Int32P(name, shorthand string, value int32, usage string) *int32 { return CommandLine.Int32P(name, shorthand, value, usage) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int64.go b/vendor/github.com/spf13/pflag/int64.go similarity index 83% rename from Godeps/_workspace/src/github.com/spf13/pflag/int64.go rename to vendor/github.com/spf13/pflag/int64.go index b77ade4..0026d78 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/int64.go +++ b/vendor/github.com/spf13/pflag/int64.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- int64 Value type int64Value int64 @@ -23,7 +20,7 @@ func (i *int64Value) Type() string { return "int64" } -func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } +func (i *int64Value) String() string { return strconv.FormatInt(int64(*i), 10) } func int64Conv(sval string) (interface{}, error) { return strconv.ParseInt(sval, 0, 64) @@ -44,7 +41,7 @@ func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { f.VarP(newInt64Value(value, p), name, "", usage) } -// Like Int64Var, but accepts a shorthand letter that can be used after a single dash. +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Int64VarP(p *int64, name, shorthand string, value int64, usage string) { f.VarP(newInt64Value(value, p), name, shorthand, usage) } @@ -55,7 +52,7 @@ func Int64Var(p *int64, name string, value int64, usage string) { CommandLine.VarP(newInt64Value(value, p), name, "", usage) } -// Like Int64Var, but accepts a shorthand letter that can be used after a single dash. +// Int64VarP is like Int64Var, but accepts a shorthand letter that can be used after a single dash. func Int64VarP(p *int64, name, shorthand string, value int64, usage string) { CommandLine.VarP(newInt64Value(value, p), name, shorthand, usage) } @@ -68,7 +65,7 @@ func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { return p } -// Like Int64, but accepts a shorthand letter that can be used after a single dash. +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Int64P(name, shorthand string, value int64, usage string) *int64 { p := new(int64) f.Int64VarP(p, name, shorthand, value, usage) @@ -81,7 +78,7 @@ func Int64(name string, value int64, usage string) *int64 { return CommandLine.Int64P(name, "", value, usage) } -// Like Int64, but accepts a shorthand letter that can be used after a single dash. +// Int64P is like Int64, but accepts a shorthand letter that can be used after a single dash. 
func Int64P(name, shorthand string, value int64, usage string) *int64 { return CommandLine.Int64P(name, shorthand, value, usage) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int8.go b/vendor/github.com/spf13/pflag/int8.go similarity index 83% rename from Godeps/_workspace/src/github.com/spf13/pflag/int8.go rename to vendor/github.com/spf13/pflag/int8.go index c51cb4f..4da9222 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/int8.go +++ b/vendor/github.com/spf13/pflag/int8.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- int8 Value type int8Value int8 @@ -23,7 +20,7 @@ func (i *int8Value) Type() string { return "int8" } -func (i *int8Value) String() string { return fmt.Sprintf("%v", *i) } +func (i *int8Value) String() string { return strconv.FormatInt(int64(*i), 10) } func int8Conv(sval string) (interface{}, error) { v, err := strconv.ParseInt(sval, 0, 8) @@ -48,7 +45,7 @@ func (f *FlagSet) Int8Var(p *int8, name string, value int8, usage string) { f.VarP(newInt8Value(value, p), name, "", usage) } -// Like Int8Var, but accepts a shorthand letter that can be used after a single dash. +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Int8VarP(p *int8, name, shorthand string, value int8, usage string) { f.VarP(newInt8Value(value, p), name, shorthand, usage) } @@ -59,7 +56,7 @@ func Int8Var(p *int8, name string, value int8, usage string) { CommandLine.VarP(newInt8Value(value, p), name, "", usage) } -// Like Int8Var, but accepts a shorthand letter that can be used after a single dash. +// Int8VarP is like Int8Var, but accepts a shorthand letter that can be used after a single dash. func Int8VarP(p *int8, name, shorthand string, value int8, usage string) { CommandLine.VarP(newInt8Value(value, p), name, shorthand, usage) } @@ -72,7 +69,7 @@ func (f *FlagSet) Int8(name string, value int8, usage string) *int8 { return p } -// Like Int8, but accepts a shorthand letter that can be used after a single dash. +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Int8P(name, shorthand string, value int8, usage string) *int8 { p := new(int8) f.Int8VarP(p, name, shorthand, value, usage) @@ -85,7 +82,7 @@ func Int8(name string, value int8, usage string) *int8 { return CommandLine.Int8P(name, "", value, usage) } -// Like Int8, but accepts a shorthand letter that can be used after a single dash. +// Int8P is like Int8, but accepts a shorthand letter that can be used after a single dash. 
func Int8P(name, shorthand string, value int8, usage string) *int8 { return CommandLine.Int8P(name, shorthand, value, usage) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/int_slice.go b/vendor/github.com/spf13/pflag/int_slice.go similarity index 75% rename from Godeps/_workspace/src/github.com/spf13/pflag/int_slice.go rename to vendor/github.com/spf13/pflag/int_slice.go index fb3e67b..1e7c9ed 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/int_slice.go +++ b/vendor/github.com/spf13/pflag/int_slice.go @@ -7,11 +7,16 @@ import ( ) // -- intSlice Value -type intSliceValue []int +type intSliceValue struct { + value *[]int + changed bool +} func newIntSliceValue(val []int, p *[]int) *intSliceValue { - *p = val - return (*intSliceValue)(p) + isv := new(intSliceValue) + isv.value = p + *isv.value = val + return isv } func (s *intSliceValue) Set(val string) error { @@ -25,7 +30,12 @@ func (s *intSliceValue) Set(val string) error { } } - *s = intSliceValue(out) + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true return nil } @@ -34,14 +44,19 @@ func (s *intSliceValue) Type() string { } func (s *intSliceValue) String() string { - out := make([]string, len(*s)) - for i, d := range *s { + out := make([]string, len(*s.value)) + for i, d := range *s.value { out[i] = fmt.Sprintf("%d", d) } - return strings.Join(out, ",") + return "[" + strings.Join(out, ",") + "]" } func intSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []int{}, nil + } ss := strings.Split(val, ",") out := make([]int, len(ss)) for i, d := range ss { @@ -70,7 +85,7 @@ func (f *FlagSet) IntSliceVar(p *[]int, name string, value []int, usage string) f.VarP(newIntSliceValue(value, p), name, "", usage) } -// Like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { f.VarP(newIntSliceValue(value, p), name, shorthand, usage) } @@ -81,7 +96,7 @@ func IntSliceVar(p *[]int, name string, value []int, usage string) { CommandLine.VarP(newIntSliceValue(value, p), name, "", usage) } -// Like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. +// IntSliceVarP is like IntSliceVar, but accepts a shorthand letter that can be used after a single dash. func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { CommandLine.VarP(newIntSliceValue(value, p), name, shorthand, usage) } @@ -89,14 +104,14 @@ func IntSliceVarP(p *[]int, name, shorthand string, value []int, usage string) { // IntSlice defines a []int flag with specified name, default value, and usage string. // The return value is the address of a []int variable that stores the value of the flag. func (f *FlagSet) IntSlice(name string, value []int, usage string) *[]int { - p := make([]int, 0) + p := []int{} f.IntSliceVarP(&p, name, "", value, usage) return &p } -// Like IntSlice, but accepts a shorthand letter that can be used after a single dash. +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. 
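The int_slice.go rewrite above replaces the plain slice type with a struct carrying a changed marker: the first --flag occurrence replaces the default, later occurrences (and comma-separated lists) append, and String() now renders a bracketed list. A sketch of the resulting behaviour (flag names are illustrative):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
        ports := fs.IntSliceP("port", "p", []int{8080}, "ports to listen on")

        // The first --port replaces the default; later values are appended.
        _ = fs.Parse([]string{"--port=80", "--port=443,8443"})

        fmt.Println(*ports)                           // [80 443 8443]
        fmt.Println(fs.Lookup("port").Value.String()) // [80,443,8443]
    }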
func (f *FlagSet) IntSliceP(name, shorthand string, value []int, usage string) *[]int { - p := make([]int, 0) + p := []int{} f.IntSliceVarP(&p, name, shorthand, value, usage) return &p } @@ -107,7 +122,7 @@ func IntSlice(name string, value []int, usage string) *[]int { return CommandLine.IntSliceP(name, "", value, usage) } -// Like IntSlice, but accepts a shorthand letter that can be used after a single dash. +// IntSliceP is like IntSlice, but accepts a shorthand letter that can be used after a single dash. func IntSliceP(name, shorthand string, value []int, usage string) *[]int { return CommandLine.IntSliceP(name, shorthand, value, usage) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/ip.go b/vendor/github.com/spf13/pflag/ip.go similarity index 85% rename from Godeps/_workspace/src/github.com/spf13/pflag/ip.go rename to vendor/github.com/spf13/pflag/ip.go index 746eefd..88a1743 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/ip.go +++ b/vendor/github.com/spf13/pflag/ip.go @@ -3,8 +3,11 @@ package pflag import ( "fmt" "net" + "strings" ) +var _ = strings.TrimSpace + // -- net.IP value type ipValue net.IP @@ -15,7 +18,7 @@ func newIPValue(val net.IP, p *net.IP) *ipValue { func (i *ipValue) String() string { return net.IP(*i).String() } func (i *ipValue) Set(s string) error { - ip := net.ParseIP(s) + ip := net.ParseIP(strings.TrimSpace(s)) if ip == nil { return fmt.Errorf("failed to parse IP: %q", s) } @@ -50,7 +53,7 @@ func (f *FlagSet) IPVar(p *net.IP, name string, value net.IP, usage string) { f.VarP(newIPValue(value, p), name, "", usage) } -// Like IPVar, but accepts a shorthand letter that can be used after a single dash. +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { f.VarP(newIPValue(value, p), name, shorthand, usage) } @@ -61,7 +64,7 @@ func IPVar(p *net.IP, name string, value net.IP, usage string) { CommandLine.VarP(newIPValue(value, p), name, "", usage) } -// Like IPVar, but accepts a shorthand letter that can be used after a single dash. +// IPVarP is like IPVar, but accepts a shorthand letter that can be used after a single dash. func IPVarP(p *net.IP, name, shorthand string, value net.IP, usage string) { CommandLine.VarP(newIPValue(value, p), name, shorthand, usage) } @@ -74,7 +77,7 @@ func (f *FlagSet) IP(name string, value net.IP, usage string) *net.IP { return p } -// Like IP, but accepts a shorthand letter that can be used after a single dash. +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) IPP(name, shorthand string, value net.IP, usage string) *net.IP { p := new(net.IP) f.IPVarP(p, name, shorthand, value, usage) @@ -87,7 +90,7 @@ func IP(name string, value net.IP, usage string) *net.IP { return CommandLine.IPP(name, "", value, usage) } -// Like IP, but accepts a shorthand letter that can be used after a single dash. +// IPP is like IP, but accepts a shorthand letter that can be used after a single dash. 
func IPP(name, shorthand string, value net.IP, usage string) *net.IP { return CommandLine.IPP(name, shorthand, value, usage) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/ipmask.go b/vendor/github.com/spf13/pflag/ipmask.go similarity index 88% rename from Godeps/_workspace/src/github.com/spf13/pflag/ipmask.go rename to vendor/github.com/spf13/pflag/ipmask.go index 1b10efb..5bd44bd 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/ipmask.go +++ b/vendor/github.com/spf13/pflag/ipmask.go @@ -28,7 +28,7 @@ func (i *ipMaskValue) Type() string { return "ipMask" } -// Parse IPv4 netmask written in IP form (e.g. 255.255.255.0). +// ParseIPv4Mask written in IP form (e.g. 255.255.255.0). // This function should really belong to the net package. func ParseIPv4Mask(s string) net.IPMask { mask := net.ParseIP(s) @@ -79,7 +79,7 @@ func (f *FlagSet) IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage f.VarP(newIPMaskValue(value, p), name, "", usage) } -// Like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { f.VarP(newIPMaskValue(value, p), name, shorthand, usage) } @@ -90,7 +90,7 @@ func IPMaskVar(p *net.IPMask, name string, value net.IPMask, usage string) { CommandLine.VarP(newIPMaskValue(value, p), name, "", usage) } -// Like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. +// IPMaskVarP is like IPMaskVar, but accepts a shorthand letter that can be used after a single dash. func IPMaskVarP(p *net.IPMask, name, shorthand string, value net.IPMask, usage string) { CommandLine.VarP(newIPMaskValue(value, p), name, shorthand, usage) } @@ -103,7 +103,7 @@ func (f *FlagSet) IPMask(name string, value net.IPMask, usage string) *net.IPMas return p } -// Like IPMask, but accepts a shorthand letter that can be used after a single dash. +// IPMaskP is like IPMask, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { p := new(net.IPMask) f.IPMaskVarP(p, name, shorthand, value, usage) @@ -116,7 +116,7 @@ func IPMask(name string, value net.IPMask, usage string) *net.IPMask { return CommandLine.IPMaskP(name, "", value, usage) } -// Like IP, but accepts a shorthand letter that can be used after a single dash. +// IPMaskP is like IP, but accepts a shorthand letter that can be used after a single dash. func IPMaskP(name, shorthand string, value net.IPMask, usage string) *net.IPMask { return CommandLine.IPMaskP(name, shorthand, value, usage) } diff --git a/vendor/github.com/spf13/pflag/ipnet.go b/vendor/github.com/spf13/pflag/ipnet.go new file mode 100644 index 0000000..149b764 --- /dev/null +++ b/vendor/github.com/spf13/pflag/ipnet.go @@ -0,0 +1,100 @@ +package pflag + +import ( + "fmt" + "net" + "strings" +) + +// IPNet adapts net.IPNet for use as a flag. 
+type ipNetValue net.IPNet + +func (ipnet ipNetValue) String() string { + n := net.IPNet(ipnet) + return n.String() +} + +func (ipnet *ipNetValue) Set(value string) error { + _, n, err := net.ParseCIDR(strings.TrimSpace(value)) + if err != nil { + return err + } + *ipnet = ipNetValue(*n) + return nil +} + +func (*ipNetValue) Type() string { + return "ipNet" +} + +var _ = strings.TrimSpace + +func newIPNetValue(val net.IPNet, p *net.IPNet) *ipNetValue { + *p = val + return (*ipNetValue)(p) +} + +func ipNetConv(sval string) (interface{}, error) { + _, n, err := net.ParseCIDR(strings.TrimSpace(sval)) + if err == nil { + return *n, nil + } + return nil, fmt.Errorf("invalid string being converted to IPNet: %s", sval) +} + +// GetIPNet return the net.IPNet value of a flag with the given name +func (f *FlagSet) GetIPNet(name string) (net.IPNet, error) { + val, err := f.getFlagType(name, "ipNet", ipNetConv) + if err != nil { + return net.IPNet{}, err + } + return val.(net.IPNet), nil +} + +// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. +// The argument p points to an net.IPNet variable in which to store the value of the flag. +func (f *FlagSet) IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { + f.VarP(newIPNetValue(value, p), name, "", usage) +} + +// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { + f.VarP(newIPNetValue(value, p), name, shorthand, usage) +} + +// IPNetVar defines an net.IPNet flag with specified name, default value, and usage string. +// The argument p points to an net.IPNet variable in which to store the value of the flag. +func IPNetVar(p *net.IPNet, name string, value net.IPNet, usage string) { + CommandLine.VarP(newIPNetValue(value, p), name, "", usage) +} + +// IPNetVarP is like IPNetVar, but accepts a shorthand letter that can be used after a single dash. +func IPNetVarP(p *net.IPNet, name, shorthand string, value net.IPNet, usage string) { + CommandLine.VarP(newIPNetValue(value, p), name, shorthand, usage) +} + +// IPNet defines an net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of an net.IPNet variable that stores the value of the flag. +func (f *FlagSet) IPNet(name string, value net.IPNet, usage string) *net.IPNet { + p := new(net.IPNet) + f.IPNetVarP(p, name, "", value, usage) + return p +} + +// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { + p := new(net.IPNet) + f.IPNetVarP(p, name, shorthand, value, usage) + return p +} + +// IPNet defines an net.IPNet flag with specified name, default value, and usage string. +// The return value is the address of an net.IPNet variable that stores the value of the flag. +func IPNet(name string, value net.IPNet, usage string) *net.IPNet { + return CommandLine.IPNetP(name, "", value, usage) +} + +// IPNetP is like IPNet, but accepts a shorthand letter that can be used after a single dash. 
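The new ipnet.go above adds a net.IPNet flag type whose Set parses CIDR notation via net.ParseCIDR. A minimal sketch, assuming only the IPNetVar API shown in this file (the flag name is illustrative):

    package main

    import (
        "fmt"
        "net"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)

        var allowed net.IPNet
        fs.IPNetVar(&allowed, "allow", net.IPNet{}, "CIDR range allowed to connect")

        _ = fs.Parse([]string{"--allow", "10.0.0.0/8"})

        fmt.Println(allowed.String())                          // 10.0.0.0/8
        fmt.Println(allowed.Contains(net.ParseIP("10.1.2.3"))) // true
    }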
+func IPNetP(name, shorthand string, value net.IPNet, usage string) *net.IPNet { + return CommandLine.IPNetP(name, shorthand, value, usage) +} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/string.go b/vendor/github.com/spf13/pflag/string.go similarity index 84% rename from Godeps/_workspace/src/github.com/spf13/pflag/string.go rename to vendor/github.com/spf13/pflag/string.go index f89ea8b..04e0a26 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/string.go +++ b/vendor/github.com/spf13/pflag/string.go @@ -1,7 +1,5 @@ package pflag -import "fmt" - // -- string Value type stringValue string @@ -18,7 +16,7 @@ func (s *stringValue) Type() string { return "string" } -func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } +func (s *stringValue) String() string { return string(*s) } func stringConv(sval string) (interface{}, error) { return sval, nil @@ -39,7 +37,7 @@ func (f *FlagSet) StringVar(p *string, name string, value string, usage string) f.VarP(newStringValue(value, p), name, "", usage) } -// Like StringVar, but accepts a shorthand letter that can be used after a single dash. +// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) StringVarP(p *string, name, shorthand string, value string, usage string) { f.VarP(newStringValue(value, p), name, shorthand, usage) } @@ -50,7 +48,7 @@ func StringVar(p *string, name string, value string, usage string) { CommandLine.VarP(newStringValue(value, p), name, "", usage) } -// Like StringVar, but accepts a shorthand letter that can be used after a single dash. +// StringVarP is like StringVar, but accepts a shorthand letter that can be used after a single dash. func StringVarP(p *string, name, shorthand string, value string, usage string) { CommandLine.VarP(newStringValue(value, p), name, shorthand, usage) } @@ -63,7 +61,7 @@ func (f *FlagSet) String(name string, value string, usage string) *string { return p } -// Like String, but accepts a shorthand letter that can be used after a single dash. +// StringP is like String, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) StringP(name, shorthand string, value string, usage string) *string { p := new(string) f.StringVarP(p, name, shorthand, value, usage) @@ -76,7 +74,7 @@ func String(name string, value string, usage string) *string { return CommandLine.StringP(name, "", value, usage) } -// Like String, but accepts a shorthand letter that can be used after a single dash. +// StringP is like String, but accepts a shorthand letter that can be used after a single dash. 
func StringP(name, shorthand string, value string, usage string) *string { return CommandLine.StringP(name, shorthand, value, usage) } diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go new file mode 100644 index 0000000..f320f2e --- /dev/null +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -0,0 +1,110 @@ +package pflag + +import ( + "fmt" + "strings" +) + +var _ = fmt.Fprint + +// -- stringArray Value +type stringArrayValue struct { + value *[]string + changed bool +} + +func newStringArrayValue(val []string, p *[]string) *stringArrayValue { + ssv := new(stringArrayValue) + ssv.value = p + *ssv.value = val + return ssv +} + +func (s *stringArrayValue) Set(val string) error { + if !s.changed { + *s.value = []string{val} + s.changed = true + } else { + *s.value = append(*s.value, val) + } + return nil +} + +func (s *stringArrayValue) Type() string { + return "stringArray" +} + +func (s *stringArrayValue) String() string { + str, _ := writeAsCSV(*s.value) + return "[" + str + "]" +} + +func stringArrayConv(sval string) (interface{}, error) { + sval = strings.Trim(sval, "[]") + // An empty string would cause a array with one (empty) string + if len(sval) == 0 { + return []string{}, nil + } + return readAsCSV(sval) +} + +// GetStringArray return the []string value of a flag with the given name +func (f *FlagSet) GetStringArray(name string) ([]string, error) { + val, err := f.getFlagType(name, "stringArray", stringArrayConv) + if err != nil { + return []string{}, err + } + return val.([]string), nil +} + +// StringArrayVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the values of the multiple flags. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { + f.VarP(newStringArrayValue(value, p), name, "", usage) +} + +// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { + f.VarP(newStringArrayValue(value, p), name, shorthand, usage) +} + +// StringArrayVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a []string variable in which to store the value of the flag. +// The value of each argument will not try to be separated by comma +func StringArrayVar(p *[]string, name string, value []string, usage string) { + CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) +} + +// StringArrayVarP is like StringArrayVar, but accepts a shorthand letter that can be used after a single dash. +func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage string) { + CommandLine.VarP(newStringArrayValue(value, p), name, shorthand, usage) +} + +// StringArray defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string { + p := []string{} + f.StringArrayVarP(&p, name, "", value, usage) + return &p +} + +// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. 
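The new string_array.go above defines a repeatable string flag that, unlike the stringSlice type further down, keeps each value verbatim and never splits on commas. An illustrative sketch (the --header flag is made up):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
        headers := fs.StringArray("header", nil, "extra header, repeatable")

        // Each occurrence is kept verbatim; commas are not separators here.
        _ = fs.Parse([]string{"--header=Accept: text/html,application/json", "--header=X-Trace: 1"})

        fmt.Println(len(*headers)) // 2
        fmt.Println((*headers)[0]) // Accept: text/html,application/json
    }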
+func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage string) *[]string { + p := []string{} + f.StringArrayVarP(&p, name, shorthand, value, usage) + return &p +} + +// StringArray defines a string flag with specified name, default value, and usage string. +// The return value is the address of a []string variable that stores the value of the flag. +// The value of each argument will not try to be separated by comma +func StringArray(name string, value []string, usage string) *[]string { + return CommandLine.StringArrayP(name, "", value, usage) +} + +// StringArrayP is like StringArray, but accepts a shorthand letter that can be used after a single dash. +func StringArrayP(name, shorthand string, value []string, usage string) *[]string { + return CommandLine.StringArrayP(name, shorthand, value, usage) +} diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go similarity index 62% rename from Godeps/_workspace/src/github.com/spf13/pflag/string_slice.go rename to vendor/github.com/spf13/pflag/string_slice.go index bbe6e00..51e3c5d 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/string_slice.go +++ b/vendor/github.com/spf13/pflag/string_slice.go @@ -1,31 +1,77 @@ package pflag import ( + "bytes" + "encoding/csv" + "fmt" "strings" ) +var _ = fmt.Fprint + // -- stringSlice Value -type stringSliceValue []string +type stringSliceValue struct { + value *[]string + changed bool +} func newStringSliceValue(val []string, p *[]string) *stringSliceValue { - *p = val - return (*stringSliceValue)(p) + ssv := new(stringSliceValue) + ssv.value = p + *ssv.value = val + return ssv +} + +func readAsCSV(val string) ([]string, error) { + if val == "" { + return []string{}, nil + } + stringReader := strings.NewReader(val) + csvReader := csv.NewReader(stringReader) + return csvReader.Read() +} + +func writeAsCSV(vals []string) (string, error) { + b := &bytes.Buffer{} + w := csv.NewWriter(b) + err := w.Write(vals) + if err != nil { + return "", err + } + w.Flush() + return strings.TrimSuffix(b.String(), fmt.Sprintln()), nil } func (s *stringSliceValue) Set(val string) error { - v := strings.Split(val, ",") - *s = stringSliceValue(v) + v, err := readAsCSV(val) + if err != nil { + return err + } + if !s.changed { + *s.value = v + } else { + *s.value = append(*s.value, v...) + } + s.changed = true return nil } + func (s *stringSliceValue) Type() string { return "stringSlice" } -func (s *stringSliceValue) String() string { return strings.Join(*s, ",") } +func (s *stringSliceValue) String() string { + str, _ := writeAsCSV(*s.value) + return "[" + str + "]" +} func stringSliceConv(sval string) (interface{}, error) { - v := strings.Split(sval, ",") - return v, nil + sval = strings.Trim(sval, "[]") + // An empty string would cause a slice with one (empty) string + if len(sval) == 0 { + return []string{}, nil + } + return readAsCSV(sval) } // GetStringSlice return the []string value of a flag with the given name @@ -43,7 +89,7 @@ func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage f.VarP(newStringSliceValue(value, p), name, "", usage) } -// Like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. +// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. 
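The string_slice.go changes above parse values with encoding/csv, accumulate across repeated uses (first use replaces the default), and render String() as a bracketed CSV. A sketch of that behaviour (flag names are illustrative):

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
        langs := fs.StringSlice("lang", []string{"go"}, "languages, comma separated")

        // Values are CSV-parsed; the first use replaces the default, later uses append.
        _ = fs.Parse([]string{"--lang=go,rust", "--lang=zig"})

        fmt.Println(*langs)                           // [go rust zig]
        fmt.Println(fs.Lookup("lang").Value.String()) // [go,rust,zig]
    }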
func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { f.VarP(newStringSliceValue(value, p), name, shorthand, usage) } @@ -54,7 +100,7 @@ func StringSliceVar(p *[]string, name string, value []string, usage string) { CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) } -// Like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. +// StringSliceVarP is like StringSliceVar, but accepts a shorthand letter that can be used after a single dash. func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage string) { CommandLine.VarP(newStringSliceValue(value, p), name, shorthand, usage) } @@ -62,14 +108,14 @@ func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage // StringSlice defines a string flag with specified name, default value, and usage string. // The return value is the address of a []string variable that stores the value of the flag. func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { - p := make([]string, 0) + p := []string{} f.StringSliceVarP(&p, name, "", value, usage) return &p } -// Like StringSlice, but accepts a shorthand letter that can be used after a single dash. +// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage string) *[]string { - p := make([]string, 0) + p := []string{} f.StringSliceVarP(&p, name, shorthand, value, usage) return &p } @@ -80,7 +126,7 @@ func StringSlice(name string, value []string, usage string) *[]string { return CommandLine.StringSliceP(name, "", value, usage) } -// Like StringSlice, but accepts a shorthand letter that can be used after a single dash. +// StringSliceP is like StringSlice, but accepts a shorthand letter that can be used after a single dash. func StringSliceP(name, shorthand string, value []string, usage string) *[]string { return CommandLine.StringSliceP(name, shorthand, value, usage) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint.go b/vendor/github.com/spf13/pflag/uint.go similarity index 83% rename from Godeps/_workspace/src/github.com/spf13/pflag/uint.go rename to vendor/github.com/spf13/pflag/uint.go index d6f8e5b..dcbc2b7 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/uint.go +++ b/vendor/github.com/spf13/pflag/uint.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- uint Value type uintValue uint @@ -23,7 +20,7 @@ func (i *uintValue) Type() string { return "uint" } -func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } +func (i *uintValue) String() string { return strconv.FormatUint(uint64(*i), 10) } func uintConv(sval string) (interface{}, error) { v, err := strconv.ParseUint(sval, 0, 0) @@ -48,7 +45,7 @@ func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { f.VarP(newUintValue(value, p), name, "", usage) } -// Like UintVar, but accepts a shorthand letter that can be used after a single dash. +// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. 
func (f *FlagSet) UintVarP(p *uint, name, shorthand string, value uint, usage string) { f.VarP(newUintValue(value, p), name, shorthand, usage) } @@ -59,7 +56,7 @@ func UintVar(p *uint, name string, value uint, usage string) { CommandLine.VarP(newUintValue(value, p), name, "", usage) } -// Like UintVar, but accepts a shorthand letter that can be used after a single dash. +// UintVarP is like UintVar, but accepts a shorthand letter that can be used after a single dash. func UintVarP(p *uint, name, shorthand string, value uint, usage string) { CommandLine.VarP(newUintValue(value, p), name, shorthand, usage) } @@ -72,7 +69,7 @@ func (f *FlagSet) Uint(name string, value uint, usage string) *uint { return p } -// Like Uint, but accepts a shorthand letter that can be used after a single dash. +// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) UintP(name, shorthand string, value uint, usage string) *uint { p := new(uint) f.UintVarP(p, name, shorthand, value, usage) @@ -85,7 +82,7 @@ func Uint(name string, value uint, usage string) *uint { return CommandLine.UintP(name, "", value, usage) } -// Like Uint, but accepts a shorthand letter that can be used after a single dash. +// UintP is like Uint, but accepts a shorthand letter that can be used after a single dash. func UintP(name, shorthand string, value uint, usage string) *uint { return CommandLine.UintP(name, shorthand, value, usage) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint16.go b/vendor/github.com/spf13/pflag/uint16.go similarity index 83% rename from Godeps/_workspace/src/github.com/spf13/pflag/uint16.go rename to vendor/github.com/spf13/pflag/uint16.go index 1cdc3df..7e9914e 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/uint16.go +++ b/vendor/github.com/spf13/pflag/uint16.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- uint16 value type uint16Value uint16 @@ -12,7 +9,7 @@ func newUint16Value(val uint16, p *uint16) *uint16Value { *p = val return (*uint16Value)(p) } -func (i *uint16Value) String() string { return fmt.Sprintf("%d", *i) } + func (i *uint16Value) Set(s string) error { v, err := strconv.ParseUint(s, 0, 16) *i = uint16Value(v) @@ -23,6 +20,8 @@ func (i *uint16Value) Type() string { return "uint16" } +func (i *uint16Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + func uint16Conv(sval string) (interface{}, error) { v, err := strconv.ParseUint(sval, 0, 16) if err != nil { @@ -46,7 +45,7 @@ func (f *FlagSet) Uint16Var(p *uint16, name string, value uint16, usage string) f.VarP(newUint16Value(value, p), name, "", usage) } -// Like Uint16Var, but accepts a shorthand letter that can be used after a single dash. +// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { f.VarP(newUint16Value(value, p), name, shorthand, usage) } @@ -57,7 +56,7 @@ func Uint16Var(p *uint16, name string, value uint16, usage string) { CommandLine.VarP(newUint16Value(value, p), name, "", usage) } -// Like Uint16Var, but accepts a shorthand letter that can be used after a single dash. +// Uint16VarP is like Uint16Var, but accepts a shorthand letter that can be used after a single dash. 
func Uint16VarP(p *uint16, name, shorthand string, value uint16, usage string) { CommandLine.VarP(newUint16Value(value, p), name, shorthand, usage) } @@ -70,7 +69,7 @@ func (f *FlagSet) Uint16(name string, value uint16, usage string) *uint16 { return p } -// Like Uint16, but accepts a shorthand letter that can be used after a single dash. +// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Uint16P(name, shorthand string, value uint16, usage string) *uint16 { p := new(uint16) f.Uint16VarP(p, name, shorthand, value, usage) @@ -83,7 +82,7 @@ func Uint16(name string, value uint16, usage string) *uint16 { return CommandLine.Uint16P(name, "", value, usage) } -// Like Uint16, but accepts a shorthand letter that can be used after a single dash. +// Uint16P is like Uint16, but accepts a shorthand letter that can be used after a single dash. func Uint16P(name, shorthand string, value uint16, usage string) *uint16 { return CommandLine.Uint16P(name, shorthand, value, usage) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint32.go b/vendor/github.com/spf13/pflag/uint32.go similarity index 83% rename from Godeps/_workspace/src/github.com/spf13/pflag/uint32.go rename to vendor/github.com/spf13/pflag/uint32.go index 1326e4a..d802453 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/uint32.go +++ b/vendor/github.com/spf13/pflag/uint32.go @@ -1,18 +1,15 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" -// -- uint16 value +// -- uint32 value type uint32Value uint32 func newUint32Value(val uint32, p *uint32) *uint32Value { *p = val return (*uint32Value)(p) } -func (i *uint32Value) String() string { return fmt.Sprintf("%d", *i) } + func (i *uint32Value) Set(s string) error { v, err := strconv.ParseUint(s, 0, 32) *i = uint32Value(v) @@ -23,6 +20,8 @@ func (i *uint32Value) Type() string { return "uint32" } +func (i *uint32Value) String() string { return strconv.FormatUint(uint64(*i), 10) } + func uint32Conv(sval string) (interface{}, error) { v, err := strconv.ParseUint(sval, 0, 32) if err != nil { @@ -46,7 +45,7 @@ func (f *FlagSet) Uint32Var(p *uint32, name string, value uint32, usage string) f.VarP(newUint32Value(value, p), name, "", usage) } -// Like Uint32Var, but accepts a shorthand letter that can be used after a single dash. +// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { f.VarP(newUint32Value(value, p), name, shorthand, usage) } @@ -57,7 +56,7 @@ func Uint32Var(p *uint32, name string, value uint32, usage string) { CommandLine.VarP(newUint32Value(value, p), name, "", usage) } -// Like Uint32Var, but accepts a shorthand letter that can be used after a single dash. +// Uint32VarP is like Uint32Var, but accepts a shorthand letter that can be used after a single dash. func Uint32VarP(p *uint32, name, shorthand string, value uint32, usage string) { CommandLine.VarP(newUint32Value(value, p), name, shorthand, usage) } @@ -70,7 +69,7 @@ func (f *FlagSet) Uint32(name string, value uint32, usage string) *uint32 { return p } -// Like Uint32, but accepts a shorthand letter that can be used after a single dash. +// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. 
func (f *FlagSet) Uint32P(name, shorthand string, value uint32, usage string) *uint32 { p := new(uint32) f.Uint32VarP(p, name, shorthand, value, usage) @@ -83,7 +82,7 @@ func Uint32(name string, value uint32, usage string) *uint32 { return CommandLine.Uint32P(name, "", value, usage) } -// Like Uint32, but accepts a shorthand letter that can be used after a single dash. +// Uint32P is like Uint32, but accepts a shorthand letter that can be used after a single dash. func Uint32P(name, shorthand string, value uint32, usage string) *uint32 { return CommandLine.Uint32P(name, shorthand, value, usage) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint64.go b/vendor/github.com/spf13/pflag/uint64.go similarity index 83% rename from Godeps/_workspace/src/github.com/spf13/pflag/uint64.go rename to vendor/github.com/spf13/pflag/uint64.go index 6788bbf..f62240f 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/uint64.go +++ b/vendor/github.com/spf13/pflag/uint64.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- uint64 Value type uint64Value uint64 @@ -23,7 +20,7 @@ func (i *uint64Value) Type() string { return "uint64" } -func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } +func (i *uint64Value) String() string { return strconv.FormatUint(uint64(*i), 10) } func uint64Conv(sval string) (interface{}, error) { v, err := strconv.ParseUint(sval, 0, 64) @@ -48,7 +45,7 @@ func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) f.VarP(newUint64Value(value, p), name, "", usage) } -// Like Uint64Var, but accepts a shorthand letter that can be used after a single dash. +// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { f.VarP(newUint64Value(value, p), name, shorthand, usage) } @@ -59,7 +56,7 @@ func Uint64Var(p *uint64, name string, value uint64, usage string) { CommandLine.VarP(newUint64Value(value, p), name, "", usage) } -// Like Uint64Var, but accepts a shorthand letter that can be used after a single dash. +// Uint64VarP is like Uint64Var, but accepts a shorthand letter that can be used after a single dash. func Uint64VarP(p *uint64, name, shorthand string, value uint64, usage string) { CommandLine.VarP(newUint64Value(value, p), name, shorthand, usage) } @@ -72,7 +69,7 @@ func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { return p } -// Like Uint64, but accepts a shorthand letter that can be used after a single dash. +// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Uint64P(name, shorthand string, value uint64, usage string) *uint64 { p := new(uint64) f.Uint64VarP(p, name, shorthand, value, usage) @@ -85,7 +82,7 @@ func Uint64(name string, value uint64, usage string) *uint64 { return CommandLine.Uint64P(name, "", value, usage) } -// Like Uint64, but accepts a shorthand letter that can be used after a single dash. +// Uint64P is like Uint64, but accepts a shorthand letter that can be used after a single dash. 
func Uint64P(name, shorthand string, value uint64, usage string) *uint64 { return CommandLine.Uint64P(name, shorthand, value, usage) } diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/uint8.go b/vendor/github.com/spf13/pflag/uint8.go similarity index 83% rename from Godeps/_workspace/src/github.com/spf13/pflag/uint8.go rename to vendor/github.com/spf13/pflag/uint8.go index 560c569..bb0e83c 100644 --- a/Godeps/_workspace/src/github.com/spf13/pflag/uint8.go +++ b/vendor/github.com/spf13/pflag/uint8.go @@ -1,9 +1,6 @@ package pflag -import ( - "fmt" - "strconv" -) +import "strconv" // -- uint8 Value type uint8Value uint8 @@ -23,7 +20,7 @@ func (i *uint8Value) Type() string { return "uint8" } -func (i *uint8Value) String() string { return fmt.Sprintf("%v", *i) } +func (i *uint8Value) String() string { return strconv.FormatUint(uint64(*i), 10) } func uint8Conv(sval string) (interface{}, error) { v, err := strconv.ParseUint(sval, 0, 8) @@ -48,7 +45,7 @@ func (f *FlagSet) Uint8Var(p *uint8, name string, value uint8, usage string) { f.VarP(newUint8Value(value, p), name, "", usage) } -// Like Uint8Var, but accepts a shorthand letter that can be used after a single dash. +// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { f.VarP(newUint8Value(value, p), name, shorthand, usage) } @@ -59,7 +56,7 @@ func Uint8Var(p *uint8, name string, value uint8, usage string) { CommandLine.VarP(newUint8Value(value, p), name, "", usage) } -// Like Uint8Var, but accepts a shorthand letter that can be used after a single dash. +// Uint8VarP is like Uint8Var, but accepts a shorthand letter that can be used after a single dash. func Uint8VarP(p *uint8, name, shorthand string, value uint8, usage string) { CommandLine.VarP(newUint8Value(value, p), name, shorthand, usage) } @@ -72,7 +69,7 @@ func (f *FlagSet) Uint8(name string, value uint8, usage string) *uint8 { return p } -// Like Uint8, but accepts a shorthand letter that can be used after a single dash. +// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. func (f *FlagSet) Uint8P(name, shorthand string, value uint8, usage string) *uint8 { p := new(uint8) f.Uint8VarP(p, name, shorthand, value, usage) @@ -85,7 +82,7 @@ func Uint8(name string, value uint8, usage string) *uint8 { return CommandLine.Uint8P(name, "", value, usage) } -// Like Uint8, but accepts a shorthand letter that can be used after a single dash. +// Uint8P is like Uint8, but accepts a shorthand letter that can be used after a single dash. func Uint8P(name, shorthand string, value uint8, usage string) *uint8 { return CommandLine.Uint8P(name, shorthand, value, usage) }
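The per-type conv helpers added throughout these files (intConv, uint16Conv, ipNetConv, and so on) feed pflag's typed getters such as GetUint16, which re-parse a flag's current string value on demand; the getters themselves are defined outside the hunks shown here, so treat their exact names as an assumption. A hedged sketch:

    package main

    import (
        "fmt"

        "github.com/spf13/pflag"
    )

    func main() {
        fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
        fs.Uint16("mtu", 1500, "link MTU")

        _ = fs.Parse([]string{"--mtu=9000"})

        // GetUint16 converts the flag's current value via uint16Conv and
        // returns an error if the flag was registered with a different type.
        mtu, err := fs.GetUint16("mtu")
        fmt.Println(mtu, err) // 9000 <nil>
    }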