// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.

package rekognition

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awsutil"
	"github.com/aws/aws-sdk-go/aws/request"
	"github.com/aws/aws-sdk-go/private/protocol"
	"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
)
const opCompareFaces = "CompareFaces"
|
|
|
|
|
|
|
|
|
|
// CompareFacesRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the CompareFaces operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See CompareFaces for more information on using the CompareFaces
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the CompareFacesRequest method.
|
|
|
|
|
// req, resp := client.CompareFacesRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) CompareFacesRequest(input *CompareFacesInput) (req *request.Request, output *CompareFacesOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opCompareFaces,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &CompareFacesInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &CompareFacesOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// CompareFaces API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Compares a face in the source input image with each of the 100 largest faces
|
|
|
|
|
// detected in the target input image.
|
|
|
|
|
//
|
|
|
|
|
// If the source image contains multiple faces, the service detects the largest
|
|
|
|
|
// face and compares it with each face detected in the target image.
|
|
|
|
|
//
|
|
|
|
|
// You pass the input and target images either as base64-encoded image bytes
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// or as references to images in an Amazon S3 bucket. If you use the AWS CLI
|
|
|
|
|
// to call Amazon Rekognition operations, passing image bytes isn't supported.
|
|
|
|
|
// The image must be formatted as a PNG or JPEG file.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// In response, the operation returns an array of face matches ordered by similarity
|
|
|
|
|
// score in descending order. For each face match, the response provides a bounding
|
|
|
|
|
// box of the face, facial landmarks, pose details (pitch, role, and yaw), quality
|
|
|
|
|
// (brightness and sharpness), and confidence value (indicating the level of
|
|
|
|
|
// confidence that the bounding box contains a face). The response also provides
|
|
|
|
|
// a similarity score, which indicates how closely the faces match.
|
|
|
|
|
//
|
|
|
|
|
// By default, only faces with a similarity score of greater than or equal to
|
|
|
|
|
// 80% are returned in the response. You can change this value by specifying
|
|
|
|
|
// the SimilarityThreshold parameter.
|
|
|
|
|
//
|
|
|
|
|
// CompareFaces also returns an array of faces that don't match the source image.
|
|
|
|
|
// For each face, it returns a bounding box, confidence value, landmarks, pose
|
|
|
|
|
// details, and quality. The response also returns information about the face
|
|
|
|
|
// in the source image, including the bounding box of the face and confidence
|
|
|
|
|
// value.
|
|
|
|
|
//
|
|
|
|
|
// If the image doesn't contain Exif metadata, CompareFaces returns orientation
|
|
|
|
|
// information for the source and target images. Use these values to display
|
|
|
|
|
// the images with the correct image orientation.
|
|
|
|
|
//
|
|
|
|
|
// If no faces are detected in the source or target images, CompareFaces returns
|
|
|
|
|
// an InvalidParameterException error.
|
|
|
|
|
//
|
|
|
|
|
// This is a stateless API operation. That is, data returned by this operation
|
|
|
|
|
// doesn't persist.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// For an example, see Comparing Faces in Images in the Amazon Rekognition Developer
|
|
|
|
|
// Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// This operation requires permissions to perform the rekognition:CompareFaces
|
|
|
|
|
// action.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation CompareFaces for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
|
|
|
|
|
// Amazon Rekognition is unable to access the S3 object specified in the request.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeImageTooLargeException "ImageTooLargeException"
|
|
|
|
|
// The input image size exceeds the allowed limit. For more information, see
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidImageFormatException "InvalidImageFormatException"
|
|
|
|
|
// The provided image format is not supported.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) CompareFaces(input *CompareFacesInput) (*CompareFacesOutput, error) {
|
|
|
|
|
req, out := c.CompareFacesRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// CompareFacesWithContext is the same as CompareFaces with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See CompareFaces for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) CompareFacesWithContext(ctx aws.Context, input *CompareFacesInput, opts ...request.Option) (*CompareFacesOutput, error) {
|
|
|
|
|
req, out := c.CompareFacesRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opCreateCollection = "CreateCollection"
|
|
|
|
|
|
|
|
|
|
// CreateCollectionRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the CreateCollection operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See CreateCollection for more information on using the CreateCollection
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the CreateCollectionRequest method.
|
|
|
|
|
// req, resp := client.CreateCollectionRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) CreateCollectionRequest(input *CreateCollectionInput) (req *request.Request, output *CreateCollectionOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opCreateCollection,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &CreateCollectionInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &CreateCollectionOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// CreateCollection API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Creates a collection in an AWS Region. You can add faces to the collection
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// using the IndexFaces operation.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// For example, you might create collections, one for each of your application
|
|
|
|
|
// users. A user can then index faces using the IndexFaces operation and persist
|
|
|
|
|
// results in a specific collection. Then, a user can search the collection
|
|
|
|
|
// for faces in the user-specific container.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// When you create a collection, it is associated with the latest version of
|
|
|
|
|
// the face model version.
|
|
|
|
|
//
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// Collection names are case-sensitive.
|
|
|
|
|
//
|
|
|
|
|
// This operation requires permissions to perform the rekognition:CreateCollection
|
|
|
|
|
// action.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation CreateCollection for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException"
|
|
|
|
|
// A collection with the specified ID already exists.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) CreateCollection(input *CreateCollectionInput) (*CreateCollectionOutput, error) {
|
|
|
|
|
req, out := c.CreateCollectionRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// CreateCollectionWithContext is the same as CreateCollection with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See CreateCollection for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) CreateCollectionWithContext(ctx aws.Context, input *CreateCollectionInput, opts ...request.Option) (*CreateCollectionOutput, error) {
|
|
|
|
|
req, out := c.CreateCollectionRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opCreateStreamProcessor = "CreateStreamProcessor"
|
|
|
|
|
|
|
|
|
|
// CreateStreamProcessorRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the CreateStreamProcessor operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See CreateStreamProcessor for more information on using the CreateStreamProcessor
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the CreateStreamProcessorRequest method.
|
|
|
|
|
// req, resp := client.CreateStreamProcessorRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) CreateStreamProcessorRequest(input *CreateStreamProcessorInput) (req *request.Request, output *CreateStreamProcessorOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opCreateStreamProcessor,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &CreateStreamProcessorInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &CreateStreamProcessorOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// CreateStreamProcessor API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Creates an Amazon Rekognition stream processor that you can use to detect
|
|
|
|
|
// and recognize faces in a streaming video.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Amazon Rekognition Video is a consumer of live video from Amazon Kinesis
|
|
|
|
|
// Video Streams. Amazon Rekognition Video sends analysis results to Amazon
|
|
|
|
|
// Kinesis Data Streams.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// You provide as input a Kinesis video stream (Input) and a Kinesis data stream
|
|
|
|
|
// (Output) stream. You also specify the face recognition criteria in Settings.
|
|
|
|
|
// For example, the collection containing faces that you want to recognize.
|
|
|
|
|
// Use Name to assign an identifier for the stream processor. You use Name to
|
|
|
|
|
// manage the stream processor. For example, you can start processing the source
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// video by calling StartStreamProcessor with the Name field.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// After you have finished analyzing a streaming video, use StopStreamProcessor
|
|
|
|
|
// to stop processing. You can delete the stream processor by calling DeleteStreamProcessor.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation CreateStreamProcessor for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeLimitExceededException "LimitExceededException"
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// An Amazon Rekognition service limit was exceeded. For example, if you start
|
|
|
|
|
// too many Amazon Rekognition Video jobs concurrently, calls to start operations
|
|
|
|
|
// (StartLabelDetection, for example) will raise a LimitExceededException exception
|
|
|
|
|
// (HTTP status code: 400) until the number of concurrently running jobs is
|
|
|
|
|
// below the Amazon Rekognition service limit.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceInUseException "ResourceInUseException"
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) CreateStreamProcessor(input *CreateStreamProcessorInput) (*CreateStreamProcessorOutput, error) {
|
|
|
|
|
req, out := c.CreateStreamProcessorRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// CreateStreamProcessorWithContext is the same as CreateStreamProcessor with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See CreateStreamProcessor for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) CreateStreamProcessorWithContext(ctx aws.Context, input *CreateStreamProcessorInput, opts ...request.Option) (*CreateStreamProcessorOutput, error) {
|
|
|
|
|
req, out := c.CreateStreamProcessorRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opDeleteCollection = "DeleteCollection"
|
|
|
|
|
|
|
|
|
|
// DeleteCollectionRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the DeleteCollection operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See DeleteCollection for more information on using the DeleteCollection
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the DeleteCollectionRequest method.
|
|
|
|
|
// req, resp := client.DeleteCollectionRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) DeleteCollectionRequest(input *DeleteCollectionInput) (req *request.Request, output *DeleteCollectionOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opDeleteCollection,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &DeleteCollectionInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &DeleteCollectionOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DeleteCollection API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Deletes the specified collection. Note that this operation removes all faces
|
|
|
|
|
// in the collection. For an example, see delete-collection-procedure.
|
|
|
|
|
//
|
|
|
|
|
// This operation requires permissions to perform the rekognition:DeleteCollection
|
|
|
|
|
// action.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation DeleteCollection for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) DeleteCollection(input *DeleteCollectionInput) (*DeleteCollectionOutput, error) {
|
|
|
|
|
req, out := c.DeleteCollectionRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DeleteCollectionWithContext is the same as DeleteCollection with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See DeleteCollection for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) DeleteCollectionWithContext(ctx aws.Context, input *DeleteCollectionInput, opts ...request.Option) (*DeleteCollectionOutput, error) {
|
|
|
|
|
req, out := c.DeleteCollectionRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opDeleteFaces = "DeleteFaces"
|
|
|
|
|
|
|
|
|
|
// DeleteFacesRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the DeleteFaces operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See DeleteFaces for more information on using the DeleteFaces
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the DeleteFacesRequest method.
|
|
|
|
|
// req, resp := client.DeleteFacesRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) DeleteFacesRequest(input *DeleteFacesInput) (req *request.Request, output *DeleteFacesOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opDeleteFaces,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &DeleteFacesInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &DeleteFacesOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DeleteFaces API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Deletes faces from a collection. You specify a collection ID and an array
|
|
|
|
|
// of face IDs to remove from the collection.
|
|
|
|
|
//
|
|
|
|
|
// This operation requires permissions to perform the rekognition:DeleteFaces
|
|
|
|
|
// action.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation DeleteFaces for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) DeleteFaces(input *DeleteFacesInput) (*DeleteFacesOutput, error) {
|
|
|
|
|
req, out := c.DeleteFacesRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DeleteFacesWithContext is the same as DeleteFaces with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See DeleteFaces for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) DeleteFacesWithContext(ctx aws.Context, input *DeleteFacesInput, opts ...request.Option) (*DeleteFacesOutput, error) {
|
|
|
|
|
req, out := c.DeleteFacesRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opDeleteStreamProcessor = "DeleteStreamProcessor"
|
|
|
|
|
|
|
|
|
|
// DeleteStreamProcessorRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the DeleteStreamProcessor operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See DeleteStreamProcessor for more information on using the DeleteStreamProcessor
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the DeleteStreamProcessorRequest method.
|
|
|
|
|
// req, resp := client.DeleteStreamProcessorRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) DeleteStreamProcessorRequest(input *DeleteStreamProcessorInput) (req *request.Request, output *DeleteStreamProcessorOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opDeleteStreamProcessor,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &DeleteStreamProcessorInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &DeleteStreamProcessorOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
2019-01-21 14:27:20 +00:00
|
|
|
|
req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
|
2017-12-08 12:03:10 +00:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DeleteStreamProcessor API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Deletes the stream processor identified by Name. You assign the value for
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Name when you create the stream processor with CreateStreamProcessor. You
|
|
|
|
|
// might not be able to use the same name for a stream processor for a few seconds
|
|
|
|
|
// after calling DeleteStreamProcessor.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation DeleteStreamProcessor for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceInUseException "ResourceInUseException"
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) DeleteStreamProcessor(input *DeleteStreamProcessorInput) (*DeleteStreamProcessorOutput, error) {
|
|
|
|
|
req, out := c.DeleteStreamProcessorRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DeleteStreamProcessorWithContext is the same as DeleteStreamProcessor with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See DeleteStreamProcessor for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) DeleteStreamProcessorWithContext(ctx aws.Context, input *DeleteStreamProcessorInput, opts ...request.Option) (*DeleteStreamProcessorOutput, error) {
|
|
|
|
|
req, out := c.DeleteStreamProcessorRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
const opDescribeCollection = "DescribeCollection"
|
|
|
|
|
|
|
|
|
|
// DescribeCollectionRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the DescribeCollection operation. The "output" return
|
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See DescribeCollection for more information on using the DescribeCollection
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the DescribeCollectionRequest method.
|
|
|
|
|
// req, resp := client.DescribeCollectionRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) DescribeCollectionRequest(input *DescribeCollectionInput) (req *request.Request, output *DescribeCollectionOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opDescribeCollection,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &DescribeCollectionInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &DescribeCollectionOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DescribeCollection API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Describes the specified collection. You can use DescribeCollection to get
|
|
|
|
|
// information, such as the number of faces indexed into a collection and the
|
|
|
|
|
// version of the model used by the collection for face detection.
|
|
|
|
|
//
|
|
|
|
|
// For more information, see Describing a Collection in the Amazon Rekognition
|
|
|
|
|
// Developer Guide.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation DescribeCollection for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) DescribeCollection(input *DescribeCollectionInput) (*DescribeCollectionOutput, error) {
|
|
|
|
|
req, out := c.DescribeCollectionRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DescribeCollectionWithContext is the same as DescribeCollection with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See DescribeCollection for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) DescribeCollectionWithContext(ctx aws.Context, input *DescribeCollectionInput, opts ...request.Option) (*DescribeCollectionOutput, error) {
|
|
|
|
|
req, out := c.DescribeCollectionRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
2017-12-08 12:03:10 +00:00
|
|
|
|
const opDescribeStreamProcessor = "DescribeStreamProcessor"
|
|
|
|
|
|
|
|
|
|
// DescribeStreamProcessorRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the DescribeStreamProcessor operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See DescribeStreamProcessor for more information on using the DescribeStreamProcessor
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the DescribeStreamProcessorRequest method.
|
|
|
|
|
// req, resp := client.DescribeStreamProcessorRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) DescribeStreamProcessorRequest(input *DescribeStreamProcessorInput) (req *request.Request, output *DescribeStreamProcessorOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opDescribeStreamProcessor,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &DescribeStreamProcessorInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &DescribeStreamProcessorOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DescribeStreamProcessor API operation for Amazon Rekognition.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Provides information about a stream processor created by CreateStreamProcessor.
|
|
|
|
|
// You can get information about the input and output streams, the input parameters
|
|
|
|
|
// for the face recognition being performed, and the current status of the stream
|
|
|
|
|
// processor.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation DescribeStreamProcessor for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) DescribeStreamProcessor(input *DescribeStreamProcessorInput) (*DescribeStreamProcessorOutput, error) {
|
|
|
|
|
req, out := c.DescribeStreamProcessorRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DescribeStreamProcessorWithContext is the same as DescribeStreamProcessor with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See DescribeStreamProcessor for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) DescribeStreamProcessorWithContext(ctx aws.Context, input *DescribeStreamProcessorInput, opts ...request.Option) (*DescribeStreamProcessorOutput, error) {
|
|
|
|
|
req, out := c.DescribeStreamProcessorRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opDetectFaces = "DetectFaces"
|
|
|
|
|
|
|
|
|
|
// DetectFacesRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the DetectFaces operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See DetectFaces for more information on using the DetectFaces
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the DetectFacesRequest method.
|
|
|
|
|
// req, resp := client.DetectFacesRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) DetectFacesRequest(input *DetectFacesInput) (req *request.Request, output *DetectFacesOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opDetectFaces,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &DetectFacesInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &DetectFacesOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DetectFaces API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Detects faces within an image that is provided as input.
|
|
|
|
|
//
|
|
|
|
|
// DetectFaces detects the 100 largest faces in the image. For each face detected,
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// the operation returns face details. These details include a bounding box
|
|
|
|
|
// of the face, a confidence value (that the bounding box contains a face),
|
|
|
|
|
// and a fixed set of attributes such as facial landmarks (for example, coordinates
|
|
|
|
|
// of eye and mouth), gender, presence of beard, sunglasses, and so on.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// The face-detection algorithm is most effective on frontal faces. For non-frontal
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// or obscured faces, the algorithm might not detect the faces or might detect
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// faces with lower confidence.
|
|
|
|
|
//
|
|
|
|
|
// You pass the input image either as base64-encoded image bytes or as a reference
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition
|
|
|
|
|
// operations, passing image bytes is not supported. The image must be either
|
|
|
|
|
// a PNG or JPEG formatted file.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// This is a stateless API operation. That is, the operation does not persist
|
|
|
|
|
// any data.
|
|
|
|
|
//
|
|
|
|
|
// This operation requires permissions to perform the rekognition:DetectFaces
|
|
|
|
|
// action.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation DetectFaces for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
|
|
|
|
|
// Amazon Rekognition is unable to access the S3 object specified in the request.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeImageTooLargeException "ImageTooLargeException"
|
|
|
|
|
// The input image size exceeds the allowed limit. For more information, see
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidImageFormatException "InvalidImageFormatException"
|
|
|
|
|
// The provided image format is not supported.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) DetectFaces(input *DetectFacesInput) (*DetectFacesOutput, error) {
|
|
|
|
|
req, out := c.DetectFacesRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DetectFacesWithContext is the same as DetectFaces with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See DetectFaces for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) DetectFacesWithContext(ctx aws.Context, input *DetectFacesInput, opts ...request.Option) (*DetectFacesOutput, error) {
|
|
|
|
|
req, out := c.DetectFacesRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opDetectLabels = "DetectLabels"
|
|
|
|
|
|
|
|
|
|
// DetectLabelsRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the DetectLabels operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See DetectLabels for more information on using the DetectLabels
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the DetectLabelsRequest method.
|
|
|
|
|
// req, resp := client.DetectLabelsRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) DetectLabelsRequest(input *DetectLabelsInput) (req *request.Request, output *DetectLabelsOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opDetectLabels,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &DetectLabelsInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &DetectLabelsOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DetectLabels API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Detects instances of real-world entities within an image (JPEG or PNG) provided
|
|
|
|
|
// as input. This includes objects like flower, tree, and table; events like
|
|
|
|
|
// wedding, graduation, and birthday party; and concepts like landscape, evening,
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// and nature.
|
|
|
|
|
//
|
|
|
|
|
// For an example, see Analyzing Images Stored in an Amazon S3 Bucket in the
|
|
|
|
|
// Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// DetectLabels does not support the detection of activities. However, activity
|
|
|
|
|
// detection is supported for label detection in videos. For more information,
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// see StartLabelDetection in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// You pass the input image as base64-encoded image bytes or as a reference
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// Rekognition operations, passing image bytes is not supported. The image must
|
|
|
|
|
// be either a PNG or JPEG formatted file.
|
|
|
|
|
//
|
|
|
|
|
// For each object, scene, and concept the API returns one or more labels. Each
|
|
|
|
|
// label provides the object name, and the level of confidence that the image
|
|
|
|
|
// contains the object. For example, suppose the input image has a lighthouse,
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// the sea, and a rock. The response includes all three labels, one for each
|
|
|
|
|
// object.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// {Name: lighthouse, Confidence: 98.4629}
|
|
|
|
|
//
|
|
|
|
|
// {Name: rock,Confidence: 79.2097}
|
|
|
|
|
//
|
|
|
|
|
// {Name: sea,Confidence: 75.061}
|
|
|
|
|
//
|
|
|
|
|
// In the preceding example, the operation returns one label for each of the
|
|
|
|
|
// three objects. The operation can also return multiple labels for the same
|
|
|
|
|
// object in the image. For example, if the input image shows a flower (for
|
|
|
|
|
// example, a tulip), the operation might return the following three labels.
|
|
|
|
|
//
|
|
|
|
|
// {Name: flower,Confidence: 99.0562}
|
|
|
|
|
//
|
|
|
|
|
// {Name: plant,Confidence: 99.0562}
|
|
|
|
|
//
|
|
|
|
|
// {Name: tulip,Confidence: 99.0562}
|
|
|
|
|
//
|
|
|
|
|
// In this example, the detection algorithm more precisely identifies the flower
|
|
|
|
|
// as a tulip.
|
|
|
|
|
//
|
|
|
|
|
// In response, the API returns an array of labels. In addition, the response
|
|
|
|
|
// also includes the orientation correction. Optionally, you can specify MinConfidence
|
|
|
|
|
// to control the confidence threshold for the labels returned. The default
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// is 55%. You can also add the MaxLabels parameter to limit the number of labels
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// returned.
|
|
|
|
|
//
|
|
|
|
|
// If the object detected is a person, the operation doesn't provide the same
|
|
|
|
|
// facial details that the DetectFaces operation provides.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// DetectLabels returns bounding boxes for instances of common object labels
|
|
|
|
|
// in an array of Instance objects. An Instance object contains a BoundingBox
|
|
|
|
|
// object, for the location of the label on the image. It also includes the
|
|
|
|
|
// confidence by which the bounding box was detected.
|
|
|
|
|
//
|
|
|
|
|
// DetectLabels also returns a hierarchical taxonomy of detected labels. For
|
|
|
|
|
// example, a detected car might be assigned the label car. The label car has
|
|
|
|
|
// two parent labels: Vehicle (its parent) and Transportation (its grandparent).
|
|
|
|
|
// The response returns the entire list of ancestors for a label. Each ancestor
|
|
|
|
|
// is a unique label in the response. In the previous example, Car, Vehicle,
|
|
|
|
|
// and Transportation are returned as unique labels in the response.
|
|
|
|
|
//
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// This is a stateless API operation. That is, the operation does not persist
|
|
|
|
|
// any data.
|
|
|
|
|
//
|
|
|
|
|
// This operation requires permissions to perform the rekognition:DetectLabels
|
|
|
|
|
// action.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation DetectLabels for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
|
|
|
|
|
// Amazon Rekognition is unable to access the S3 object specified in the request.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeImageTooLargeException "ImageTooLargeException"
|
|
|
|
|
// The input image size exceeds the allowed limit. For more information, see
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidImageFormatException "InvalidImageFormatException"
|
|
|
|
|
// The provided image format is not supported.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) DetectLabels(input *DetectLabelsInput) (*DetectLabelsOutput, error) {
|
|
|
|
|
req, out := c.DetectLabelsRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DetectLabelsWithContext is the same as DetectLabels with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See DetectLabels for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) DetectLabelsWithContext(ctx aws.Context, input *DetectLabelsInput, opts ...request.Option) (*DetectLabelsOutput, error) {
|
|
|
|
|
req, out := c.DetectLabelsRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opDetectModerationLabels = "DetectModerationLabels"
|
|
|
|
|
|
|
|
|
|
// DetectModerationLabelsRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the DetectModerationLabels operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See DetectModerationLabels for more information on using the DetectModerationLabels
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the DetectModerationLabelsRequest method.
|
|
|
|
|
// req, resp := client.DetectModerationLabelsRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) DetectModerationLabelsRequest(input *DetectModerationLabelsInput) (req *request.Request, output *DetectModerationLabelsOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opDetectModerationLabels,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &DetectModerationLabelsInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &DetectModerationLabelsOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DetectModerationLabels API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Detects explicit or suggestive adult content in a specified JPEG or PNG format
|
|
|
|
|
// image. Use DetectModerationLabels to moderate images depending on your requirements.
|
|
|
|
|
// For example, you might want to filter images that contain nudity, but not
|
|
|
|
|
// images containing suggestive content.
|
|
|
|
|
//
|
|
|
|
|
// To filter images, use the labels returned by DetectModerationLabels to determine
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// which types of content are appropriate.
|
|
|
|
|
//
|
|
|
|
|
// For information about moderation labels, see Detecting Unsafe Content in
|
|
|
|
|
// the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// You pass the input image either as base64-encoded image bytes or as a reference
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// Rekognition operations, passing image bytes is not supported. The image must
|
|
|
|
|
// be either a PNG or JPEG formatted file.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation DetectModerationLabels for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
|
|
|
|
|
// Amazon Rekognition is unable to access the S3 object specified in the request.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeImageTooLargeException "ImageTooLargeException"
|
|
|
|
|
// The input image size exceeds the allowed limit. For more information, see
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidImageFormatException "InvalidImageFormatException"
|
|
|
|
|
// The provided image format is not supported.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) DetectModerationLabels(input *DetectModerationLabelsInput) (*DetectModerationLabelsOutput, error) {
|
|
|
|
|
req, out := c.DetectModerationLabelsRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DetectModerationLabelsWithContext is the same as DetectModerationLabels with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See DetectModerationLabels for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) DetectModerationLabelsWithContext(ctx aws.Context, input *DetectModerationLabelsInput, opts ...request.Option) (*DetectModerationLabelsOutput, error) {
|
|
|
|
|
req, out := c.DetectModerationLabelsRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// opDetectText is the service operation name for DetectText.
const opDetectText = "DetectText"
|
|
|
|
|
|
|
|
|
|
// DetectTextRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the DetectText operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See DetectText for more information on using the DetectText
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the DetectTextRequest method.
|
|
|
|
|
// req, resp := client.DetectTextRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) DetectTextRequest(input *DetectTextInput) (req *request.Request, output *DetectTextOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opDetectText,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &DetectTextInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &DetectTextOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DetectText API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Detects text in the input image and converts it into machine-readable text.
|
|
|
|
|
//
|
|
|
|
|
// Pass the input image as base64-encoded image bytes or as a reference to an
|
|
|
|
|
// image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon Rekognition
|
|
|
|
|
// operations, you must pass it as a reference to an image in an Amazon S3 bucket.
|
|
|
|
|
// For the AWS CLI, passing image bytes is not supported. The image must be
|
|
|
|
|
// either a .png or .jpeg formatted file.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// The DetectText operation returns text in an array of TextDetection elements,
|
|
|
|
|
// TextDetections. Each TextDetection element provides information about a single
|
|
|
|
|
// word or line of text that was detected in the image.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// A word is one or more ISO basic latin script characters that are not separated
|
|
|
|
|
// by spaces. DetectText can detect up to 50 words in an image.
|
|
|
|
|
//
|
|
|
|
|
// A line is a string of equally spaced words. A line isn't necessarily a complete
|
|
|
|
|
// sentence. For example, a driver's license number is detected as a line. A
|
|
|
|
|
// line ends when there is no aligned text after it. Also, a line ends when
|
|
|
|
|
// there is a large gap between words, relative to the length of the words.
|
|
|
|
|
// This means, depending on the gap between words, Amazon Rekognition may detect
|
|
|
|
|
// multiple lines in text aligned in the same direction. Periods don't represent
|
|
|
|
|
// the end of a line. If a sentence spans multiple lines, the DetectText operation
|
|
|
|
|
// returns multiple lines.
|
|
|
|
|
//
|
|
|
|
|
// To determine whether a TextDetection element is a line of text or a word,
|
|
|
|
|
// use the TextDetection object Type field.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// To be detected, text must be within +/- 90 degrees orientation of the horizontal
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// axis.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// For more information, see DetectText in the Amazon Rekognition Developer
|
|
|
|
|
// Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation DetectText for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
|
|
|
|
|
// Amazon Rekognition is unable to access the S3 object specified in the request.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeImageTooLargeException "ImageTooLargeException"
|
|
|
|
|
// The input image size exceeds the allowed limit. For more information, see
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidImageFormatException "InvalidImageFormatException"
|
|
|
|
|
// The provided image format is not supported.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) DetectText(input *DetectTextInput) (*DetectTextOutput, error) {
|
|
|
|
|
req, out := c.DetectTextRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// DetectTextWithContext is the same as DetectText with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See DetectText for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) DetectTextWithContext(ctx aws.Context, input *DetectTextInput, opts ...request.Option) (*DetectTextOutput, error) {
|
|
|
|
|
req, out := c.DetectTextRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// opGetCelebrityInfo is the service operation name for GetCelebrityInfo.
const opGetCelebrityInfo = "GetCelebrityInfo"
|
|
|
|
|
|
|
|
|
|
// GetCelebrityInfoRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the GetCelebrityInfo operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See GetCelebrityInfo for more information on using the GetCelebrityInfo
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the GetCelebrityInfoRequest method.
|
|
|
|
|
// req, resp := client.GetCelebrityInfoRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) GetCelebrityInfoRequest(input *GetCelebrityInfoInput) (req *request.Request, output *GetCelebrityInfoOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opGetCelebrityInfo,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &GetCelebrityInfoInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &GetCelebrityInfoOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetCelebrityInfo API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Gets the name and additional information about a celebrity based on his or
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// her Amazon Rekognition ID. The additional information is returned as an array
|
|
|
|
|
// of URLs. If there is no additional information about the celebrity, this
|
|
|
|
|
// list is empty.
|
|
|
|
|
//
|
|
|
|
|
// For more information, see Recognizing Celebrities in an Image in the Amazon
|
|
|
|
|
// Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// This operation requires permissions to perform the rekognition:GetCelebrityInfo
|
|
|
|
|
// action.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation GetCelebrityInfo for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) GetCelebrityInfo(input *GetCelebrityInfoInput) (*GetCelebrityInfoOutput, error) {
|
|
|
|
|
req, out := c.GetCelebrityInfoRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetCelebrityInfoWithContext is the same as GetCelebrityInfo with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See GetCelebrityInfo for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) GetCelebrityInfoWithContext(ctx aws.Context, input *GetCelebrityInfoInput, opts ...request.Option) (*GetCelebrityInfoOutput, error) {
|
|
|
|
|
req, out := c.GetCelebrityInfoRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// opGetCelebrityRecognition is the service operation name for GetCelebrityRecognition.
const opGetCelebrityRecognition = "GetCelebrityRecognition"
|
|
|
|
|
|
|
|
|
|
// GetCelebrityRecognitionRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the GetCelebrityRecognition operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See GetCelebrityRecognition for more information on using the GetCelebrityRecognition
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the GetCelebrityRecognitionRequest method.
|
|
|
|
|
// req, resp := client.GetCelebrityRecognitionRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) GetCelebrityRecognitionRequest(input *GetCelebrityRecognitionInput) (req *request.Request, output *GetCelebrityRecognitionOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opGetCelebrityRecognition,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
Paginator: &request.Paginator{
|
|
|
|
|
InputTokens: []string{"NextToken"},
|
|
|
|
|
OutputTokens: []string{"NextToken"},
|
|
|
|
|
LimitToken: "MaxResults",
|
|
|
|
|
TruncationToken: "",
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &GetCelebrityRecognitionInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &GetCelebrityRecognitionOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetCelebrityRecognition API operation for Amazon Rekognition.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Gets the celebrity recognition results for an Amazon Rekognition Video analysis
|
|
|
|
|
// started by StartCelebrityRecognition.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Celebrity recognition in a video is an asynchronous operation. Analysis is
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// started by a call to StartCelebrityRecognition which returns a job identifier
|
|
|
|
|
// (JobId). When the celebrity recognition operation finishes, Amazon Rekognition
|
|
|
|
|
// Video publishes a completion status to the Amazon Simple Notification Service
|
|
|
|
|
// topic registered in the initial call to StartCelebrityRecognition. To get
|
|
|
|
|
// the results of the celebrity recognition analysis, first check that the status
|
|
|
|
|
// value published to the Amazon SNS topic is SUCCEEDED. If so, call GetCelebrityDetection
|
|
|
|
|
// and pass the job identifier (JobId) from the initial call to StartCelebrityDetection.
|
|
|
|
|
//
|
|
|
|
|
// For more information, see Working With Stored Videos in the Amazon Rekognition
|
|
|
|
|
// Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// GetCelebrityRecognition returns detected celebrities and the time(s) they
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// are detected in an array (Celebrities) of CelebrityRecognition objects. Each
|
|
|
|
|
// CelebrityRecognition contains information about the celebrity in a CelebrityDetail
|
|
|
|
|
// object and the time, Timestamp, the celebrity was detected.
|
|
|
|
|
//
|
|
|
|
|
// GetCelebrityRecognition only returns the default facial attributes (BoundingBox,
|
|
|
|
|
// Confidence, Landmarks, Pose, and Quality). The other facial attributes listed
|
|
|
|
|
// in the Face object of the following response syntax are not returned. For
|
|
|
|
|
// more information, see FaceDetail in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// By default, the Celebrities array is sorted by time (milliseconds from the
|
|
|
|
|
// start of the video). You can also sort the array by celebrity by specifying
|
|
|
|
|
// the value ID in the SortBy input parameter.
|
|
|
|
|
//
|
|
|
|
|
// The CelebrityDetail object includes the celebrity identifier and additional
|
|
|
|
|
// information urls. If you don't store the additional information urls, you
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// can get them later by calling GetCelebrityInfo with the celebrity identifier.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// No information is returned for faces not recognized as celebrities.
|
|
|
|
|
//
|
|
|
|
|
// Use MaxResults parameter to limit the number of labels returned. If there
|
|
|
|
|
// are more results than specified in MaxResults, the value of NextToken in
|
|
|
|
|
// the operation response contains a pagination token for getting the next set
|
|
|
|
|
// of results. To get the next page of results, call GetCelebrityDetection and
|
|
|
|
|
// populate the NextToken request parameter with the token value returned from
|
|
|
|
|
// the previous call to GetCelebrityRecognition.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation GetCelebrityRecognition for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException"
|
|
|
|
|
// Pagination token in the request is not valid.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) GetCelebrityRecognition(input *GetCelebrityRecognitionInput) (*GetCelebrityRecognitionOutput, error) {
|
|
|
|
|
req, out := c.GetCelebrityRecognitionRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetCelebrityRecognitionWithContext is the same as GetCelebrityRecognition with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See GetCelebrityRecognition for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) GetCelebrityRecognitionWithContext(ctx aws.Context, input *GetCelebrityRecognitionInput, opts ...request.Option) (*GetCelebrityRecognitionOutput, error) {
|
|
|
|
|
req, out := c.GetCelebrityRecognitionRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetCelebrityRecognitionPages iterates over the pages of a GetCelebrityRecognition operation,
|
|
|
|
|
// calling the "fn" function with the response data for each page. To stop
|
|
|
|
|
// iterating, return false from the fn function.
|
|
|
|
|
//
|
|
|
|
|
// See GetCelebrityRecognition method for more information on how to use this operation.
|
|
|
|
|
//
|
|
|
|
|
// Note: This operation can generate multiple requests to a service.
|
|
|
|
|
//
|
|
|
|
|
// // Example iterating over at most 3 pages of a GetCelebrityRecognition operation.
|
|
|
|
|
// pageNum := 0
|
|
|
|
|
// err := client.GetCelebrityRecognitionPages(params,
|
|
|
|
|
// func(page *GetCelebrityRecognitionOutput, lastPage bool) bool {
|
|
|
|
|
// pageNum++
|
|
|
|
|
// fmt.Println(page)
|
|
|
|
|
// return pageNum <= 3
|
|
|
|
|
// })
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) GetCelebrityRecognitionPages(input *GetCelebrityRecognitionInput, fn func(*GetCelebrityRecognitionOutput, bool) bool) error {
|
|
|
|
|
return c.GetCelebrityRecognitionPagesWithContext(aws.BackgroundContext(), input, fn)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetCelebrityRecognitionPagesWithContext same as GetCelebrityRecognitionPages except
|
|
|
|
|
// it takes a Context and allows setting request options on the pages.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) GetCelebrityRecognitionPagesWithContext(ctx aws.Context, input *GetCelebrityRecognitionInput, fn func(*GetCelebrityRecognitionOutput, bool) bool, opts ...request.Option) error {
|
|
|
|
|
p := request.Pagination{
|
|
|
|
|
NewRequest: func() (*request.Request, error) {
|
|
|
|
|
var inCpy *GetCelebrityRecognitionInput
|
|
|
|
|
if input != nil {
|
|
|
|
|
tmp := *input
|
|
|
|
|
inCpy = &tmp
|
|
|
|
|
}
|
|
|
|
|
req, _ := c.GetCelebrityRecognitionRequest(inCpy)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return req, nil
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cont := true
|
|
|
|
|
for p.Next() && cont {
|
|
|
|
|
cont = fn(p.Page().(*GetCelebrityRecognitionOutput), !p.HasNextPage())
|
|
|
|
|
}
|
|
|
|
|
return p.Err()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// opGetContentModeration is the service operation name for GetContentModeration.
const opGetContentModeration = "GetContentModeration"
|
|
|
|
|
|
|
|
|
|
// GetContentModerationRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the GetContentModeration operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See GetContentModeration for more information on using the GetContentModeration
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the GetContentModerationRequest method.
|
|
|
|
|
// req, resp := client.GetContentModerationRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) GetContentModerationRequest(input *GetContentModerationInput) (req *request.Request, output *GetContentModerationOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opGetContentModeration,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
Paginator: &request.Paginator{
|
|
|
|
|
InputTokens: []string{"NextToken"},
|
|
|
|
|
OutputTokens: []string{"NextToken"},
|
|
|
|
|
LimitToken: "MaxResults",
|
|
|
|
|
TruncationToken: "",
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &GetContentModerationInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &GetContentModerationOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetContentModeration API operation for Amazon Rekognition.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Gets the content moderation analysis results for an Amazon Rekognition Video
|
|
|
|
|
// analysis started by StartContentModeration.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Content moderation analysis of a video is an asynchronous operation. You
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// start analysis by calling StartContentModeration, which returns a job identifier
|
|
|
|
|
// (JobId). When analysis finishes, Amazon Rekognition Video publishes a completion
|
|
|
|
|
// status to the Amazon Simple Notification Service topic registered in the
|
|
|
|
|
// initial call to StartContentModeration. To get the results of the content
|
|
|
|
|
// moderation analysis, first check that the status value published to the Amazon
|
|
|
|
|
// SNS topic is SUCCEEDED. If so, call GetContentModeration and pass the job
|
|
|
|
|
// identifier (JobId) from the initial call to StartContentModeration.
|
|
|
|
|
//
|
|
|
|
|
// For more information, see Working with Stored Videos in the Amazon Rekognition
|
|
|
|
|
// Developers Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// GetContentModeration returns detected content moderation labels, and the
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// time they are detected, in an array, ModerationLabels, of ContentModerationDetection
|
|
|
|
|
// objects.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// By default, the moderated labels are returned sorted by time, in milliseconds
|
|
|
|
|
// from the start of the video. You can also sort them by moderated label by
|
|
|
|
|
// specifying NAME for the SortBy input parameter.
|
|
|
|
|
//
|
|
|
|
|
// Since video analysis can return a large number of results, use the MaxResults
|
|
|
|
|
// parameter to limit the number of labels returned in a single call to GetContentModeration.
|
|
|
|
|
// If there are more results than specified in MaxResults, the value of NextToken
|
|
|
|
|
// in the operation response contains a pagination token for getting the next
|
|
|
|
|
// set of results. To get the next page of results, call GetContentModeration
|
|
|
|
|
// and populate the NextToken request parameter with the value of NextToken
|
|
|
|
|
// returned from the previous call to GetContentModeration.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// For more information, see Detecting Unsafe Content in the Amazon Rekognition
|
|
|
|
|
// Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation GetContentModeration for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException"
|
|
|
|
|
// Pagination token in the request is not valid.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) GetContentModeration(input *GetContentModerationInput) (*GetContentModerationOutput, error) {
|
|
|
|
|
req, out := c.GetContentModerationRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetContentModerationWithContext is the same as GetContentModeration with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See GetContentModeration for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) GetContentModerationWithContext(ctx aws.Context, input *GetContentModerationInput, opts ...request.Option) (*GetContentModerationOutput, error) {
|
|
|
|
|
req, out := c.GetContentModerationRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetContentModerationPages iterates over the pages of a GetContentModeration operation,
|
|
|
|
|
// calling the "fn" function with the response data for each page. To stop
|
|
|
|
|
// iterating, return false from the fn function.
|
|
|
|
|
//
|
|
|
|
|
// See GetContentModeration method for more information on how to use this operation.
|
|
|
|
|
//
|
|
|
|
|
// Note: This operation can generate multiple requests to a service.
|
|
|
|
|
//
|
|
|
|
|
// // Example iterating over at most 3 pages of a GetContentModeration operation.
|
|
|
|
|
// pageNum := 0
|
|
|
|
|
// err := client.GetContentModerationPages(params,
|
|
|
|
|
// func(page *GetContentModerationOutput, lastPage bool) bool {
|
|
|
|
|
// pageNum++
|
|
|
|
|
// fmt.Println(page)
|
|
|
|
|
// return pageNum <= 3
|
|
|
|
|
// })
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) GetContentModerationPages(input *GetContentModerationInput, fn func(*GetContentModerationOutput, bool) bool) error {
|
|
|
|
|
return c.GetContentModerationPagesWithContext(aws.BackgroundContext(), input, fn)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetContentModerationPagesWithContext same as GetContentModerationPages except
|
|
|
|
|
// it takes a Context and allows setting request options on the pages.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) GetContentModerationPagesWithContext(ctx aws.Context, input *GetContentModerationInput, fn func(*GetContentModerationOutput, bool) bool, opts ...request.Option) error {
|
|
|
|
|
p := request.Pagination{
|
|
|
|
|
NewRequest: func() (*request.Request, error) {
|
|
|
|
|
var inCpy *GetContentModerationInput
|
|
|
|
|
if input != nil {
|
|
|
|
|
tmp := *input
|
|
|
|
|
inCpy = &tmp
|
|
|
|
|
}
|
|
|
|
|
req, _ := c.GetContentModerationRequest(inCpy)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return req, nil
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cont := true
|
|
|
|
|
for p.Next() && cont {
|
|
|
|
|
cont = fn(p.Page().(*GetContentModerationOutput), !p.HasNextPage())
|
|
|
|
|
}
|
|
|
|
|
return p.Err()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opGetFaceDetection = "GetFaceDetection"
|
|
|
|
|
|
|
|
|
|
// GetFaceDetectionRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the GetFaceDetection operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See GetFaceDetection for more information on using the GetFaceDetection
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the GetFaceDetectionRequest method.
|
|
|
|
|
// req, resp := client.GetFaceDetectionRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) GetFaceDetectionRequest(input *GetFaceDetectionInput) (req *request.Request, output *GetFaceDetectionOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opGetFaceDetection,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
Paginator: &request.Paginator{
|
|
|
|
|
InputTokens: []string{"NextToken"},
|
|
|
|
|
OutputTokens: []string{"NextToken"},
|
|
|
|
|
LimitToken: "MaxResults",
|
|
|
|
|
TruncationToken: "",
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &GetFaceDetectionInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &GetFaceDetectionOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetFaceDetection API operation for Amazon Rekognition.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Gets face detection results for a Amazon Rekognition Video analysis started
|
|
|
|
|
// by StartFaceDetection.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Face detection with Amazon Rekognition Video is an asynchronous operation.
|
|
|
|
|
// You start face detection by calling StartFaceDetection which returns a job
|
|
|
|
|
// identifier (JobId). When the face detection operation finishes, Amazon Rekognition
|
|
|
|
|
// Video publishes a completion status to the Amazon Simple Notification Service
|
|
|
|
|
// topic registered in the initial call to StartFaceDetection. To get the results
|
|
|
|
|
// of the face detection operation, first check that the status value published
|
|
|
|
|
// to the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass
|
|
|
|
|
// the job identifier (JobId) from the initial call to StartFaceDetection.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// GetFaceDetection returns an array of detected faces (Faces) sorted by the
|
|
|
|
|
// time the faces were detected.
|
|
|
|
|
//
|
|
|
|
|
// Use MaxResults parameter to limit the number of labels returned. If there
|
|
|
|
|
// are more results than specified in MaxResults, the value of NextToken in
|
|
|
|
|
// the operation response contains a pagination token for getting the next set
|
|
|
|
|
// of results. To get the next page of results, call GetFaceDetection and populate
|
|
|
|
|
// the NextToken request parameter with the token value returned from the previous
|
|
|
|
|
// call to GetFaceDetection.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation GetFaceDetection for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException"
|
|
|
|
|
// Pagination token in the request is not valid.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) GetFaceDetection(input *GetFaceDetectionInput) (*GetFaceDetectionOutput, error) {
|
|
|
|
|
req, out := c.GetFaceDetectionRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetFaceDetectionWithContext is the same as GetFaceDetection with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See GetFaceDetection for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) GetFaceDetectionWithContext(ctx aws.Context, input *GetFaceDetectionInput, opts ...request.Option) (*GetFaceDetectionOutput, error) {
|
|
|
|
|
req, out := c.GetFaceDetectionRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetFaceDetectionPages iterates over the pages of a GetFaceDetection operation,
|
|
|
|
|
// calling the "fn" function with the response data for each page. To stop
|
|
|
|
|
// iterating, return false from the fn function.
|
|
|
|
|
//
|
|
|
|
|
// See GetFaceDetection method for more information on how to use this operation.
|
|
|
|
|
//
|
|
|
|
|
// Note: This operation can generate multiple requests to a service.
|
|
|
|
|
//
|
|
|
|
|
// // Example iterating over at most 3 pages of a GetFaceDetection operation.
|
|
|
|
|
// pageNum := 0
|
|
|
|
|
// err := client.GetFaceDetectionPages(params,
|
|
|
|
|
// func(page *GetFaceDetectionOutput, lastPage bool) bool {
|
|
|
|
|
// pageNum++
|
|
|
|
|
// fmt.Println(page)
|
|
|
|
|
// return pageNum <= 3
|
|
|
|
|
// })
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) GetFaceDetectionPages(input *GetFaceDetectionInput, fn func(*GetFaceDetectionOutput, bool) bool) error {
|
|
|
|
|
return c.GetFaceDetectionPagesWithContext(aws.BackgroundContext(), input, fn)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetFaceDetectionPagesWithContext same as GetFaceDetectionPages except
|
|
|
|
|
// it takes a Context and allows setting request options on the pages.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) GetFaceDetectionPagesWithContext(ctx aws.Context, input *GetFaceDetectionInput, fn func(*GetFaceDetectionOutput, bool) bool, opts ...request.Option) error {
|
|
|
|
|
p := request.Pagination{
|
|
|
|
|
NewRequest: func() (*request.Request, error) {
|
|
|
|
|
var inCpy *GetFaceDetectionInput
|
|
|
|
|
if input != nil {
|
|
|
|
|
tmp := *input
|
|
|
|
|
inCpy = &tmp
|
|
|
|
|
}
|
|
|
|
|
req, _ := c.GetFaceDetectionRequest(inCpy)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return req, nil
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cont := true
|
|
|
|
|
for p.Next() && cont {
|
|
|
|
|
cont = fn(p.Page().(*GetFaceDetectionOutput), !p.HasNextPage())
|
|
|
|
|
}
|
|
|
|
|
return p.Err()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opGetFaceSearch = "GetFaceSearch"
|
|
|
|
|
|
|
|
|
|
// GetFaceSearchRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the GetFaceSearch operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See GetFaceSearch for more information on using the GetFaceSearch
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the GetFaceSearchRequest method.
|
|
|
|
|
// req, resp := client.GetFaceSearchRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) GetFaceSearchRequest(input *GetFaceSearchInput) (req *request.Request, output *GetFaceSearchOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opGetFaceSearch,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
Paginator: &request.Paginator{
|
|
|
|
|
InputTokens: []string{"NextToken"},
|
|
|
|
|
OutputTokens: []string{"NextToken"},
|
|
|
|
|
LimitToken: "MaxResults",
|
|
|
|
|
TruncationToken: "",
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &GetFaceSearchInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &GetFaceSearchOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetFaceSearch API operation for Amazon Rekognition.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Gets the face search results for Amazon Rekognition Video face search started
|
|
|
|
|
// by StartFaceSearch. The search returns faces in a collection that match the
|
|
|
|
|
// faces of persons detected in a video. It also includes the time(s) that faces
|
|
|
|
|
// are matched in the video.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Face search in a video is an asynchronous operation. You start face search
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// by calling to StartFaceSearch which returns a job identifier (JobId). When
|
|
|
|
|
// the search operation finishes, Amazon Rekognition Video publishes a completion
|
|
|
|
|
// status to the Amazon Simple Notification Service topic registered in the
|
|
|
|
|
// initial call to StartFaceSearch. To get the search results, first check that
|
|
|
|
|
// the status value published to the Amazon SNS topic is SUCCEEDED. If so, call
|
|
|
|
|
// GetFaceSearch and pass the job identifier (JobId) from the initial call to
|
|
|
|
|
// StartFaceSearch.
|
|
|
|
|
//
|
|
|
|
|
// For more information, see Searching Faces in a Collection in the Amazon Rekognition
|
|
|
|
|
// Developer Guide.
|
|
|
|
|
//
|
|
|
|
|
// The search results are retured in an array, Persons, of PersonMatch objects.
|
|
|
|
|
// EachPersonMatch element contains details about the matching faces in the
|
|
|
|
|
// input collection, person information (facial attributes, bounding boxes,
|
|
|
|
|
// and person identifer) for the matched person, and the time the person was
|
|
|
|
|
// matched in the video.
|
|
|
|
|
//
|
|
|
|
|
// GetFaceSearch only returns the default facial attributes (BoundingBox, Confidence,
|
|
|
|
|
// Landmarks, Pose, and Quality). The other facial attributes listed in the
|
|
|
|
|
// Face object of the following response syntax are not returned. For more information,
|
|
|
|
|
// see FaceDetail in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// By default, the Persons array is sorted by the time, in milliseconds from
|
|
|
|
|
// the start of the video, persons are matched. You can also sort by persons
|
|
|
|
|
// by specifying INDEX for the SORTBY input parameter.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation GetFaceSearch for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException"
|
|
|
|
|
// Pagination token in the request is not valid.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) GetFaceSearch(input *GetFaceSearchInput) (*GetFaceSearchOutput, error) {
|
|
|
|
|
req, out := c.GetFaceSearchRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetFaceSearchWithContext is the same as GetFaceSearch with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See GetFaceSearch for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) GetFaceSearchWithContext(ctx aws.Context, input *GetFaceSearchInput, opts ...request.Option) (*GetFaceSearchOutput, error) {
|
|
|
|
|
req, out := c.GetFaceSearchRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetFaceSearchPages iterates over the pages of a GetFaceSearch operation,
|
|
|
|
|
// calling the "fn" function with the response data for each page. To stop
|
|
|
|
|
// iterating, return false from the fn function.
|
|
|
|
|
//
|
|
|
|
|
// See GetFaceSearch method for more information on how to use this operation.
|
|
|
|
|
//
|
|
|
|
|
// Note: This operation can generate multiple requests to a service.
|
|
|
|
|
//
|
|
|
|
|
// // Example iterating over at most 3 pages of a GetFaceSearch operation.
|
|
|
|
|
// pageNum := 0
|
|
|
|
|
// err := client.GetFaceSearchPages(params,
|
|
|
|
|
// func(page *GetFaceSearchOutput, lastPage bool) bool {
|
|
|
|
|
// pageNum++
|
|
|
|
|
// fmt.Println(page)
|
|
|
|
|
// return pageNum <= 3
|
|
|
|
|
// })
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) GetFaceSearchPages(input *GetFaceSearchInput, fn func(*GetFaceSearchOutput, bool) bool) error {
|
|
|
|
|
return c.GetFaceSearchPagesWithContext(aws.BackgroundContext(), input, fn)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetFaceSearchPagesWithContext same as GetFaceSearchPages except
|
|
|
|
|
// it takes a Context and allows setting request options on the pages.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) GetFaceSearchPagesWithContext(ctx aws.Context, input *GetFaceSearchInput, fn func(*GetFaceSearchOutput, bool) bool, opts ...request.Option) error {
|
|
|
|
|
p := request.Pagination{
|
|
|
|
|
NewRequest: func() (*request.Request, error) {
|
|
|
|
|
var inCpy *GetFaceSearchInput
|
|
|
|
|
if input != nil {
|
|
|
|
|
tmp := *input
|
|
|
|
|
inCpy = &tmp
|
|
|
|
|
}
|
|
|
|
|
req, _ := c.GetFaceSearchRequest(inCpy)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return req, nil
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cont := true
|
|
|
|
|
for p.Next() && cont {
|
|
|
|
|
cont = fn(p.Page().(*GetFaceSearchOutput), !p.HasNextPage())
|
|
|
|
|
}
|
|
|
|
|
return p.Err()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opGetLabelDetection = "GetLabelDetection"
|
|
|
|
|
|
|
|
|
|
// GetLabelDetectionRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the GetLabelDetection operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See GetLabelDetection for more information on using the GetLabelDetection
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the GetLabelDetectionRequest method.
|
|
|
|
|
// req, resp := client.GetLabelDetectionRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) GetLabelDetectionRequest(input *GetLabelDetectionInput) (req *request.Request, output *GetLabelDetectionOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opGetLabelDetection,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
Paginator: &request.Paginator{
|
|
|
|
|
InputTokens: []string{"NextToken"},
|
|
|
|
|
OutputTokens: []string{"NextToken"},
|
|
|
|
|
LimitToken: "MaxResults",
|
|
|
|
|
TruncationToken: "",
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &GetLabelDetectionInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &GetLabelDetectionOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetLabelDetection API operation for Amazon Rekognition.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Gets the label detection results of a Amazon Rekognition Video analysis started
|
|
|
|
|
// by StartLabelDetection.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// The label detection operation is started by a call to StartLabelDetection
|
|
|
|
|
// which returns a job identifier (JobId). When the label detection operation
|
|
|
|
|
// finishes, Amazon Rekognition publishes a completion status to the Amazon
|
|
|
|
|
// Simple Notification Service topic registered in the initial call to StartlabelDetection.
|
|
|
|
|
// To get the results of the label detection operation, first check that the
|
|
|
|
|
// status value published to the Amazon SNS topic is SUCCEEDED. If so, call
|
|
|
|
|
// GetLabelDetection and pass the job identifier (JobId) from the initial call
|
|
|
|
|
// to StartLabelDetection.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// GetLabelDetection returns an array of detected labels (Labels) sorted by
|
|
|
|
|
// the time the labels were detected. You can also sort by the label name by
|
|
|
|
|
// specifying NAME for the SortBy input parameter.
|
|
|
|
|
//
|
|
|
|
|
// The labels returned include the label name, the percentage confidence in
|
|
|
|
|
// the accuracy of the detected label, and the time the label was detected in
|
|
|
|
|
// the video.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// The returned labels also include bounding box information for common objects,
|
|
|
|
|
// a hierarchical taxonomy of detected labels, and the version of the label
|
|
|
|
|
// model used for detection.
|
|
|
|
|
//
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// Use MaxResults parameter to limit the number of labels returned. If there
|
|
|
|
|
// are more results than specified in MaxResults, the value of NextToken in
|
|
|
|
|
// the operation response contains a pagination token for getting the next set
|
|
|
|
|
// of results. To get the next page of results, call GetlabelDetection and populate
|
|
|
|
|
// the NextToken request parameter with the token value returned from the previous
|
|
|
|
|
// call to GetLabelDetection.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation GetLabelDetection for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException"
|
|
|
|
|
// Pagination token in the request is not valid.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) GetLabelDetection(input *GetLabelDetectionInput) (*GetLabelDetectionOutput, error) {
|
|
|
|
|
req, out := c.GetLabelDetectionRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetLabelDetectionWithContext is the same as GetLabelDetection with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See GetLabelDetection for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) GetLabelDetectionWithContext(ctx aws.Context, input *GetLabelDetectionInput, opts ...request.Option) (*GetLabelDetectionOutput, error) {
|
|
|
|
|
req, out := c.GetLabelDetectionRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetLabelDetectionPages iterates over the pages of a GetLabelDetection operation,
|
|
|
|
|
// calling the "fn" function with the response data for each page. To stop
|
|
|
|
|
// iterating, return false from the fn function.
|
|
|
|
|
//
|
|
|
|
|
// See GetLabelDetection method for more information on how to use this operation.
|
|
|
|
|
//
|
|
|
|
|
// Note: This operation can generate multiple requests to a service.
|
|
|
|
|
//
|
|
|
|
|
// // Example iterating over at most 3 pages of a GetLabelDetection operation.
|
|
|
|
|
// pageNum := 0
|
|
|
|
|
// err := client.GetLabelDetectionPages(params,
|
|
|
|
|
// func(page *GetLabelDetectionOutput, lastPage bool) bool {
|
|
|
|
|
// pageNum++
|
|
|
|
|
// fmt.Println(page)
|
|
|
|
|
// return pageNum <= 3
|
|
|
|
|
// })
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) GetLabelDetectionPages(input *GetLabelDetectionInput, fn func(*GetLabelDetectionOutput, bool) bool) error {
|
|
|
|
|
return c.GetLabelDetectionPagesWithContext(aws.BackgroundContext(), input, fn)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetLabelDetectionPagesWithContext same as GetLabelDetectionPages except
|
|
|
|
|
// it takes a Context and allows setting request options on the pages.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) GetLabelDetectionPagesWithContext(ctx aws.Context, input *GetLabelDetectionInput, fn func(*GetLabelDetectionOutput, bool) bool, opts ...request.Option) error {
|
|
|
|
|
p := request.Pagination{
|
|
|
|
|
NewRequest: func() (*request.Request, error) {
|
|
|
|
|
var inCpy *GetLabelDetectionInput
|
|
|
|
|
if input != nil {
|
|
|
|
|
tmp := *input
|
|
|
|
|
inCpy = &tmp
|
|
|
|
|
}
|
|
|
|
|
req, _ := c.GetLabelDetectionRequest(inCpy)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return req, nil
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cont := true
|
|
|
|
|
for p.Next() && cont {
|
|
|
|
|
cont = fn(p.Page().(*GetLabelDetectionOutput), !p.HasNextPage())
|
|
|
|
|
}
|
|
|
|
|
return p.Err()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opGetPersonTracking = "GetPersonTracking"
|
|
|
|
|
|
|
|
|
|
// GetPersonTrackingRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the GetPersonTracking operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See GetPersonTracking for more information on using the GetPersonTracking
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the GetPersonTrackingRequest method.
|
|
|
|
|
// req, resp := client.GetPersonTrackingRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) GetPersonTrackingRequest(input *GetPersonTrackingInput) (req *request.Request, output *GetPersonTrackingOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opGetPersonTracking,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
Paginator: &request.Paginator{
|
|
|
|
|
InputTokens: []string{"NextToken"},
|
|
|
|
|
OutputTokens: []string{"NextToken"},
|
|
|
|
|
LimitToken: "MaxResults",
|
|
|
|
|
TruncationToken: "",
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &GetPersonTrackingInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &GetPersonTrackingOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetPersonTracking API operation for Amazon Rekognition.
//
// Gets the path tracking results of an Amazon Rekognition Video analysis started
// by StartPersonTracking.
//
// The person path tracking operation is started by a call to StartPersonTracking
// which returns a job identifier (JobId). When the operation finishes, Amazon
// Rekognition Video publishes a completion status to the Amazon Simple Notification
// Service topic registered in the initial call to StartPersonTracking.
//
// To get the results of the person path tracking operation, first check that
// the status value published to the Amazon SNS topic is SUCCEEDED. If so, call
// GetPersonTracking and pass the job identifier (JobId) from the initial call
// to StartPersonTracking.
//
// GetPersonTracking returns an array, Persons, of tracked persons and the time(s)
// their paths were tracked in the video.
//
// GetPersonTracking only returns the default facial attributes (BoundingBox,
// Confidence, Landmarks, Pose, and Quality). The other facial attributes listed
// in the Face object of the following response syntax are not returned.
//
// For more information, see FaceDetail in the Amazon Rekognition Developer
// Guide.
//
// By default, the array is sorted by the time(s) a person's path is tracked
// in the video. You can sort by tracked persons by specifying INDEX for the
// SortBy input parameter.
//
// Use the MaxResults parameter to limit the number of items returned. If there
// are more results than specified in MaxResults, the value of NextToken in
// the operation response contains a pagination token for getting the next set
// of results. To get the next page of results, call GetPersonTracking and populate
// the NextToken request parameter with the token value returned from the previous
// call to GetPersonTracking.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation GetPersonTracking for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeAccessDeniedException "AccessDeniedException"
//   You are not authorized to perform the action.
//
//   * ErrCodeInternalServerError "InternalServerError"
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ErrCodeInvalidParameterException "InvalidParameterException"
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException"
//   Pagination token in the request is not valid.
//
//   * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   The collection specified in the request cannot be found.
//
//   * ErrCodeThrottlingException "ThrottlingException"
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
func (c *Rekognition) GetPersonTracking(input *GetPersonTrackingInput) (*GetPersonTrackingOutput, error) {
	req, out := c.GetPersonTrackingRequest(input)
	return out, req.Send()
}
|
|
|
|
|
|
|
|
|
|
// GetPersonTrackingWithContext is the same as GetPersonTracking with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See GetPersonTracking for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) GetPersonTrackingWithContext(ctx aws.Context, input *GetPersonTrackingInput, opts ...request.Option) (*GetPersonTrackingOutput, error) {
|
|
|
|
|
req, out := c.GetPersonTrackingRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetPersonTrackingPages iterates over the pages of a GetPersonTracking operation,
|
|
|
|
|
// calling the "fn" function with the response data for each page. To stop
|
|
|
|
|
// iterating, return false from the fn function.
|
|
|
|
|
//
|
|
|
|
|
// See GetPersonTracking method for more information on how to use this operation.
|
|
|
|
|
//
|
|
|
|
|
// Note: This operation can generate multiple requests to a service.
|
|
|
|
|
//
|
|
|
|
|
// // Example iterating over at most 3 pages of a GetPersonTracking operation.
|
|
|
|
|
// pageNum := 0
|
|
|
|
|
// err := client.GetPersonTrackingPages(params,
|
|
|
|
|
// func(page *GetPersonTrackingOutput, lastPage bool) bool {
|
|
|
|
|
// pageNum++
|
|
|
|
|
// fmt.Println(page)
|
|
|
|
|
// return pageNum <= 3
|
|
|
|
|
// })
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) GetPersonTrackingPages(input *GetPersonTrackingInput, fn func(*GetPersonTrackingOutput, bool) bool) error {
|
|
|
|
|
return c.GetPersonTrackingPagesWithContext(aws.BackgroundContext(), input, fn)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GetPersonTrackingPagesWithContext same as GetPersonTrackingPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) GetPersonTrackingPagesWithContext(ctx aws.Context, input *GetPersonTrackingInput, fn func(*GetPersonTrackingOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			// Copy the caller's input so pagination never mutates it.
			var inCpy *GetPersonTrackingInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.GetPersonTrackingRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	// Note: p.Next() is intentionally evaluated before cont, matching the
	// generated SDK's pagination semantics.
	cont := true
	for p.Next() && cont {
		cont = fn(p.Page().(*GetPersonTrackingOutput), !p.HasNextPage())
	}
	return p.Err()
}
|
|
|
|
|
|
|
|
|
|
const opIndexFaces = "IndexFaces"
|
|
|
|
|
|
|
|
|
|
// IndexFacesRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the IndexFaces operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See IndexFaces for more information on using the IndexFaces
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the IndexFacesRequest method.
|
|
|
|
|
// req, resp := client.IndexFacesRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) IndexFacesRequest(input *IndexFacesInput) (req *request.Request, output *IndexFacesOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opIndexFaces,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &IndexFacesInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &IndexFacesOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// IndexFaces API operation for Amazon Rekognition.
//
// Detects faces in the input image and adds them to the specified collection.
//
// Amazon Rekognition doesn't save the actual faces that are detected. Instead,
// the underlying detection algorithm first detects the faces in the input image.
// For each face, the algorithm extracts facial features into a feature vector,
// and stores it in the backend database. Amazon Rekognition uses feature vectors
// when it performs face match and search operations using the SearchFaces and
// SearchFacesByImage operations.
//
// For more information, see Adding Faces to a Collection in the Amazon Rekognition
// Developer Guide.
//
// To get the number of faces in a collection, call DescribeCollection.
//
// If you're using version 1.0 of the face detection model, IndexFaces indexes
// the 15 largest faces in the input image. Later versions of the face detection
// model index the 100 largest faces in the input image.
//
// If you're using version 4 or later of the face model, image orientation information
// is not returned in the OrientationCorrection field.
//
// To determine which version of the model you're using, call DescribeCollection
// and supply the collection ID. You can also get the model version from the
// value of FaceModelVersion in the response from IndexFaces.
//
// For more information, see Model Versioning in the Amazon Rekognition Developer
// Guide.
//
// If you provide the optional ExternalImageID for the input image you provided,
// Amazon Rekognition associates this ID with all faces that it detects. When
// you call the ListFaces operation, the response returns the external ID. You
// can use this external image ID to create a client-side index to associate
// the faces with each image. You can then use the index to find all faces in
// an image.
//
// You can specify the maximum number of faces to index with the MaxFaces input
// parameter. This is useful when you want to index the largest faces in an
// image and don't want to index smaller faces, such as those belonging to people
// standing in the background.
//
// The QualityFilter input parameter allows you to filter out detected faces
// that don’t meet the required quality bar chosen by Amazon Rekognition. The
// quality bar is based on a variety of common use cases. By default, IndexFaces
// filters detected faces. You can also explicitly filter detected faces by
// specifying AUTO for the value of QualityFilter. If you do not want to filter
// detected faces, specify NONE.
//
// To use quality filtering, you need a collection associated with version 3
// of the face model. To get the version of the face model associated with a
// collection, call DescribeCollection.
//
// Information about faces detected in an image, but not indexed, is returned
// in an array of UnindexedFace objects, UnindexedFaces. Faces aren't indexed
// for reasons such as:
//
//   * The number of faces detected exceeds the value of the MaxFaces request
//   parameter.
//
//   * The face is too small compared to the image dimensions.
//
//   * The face is too blurry.
//
//   * The image is too dark.
//
//   * The face has an extreme pose.
//
// In response, the IndexFaces operation returns an array of metadata for all
// detected faces, FaceRecords. This includes:
//
//   * The bounding box, BoundingBox, of the detected face.
//
//   * A confidence value, Confidence, which indicates the confidence that
//   the bounding box contains a face.
//
//   * A face ID, faceId, assigned by the service for each face that's detected
//   and stored.
//
//   * An image ID, ImageId, assigned by the service for the input image.
//
// If you request all facial attributes (by using the detectionAttributes parameter),
// Amazon Rekognition returns detailed facial attributes, such as facial landmarks
// (for example, location of eye and mouth) and other facial attributes like
// gender. If you provide the same image, specify the same collection, and use
// the same external ID in the IndexFaces operation, Amazon Rekognition doesn't
// save duplicate face metadata.
//
// The input image is passed either as base64-encoded image bytes, or as a reference
// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
// Rekognition operations, passing image bytes isn't supported. The image must
// be formatted as a PNG or JPEG file.
//
// This operation requires permissions to perform the rekognition:IndexFaces
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation IndexFaces for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
//   Amazon Rekognition is unable to access the S3 object specified in the request.
//
//   * ErrCodeInvalidParameterException "InvalidParameterException"
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * ErrCodeImageTooLargeException "ImageTooLargeException"
//   The input image size exceeds the allowed limit. For more information, see
//   Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
//
//   * ErrCodeAccessDeniedException "AccessDeniedException"
//   You are not authorized to perform the action.
//
//   * ErrCodeInternalServerError "InternalServerError"
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ErrCodeThrottlingException "ThrottlingException"
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   The collection specified in the request cannot be found.
//
//   * ErrCodeInvalidImageFormatException "InvalidImageFormatException"
//   The provided image format is not supported.
func (c *Rekognition) IndexFaces(input *IndexFacesInput) (*IndexFacesOutput, error) {
	req, out := c.IndexFacesRequest(input)
	return out, req.Send()
}
|
|
|
|
|
|
|
|
|
|
// IndexFacesWithContext is the same as IndexFaces with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See IndexFaces for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) IndexFacesWithContext(ctx aws.Context, input *IndexFacesInput, opts ...request.Option) (*IndexFacesOutput, error) {
|
|
|
|
|
req, out := c.IndexFacesRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opListCollections = "ListCollections"
|
|
|
|
|
|
|
|
|
|
// ListCollectionsRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the ListCollections operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See ListCollections for more information on using the ListCollections
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the ListCollectionsRequest method.
|
|
|
|
|
// req, resp := client.ListCollectionsRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) ListCollectionsRequest(input *ListCollectionsInput) (req *request.Request, output *ListCollectionsOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opListCollections,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
Paginator: &request.Paginator{
|
|
|
|
|
InputTokens: []string{"NextToken"},
|
|
|
|
|
OutputTokens: []string{"NextToken"},
|
|
|
|
|
LimitToken: "MaxResults",
|
|
|
|
|
TruncationToken: "",
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &ListCollectionsInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &ListCollectionsOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ListCollections API operation for Amazon Rekognition.
//
// Returns list of collection IDs in your account. If the result is truncated,
// the response also provides a NextToken that you can use in the subsequent
// request to fetch the next set of collection IDs.
//
// For an example, see Listing Collections in the Amazon Rekognition Developer
// Guide.
//
// This operation requires permissions to perform the rekognition:ListCollections
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation ListCollections for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidParameterException "InvalidParameterException"
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * ErrCodeAccessDeniedException "AccessDeniedException"
//   You are not authorized to perform the action.
//
//   * ErrCodeInternalServerError "InternalServerError"
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ErrCodeThrottlingException "ThrottlingException"
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException"
//   Pagination token in the request is not valid.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   The collection specified in the request cannot be found.
func (c *Rekognition) ListCollections(input *ListCollectionsInput) (*ListCollectionsOutput, error) {
	req, out := c.ListCollectionsRequest(input)
	return out, req.Send()
}
|
|
|
|
|
|
|
|
|
|
// ListCollectionsWithContext is the same as ListCollections with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See ListCollections for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) ListCollectionsWithContext(ctx aws.Context, input *ListCollectionsInput, opts ...request.Option) (*ListCollectionsOutput, error) {
|
|
|
|
|
req, out := c.ListCollectionsRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ListCollectionsPages iterates over the pages of a ListCollections operation,
|
|
|
|
|
// calling the "fn" function with the response data for each page. To stop
|
|
|
|
|
// iterating, return false from the fn function.
|
|
|
|
|
//
|
|
|
|
|
// See ListCollections method for more information on how to use this operation.
|
|
|
|
|
//
|
|
|
|
|
// Note: This operation can generate multiple requests to a service.
|
|
|
|
|
//
|
|
|
|
|
// // Example iterating over at most 3 pages of a ListCollections operation.
|
|
|
|
|
// pageNum := 0
|
|
|
|
|
// err := client.ListCollectionsPages(params,
|
|
|
|
|
// func(page *ListCollectionsOutput, lastPage bool) bool {
|
|
|
|
|
// pageNum++
|
|
|
|
|
// fmt.Println(page)
|
|
|
|
|
// return pageNum <= 3
|
|
|
|
|
// })
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) ListCollectionsPages(input *ListCollectionsInput, fn func(*ListCollectionsOutput, bool) bool) error {
|
|
|
|
|
return c.ListCollectionsPagesWithContext(aws.BackgroundContext(), input, fn)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ListCollectionsPagesWithContext same as ListCollectionsPages except
// it takes a Context and allows setting request options on the pages.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *Rekognition) ListCollectionsPagesWithContext(ctx aws.Context, input *ListCollectionsInput, fn func(*ListCollectionsOutput, bool) bool, opts ...request.Option) error {
	p := request.Pagination{
		NewRequest: func() (*request.Request, error) {
			// Copy the caller's input so pagination never mutates it.
			var inCpy *ListCollectionsInput
			if input != nil {
				tmp := *input
				inCpy = &tmp
			}
			req, _ := c.ListCollectionsRequest(inCpy)
			req.SetContext(ctx)
			req.ApplyOptions(opts...)
			return req, nil
		},
	}

	// Note: p.Next() is intentionally evaluated before cont, matching the
	// generated SDK's pagination semantics.
	cont := true
	for p.Next() && cont {
		cont = fn(p.Page().(*ListCollectionsOutput), !p.HasNextPage())
	}
	return p.Err()
}
|
|
|
|
|
|
|
|
|
|
const opListFaces = "ListFaces"
|
|
|
|
|
|
|
|
|
|
// ListFacesRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the ListFaces operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See ListFaces for more information on using the ListFaces
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the ListFacesRequest method.
|
|
|
|
|
// req, resp := client.ListFacesRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) ListFacesRequest(input *ListFacesInput) (req *request.Request, output *ListFacesOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opListFaces,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
Paginator: &request.Paginator{
|
|
|
|
|
InputTokens: []string{"NextToken"},
|
|
|
|
|
OutputTokens: []string{"NextToken"},
|
|
|
|
|
LimitToken: "MaxResults",
|
|
|
|
|
TruncationToken: "",
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &ListFacesInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &ListFacesOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ListFaces API operation for Amazon Rekognition.
//
// Returns metadata for faces in the specified collection. This metadata includes
// information such as the bounding box coordinates, the confidence (that the
// bounding box contains a face), and face ID. For an example, see Listing Faces
// in a Collection in the Amazon Rekognition Developer Guide.
//
// This operation requires permissions to perform the rekognition:ListFaces
// action.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Rekognition's
// API operation ListFaces for usage and error information.
//
// Returned Error Codes:
//   * ErrCodeInvalidParameterException "InvalidParameterException"
//   Input parameter violated a constraint. Validate your parameter before calling
//   the API operation again.
//
//   * ErrCodeAccessDeniedException "AccessDeniedException"
//   You are not authorized to perform the action.
//
//   * ErrCodeInternalServerError "InternalServerError"
//   Amazon Rekognition experienced a service issue. Try your call again.
//
//   * ErrCodeThrottlingException "ThrottlingException"
//   Amazon Rekognition is temporarily unable to process the request. Try your
//   call again.
//
//   * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
//   The number of requests exceeded your throughput limit. If you want to increase
//   this limit, contact Amazon Rekognition.
//
//   * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException"
//   Pagination token in the request is not valid.
//
//   * ErrCodeResourceNotFoundException "ResourceNotFoundException"
//   The collection specified in the request cannot be found.
func (c *Rekognition) ListFaces(input *ListFacesInput) (*ListFacesOutput, error) {
	req, out := c.ListFacesRequest(input)
	return out, req.Send()
}
|
|
|
|
|
|
|
|
|
|
// ListFacesWithContext is the same as ListFaces with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See ListFaces for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) ListFacesWithContext(ctx aws.Context, input *ListFacesInput, opts ...request.Option) (*ListFacesOutput, error) {
|
|
|
|
|
req, out := c.ListFacesRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ListFacesPages iterates over the pages of a ListFaces operation,
|
|
|
|
|
// calling the "fn" function with the response data for each page. To stop
|
|
|
|
|
// iterating, return false from the fn function.
|
|
|
|
|
//
|
|
|
|
|
// See ListFaces method for more information on how to use this operation.
|
|
|
|
|
//
|
|
|
|
|
// Note: This operation can generate multiple requests to a service.
|
|
|
|
|
//
|
|
|
|
|
// // Example iterating over at most 3 pages of a ListFaces operation.
|
|
|
|
|
// pageNum := 0
|
|
|
|
|
// err := client.ListFacesPages(params,
|
|
|
|
|
// func(page *ListFacesOutput, lastPage bool) bool {
|
|
|
|
|
// pageNum++
|
|
|
|
|
// fmt.Println(page)
|
|
|
|
|
// return pageNum <= 3
|
|
|
|
|
// })
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) ListFacesPages(input *ListFacesInput, fn func(*ListFacesOutput, bool) bool) error {
|
|
|
|
|
return c.ListFacesPagesWithContext(aws.BackgroundContext(), input, fn)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ListFacesPagesWithContext same as ListFacesPages except
|
|
|
|
|
// it takes a Context and allows setting request options on the pages.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) ListFacesPagesWithContext(ctx aws.Context, input *ListFacesInput, fn func(*ListFacesOutput, bool) bool, opts ...request.Option) error {
|
|
|
|
|
p := request.Pagination{
|
|
|
|
|
NewRequest: func() (*request.Request, error) {
|
|
|
|
|
var inCpy *ListFacesInput
|
|
|
|
|
if input != nil {
|
|
|
|
|
tmp := *input
|
|
|
|
|
inCpy = &tmp
|
|
|
|
|
}
|
|
|
|
|
req, _ := c.ListFacesRequest(inCpy)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return req, nil
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cont := true
|
|
|
|
|
for p.Next() && cont {
|
|
|
|
|
cont = fn(p.Page().(*ListFacesOutput), !p.HasNextPage())
|
|
|
|
|
}
|
|
|
|
|
return p.Err()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// opListStreamProcessors is the service API name of the ListStreamProcessors operation.
const opListStreamProcessors = "ListStreamProcessors"
|
|
|
|
|
|
|
|
|
|
// ListStreamProcessorsRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the ListStreamProcessors operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See ListStreamProcessors for more information on using the ListStreamProcessors
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the ListStreamProcessorsRequest method.
|
|
|
|
|
// req, resp := client.ListStreamProcessorsRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) ListStreamProcessorsRequest(input *ListStreamProcessorsInput) (req *request.Request, output *ListStreamProcessorsOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opListStreamProcessors,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
Paginator: &request.Paginator{
|
|
|
|
|
InputTokens: []string{"NextToken"},
|
|
|
|
|
OutputTokens: []string{"NextToken"},
|
|
|
|
|
LimitToken: "MaxResults",
|
|
|
|
|
TruncationToken: "",
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &ListStreamProcessorsInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &ListStreamProcessorsOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ListStreamProcessors API operation for Amazon Rekognition.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Gets a list of stream processors that you have created with CreateStreamProcessor.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation ListStreamProcessors for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException"
|
|
|
|
|
// Pagination token in the request is not valid.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) ListStreamProcessors(input *ListStreamProcessorsInput) (*ListStreamProcessorsOutput, error) {
|
|
|
|
|
req, out := c.ListStreamProcessorsRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ListStreamProcessorsWithContext is the same as ListStreamProcessors with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See ListStreamProcessors for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) ListStreamProcessorsWithContext(ctx aws.Context, input *ListStreamProcessorsInput, opts ...request.Option) (*ListStreamProcessorsOutput, error) {
|
|
|
|
|
req, out := c.ListStreamProcessorsRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ListStreamProcessorsPages iterates over the pages of a ListStreamProcessors operation,
|
|
|
|
|
// calling the "fn" function with the response data for each page. To stop
|
|
|
|
|
// iterating, return false from the fn function.
|
|
|
|
|
//
|
|
|
|
|
// See ListStreamProcessors method for more information on how to use this operation.
|
|
|
|
|
//
|
|
|
|
|
// Note: This operation can generate multiple requests to a service.
|
|
|
|
|
//
|
|
|
|
|
// // Example iterating over at most 3 pages of a ListStreamProcessors operation.
|
|
|
|
|
// pageNum := 0
|
|
|
|
|
// err := client.ListStreamProcessorsPages(params,
|
|
|
|
|
// func(page *ListStreamProcessorsOutput, lastPage bool) bool {
|
|
|
|
|
// pageNum++
|
|
|
|
|
// fmt.Println(page)
|
|
|
|
|
// return pageNum <= 3
|
|
|
|
|
// })
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) ListStreamProcessorsPages(input *ListStreamProcessorsInput, fn func(*ListStreamProcessorsOutput, bool) bool) error {
|
|
|
|
|
return c.ListStreamProcessorsPagesWithContext(aws.BackgroundContext(), input, fn)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// ListStreamProcessorsPagesWithContext same as ListStreamProcessorsPages except
|
|
|
|
|
// it takes a Context and allows setting request options on the pages.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) ListStreamProcessorsPagesWithContext(ctx aws.Context, input *ListStreamProcessorsInput, fn func(*ListStreamProcessorsOutput, bool) bool, opts ...request.Option) error {
|
|
|
|
|
p := request.Pagination{
|
|
|
|
|
NewRequest: func() (*request.Request, error) {
|
|
|
|
|
var inCpy *ListStreamProcessorsInput
|
|
|
|
|
if input != nil {
|
|
|
|
|
tmp := *input
|
|
|
|
|
inCpy = &tmp
|
|
|
|
|
}
|
|
|
|
|
req, _ := c.ListStreamProcessorsRequest(inCpy)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return req, nil
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
cont := true
|
|
|
|
|
for p.Next() && cont {
|
|
|
|
|
cont = fn(p.Page().(*ListStreamProcessorsOutput), !p.HasNextPage())
|
|
|
|
|
}
|
|
|
|
|
return p.Err()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// opRecognizeCelebrities is the service API name of the RecognizeCelebrities operation.
const opRecognizeCelebrities = "RecognizeCelebrities"
|
|
|
|
|
|
|
|
|
|
// RecognizeCelebritiesRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the RecognizeCelebrities operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See RecognizeCelebrities for more information on using the RecognizeCelebrities
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the RecognizeCelebritiesRequest method.
|
|
|
|
|
// req, resp := client.RecognizeCelebritiesRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) RecognizeCelebritiesRequest(input *RecognizeCelebritiesInput) (req *request.Request, output *RecognizeCelebritiesOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opRecognizeCelebrities,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &RecognizeCelebritiesInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &RecognizeCelebritiesOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// RecognizeCelebrities API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Returns an array of celebrities recognized in the input image. For more information,
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// see Recognizing Celebrities in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// RecognizeCelebrities returns the 100 largest faces in the image. It lists
|
|
|
|
|
// recognized celebrities in the CelebrityFaces array and unrecognized faces
|
|
|
|
|
// in the UnrecognizedFaces array. RecognizeCelebrities doesn't return celebrities
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// whose faces aren't among the largest 100 faces in the image.
|
|
|
|
|
//
|
|
|
|
|
// For each celebrity recognized, RecognizeCelebrities returns a Celebrity object.
|
|
|
|
|
// The Celebrity object contains the celebrity name, ID, URL links to additional
|
|
|
|
|
// information, match confidence, and a ComparedFace object that you can use
|
|
|
|
|
// to locate the celebrity's face on the image.
|
|
|
|
|
//
|
|
|
|
|
// Amazon Rekognition doesn't retain information about which images a celebrity
|
|
|
|
|
// has been recognized in. Your application must store this information and
|
|
|
|
|
// use the Celebrity ID property as a unique identifier for the celebrity. If
|
|
|
|
|
// you don't store the celebrity name or additional information URLs returned
|
|
|
|
|
// by RecognizeCelebrities, you will need the ID to identify the celebrity in
|
|
|
|
|
// a call to the GetCelebrityInfo operation.
|
|
|
|
|
//
|
|
|
|
|
// You pass the input image either as base64-encoded image bytes or as a reference
|
|
|
|
|
// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// Rekognition operations, passing image bytes is not supported. The image must
|
|
|
|
|
// be either a PNG or JPEG formatted file.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// For an example, see Recognizing Celebrities in an Image in the Amazon Rekognition
|
|
|
|
|
// Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// This operation requires permissions to perform the rekognition:RecognizeCelebrities
|
|
|
|
|
// operation.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation RecognizeCelebrities for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
|
|
|
|
|
// Amazon Rekognition is unable to access the S3 object specified in the request.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidImageFormatException "InvalidImageFormatException"
|
|
|
|
|
// The provided image format is not supported.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeImageTooLargeException "ImageTooLargeException"
|
|
|
|
|
// The input image size exceeds the allowed limit. For more information, see
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidImageFormatException "InvalidImageFormatException"
|
|
|
|
|
// The provided image format is not supported.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) RecognizeCelebrities(input *RecognizeCelebritiesInput) (*RecognizeCelebritiesOutput, error) {
|
|
|
|
|
req, out := c.RecognizeCelebritiesRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// RecognizeCelebritiesWithContext is the same as RecognizeCelebrities with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See RecognizeCelebrities for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) RecognizeCelebritiesWithContext(ctx aws.Context, input *RecognizeCelebritiesInput, opts ...request.Option) (*RecognizeCelebritiesOutput, error) {
|
|
|
|
|
req, out := c.RecognizeCelebritiesRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// opSearchFaces is the service API name of the SearchFaces operation.
const opSearchFaces = "SearchFaces"
|
|
|
|
|
|
|
|
|
|
// SearchFacesRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the SearchFaces operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See SearchFaces for more information on using the SearchFaces
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the SearchFacesRequest method.
|
|
|
|
|
// req, resp := client.SearchFacesRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) SearchFacesRequest(input *SearchFacesInput) (req *request.Request, output *SearchFacesOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opSearchFaces,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &SearchFacesInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &SearchFacesOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SearchFaces API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// For a given input face ID, searches for matching faces in the collection
|
|
|
|
|
// the face belongs to. You get a face ID when you add a face to the collection
|
|
|
|
|
// using the IndexFaces operation. The operation compares the features of the
|
|
|
|
|
// input face with faces in the specified collection.
|
|
|
|
|
//
|
|
|
|
|
// You can also search faces without indexing faces by using the SearchFacesByImage
|
|
|
|
|
// operation.
|
|
|
|
|
//
|
|
|
|
|
// The operation response returns an array of faces that match, ordered by similarity
|
|
|
|
|
// score with the highest similarity first. More specifically, it is an array
|
|
|
|
|
// of metadata for each face match that is found. Along with the metadata, the
|
|
|
|
|
// response also includes a confidence value for each face match, indicating
|
|
|
|
|
// the confidence that the specific face matches the input face.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// For an example, see Searching for a Face Using Its Face ID in the Amazon
|
|
|
|
|
// Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// This operation requires permissions to perform the rekognition:SearchFaces
|
|
|
|
|
// action.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation SearchFaces for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) SearchFaces(input *SearchFacesInput) (*SearchFacesOutput, error) {
|
|
|
|
|
req, out := c.SearchFacesRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SearchFacesWithContext is the same as SearchFaces with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See SearchFaces for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) SearchFacesWithContext(ctx aws.Context, input *SearchFacesInput, opts ...request.Option) (*SearchFacesOutput, error) {
|
|
|
|
|
req, out := c.SearchFacesRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// opSearchFacesByImage is the service API name of the SearchFacesByImage operation.
const opSearchFacesByImage = "SearchFacesByImage"
|
|
|
|
|
|
|
|
|
|
// SearchFacesByImageRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the SearchFacesByImage operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See SearchFacesByImage for more information on using the SearchFacesByImage
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the SearchFacesByImageRequest method.
|
|
|
|
|
// req, resp := client.SearchFacesByImageRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) SearchFacesByImageRequest(input *SearchFacesByImageInput) (req *request.Request, output *SearchFacesByImageOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opSearchFacesByImage,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &SearchFacesByImageInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &SearchFacesByImageOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SearchFacesByImage API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// For a given input image, first detects the largest face in the image, and
|
|
|
|
|
// then searches the specified collection for matching faces. The operation
|
|
|
|
|
// compares the features of the input face with faces in the specified collection.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// To search for all faces in an input image, you might first call the IndexFaces
|
|
|
|
|
// operation, and then use the face IDs returned in subsequent calls to the
|
|
|
|
|
// SearchFaces operation.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// You can also call the DetectFaces operation and use the bounding boxes in
|
|
|
|
|
// the response to make face crops, which then you can pass in to the SearchFacesByImage
|
|
|
|
|
// operation.
|
|
|
|
|
//
|
|
|
|
|
// You pass the input image either as base64-encoded image bytes or as a reference
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// to an image in an Amazon S3 bucket. If you use the AWS CLI to call Amazon
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// Rekognition operations, passing image bytes is not supported. The image must
|
|
|
|
|
// be either a PNG or JPEG formatted file.
|
|
|
|
|
//
|
|
|
|
|
// The response returns an array of faces that match, ordered by similarity
|
|
|
|
|
// score with the highest similarity first. More specifically, it is an array
|
|
|
|
|
// of metadata for each face match found. Along with the metadata, the response
|
|
|
|
|
// also includes a similarity indicating how similar the face is to the input
|
|
|
|
|
// face. In the response, the operation also returns the bounding box (and a
|
|
|
|
|
// confidence level that the bounding box contains a face) of the face that
|
|
|
|
|
// Amazon Rekognition used for the input image.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// For an example, Searching for a Face Using an Image in the Amazon Rekognition
|
|
|
|
|
// Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// This operation requires permissions to perform the rekognition:SearchFacesByImage
|
|
|
|
|
// action.
|
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation SearchFacesByImage for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
|
|
|
|
|
// Amazon Rekognition is unable to access the S3 object specified in the request.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeImageTooLargeException "ImageTooLargeException"
|
|
|
|
|
// The input image size exceeds the allowed limit. For more information, see
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Limits in Amazon Rekognition in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidImageFormatException "InvalidImageFormatException"
|
|
|
|
|
// The provided image format is not supported.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) SearchFacesByImage(input *SearchFacesByImageInput) (*SearchFacesByImageOutput, error) {
|
|
|
|
|
req, out := c.SearchFacesByImageRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SearchFacesByImageWithContext is the same as SearchFacesByImage with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See SearchFacesByImage for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) SearchFacesByImageWithContext(ctx aws.Context, input *SearchFacesByImageInput, opts ...request.Option) (*SearchFacesByImageOutput, error) {
|
|
|
|
|
req, out := c.SearchFacesByImageRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// opStartCelebrityRecognition is the service API name of the StartCelebrityRecognition operation.
const opStartCelebrityRecognition = "StartCelebrityRecognition"
|
|
|
|
|
|
|
|
|
|
// StartCelebrityRecognitionRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the StartCelebrityRecognition operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See StartCelebrityRecognition for more information on using the StartCelebrityRecognition
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the StartCelebrityRecognitionRequest method.
|
|
|
|
|
// req, resp := client.StartCelebrityRecognitionRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) StartCelebrityRecognitionRequest(input *StartCelebrityRecognitionInput) (req *request.Request, output *StartCelebrityRecognitionOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opStartCelebrityRecognition,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &StartCelebrityRecognitionInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &StartCelebrityRecognitionOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartCelebrityRecognition API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Starts asynchronous recognition of celebrities in a stored video.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Amazon Rekognition Video can detect celebrities in a video must be stored
|
|
|
|
|
// in an Amazon S3 bucket. Use Video to specify the bucket name and the filename
|
|
|
|
|
// of the video. StartCelebrityRecognition returns a job identifier (JobId)
|
|
|
|
|
// which you use to get the results of the analysis. When celebrity recognition
|
|
|
|
|
// analysis is finished, Amazon Rekognition Video publishes a completion status
|
|
|
|
|
// to the Amazon Simple Notification Service topic that you specify in NotificationChannel.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// To get the results of the celebrity recognition analysis, first check that
|
|
|
|
|
// the status value published to the Amazon SNS topic is SUCCEEDED. If so, call
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// GetCelebrityRecognition and pass the job identifier (JobId) from the initial
|
|
|
|
|
// call to StartCelebrityRecognition.
|
|
|
|
|
//
|
|
|
|
|
// For more information, see Recognizing Celebrities in the Amazon Rekognition
|
|
|
|
|
// Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation StartCelebrityRecognition for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException"
|
|
|
|
|
// A ClientRequestToken input parameter was reused with an operation, but at
|
|
|
|
|
// least one of the other input parameters is different from the previous call
|
|
|
|
|
// to the operation.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
|
|
|
|
|
// Amazon Rekognition is unable to access the S3 object specified in the request.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeVideoTooLargeException "VideoTooLargeException"
|
|
|
|
|
// The file size or duration of the supplied media is too large. The maximum
|
|
|
|
|
// file size is 8GB. The maximum duration is 2 hours.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeLimitExceededException "LimitExceededException"
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// An Amazon Rekognition service limit was exceeded. For example, if you start
|
|
|
|
|
// too many Amazon Rekognition Video jobs concurrently, calls to start operations
|
|
|
|
|
// (StartLabelDetection, for example) will raise a LimitExceededException exception
|
|
|
|
|
// (HTTP status code: 400) until the number of concurrently running jobs is
|
|
|
|
|
// below the Amazon Rekognition service limit.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) StartCelebrityRecognition(input *StartCelebrityRecognitionInput) (*StartCelebrityRecognitionOutput, error) {
|
|
|
|
|
req, out := c.StartCelebrityRecognitionRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartCelebrityRecognitionWithContext is the same as StartCelebrityRecognition with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See StartCelebrityRecognition for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) StartCelebrityRecognitionWithContext(ctx aws.Context, input *StartCelebrityRecognitionInput, opts ...request.Option) (*StartCelebrityRecognitionOutput, error) {
|
|
|
|
|
req, out := c.StartCelebrityRecognitionRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opStartContentModeration = "StartContentModeration"
|
|
|
|
|
|
|
|
|
|
// StartContentModerationRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the StartContentModeration operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See StartContentModeration for more information on using the StartContentModeration
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the StartContentModerationRequest method.
|
|
|
|
|
// req, resp := client.StartContentModerationRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) StartContentModerationRequest(input *StartContentModerationInput) (req *request.Request, output *StartContentModerationOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opStartContentModeration,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &StartContentModerationInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &StartContentModerationOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartContentModeration API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Starts asynchronous detection of explicit or suggestive adult content in
|
|
|
|
|
// a stored video.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Amazon Rekognition Video can moderate content in a video stored in an Amazon
|
|
|
|
|
// S3 bucket. Use Video to specify the bucket name and the filename of the video.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// StartContentModeration returns a job identifier (JobId) which you use to
|
|
|
|
|
// get the results of the analysis. When content moderation analysis is finished,
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Amazon Rekognition Video publishes a completion status to the Amazon Simple
|
|
|
|
|
// Notification Service topic that you specify in NotificationChannel.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// To get the results of the content moderation analysis, first check that the
|
|
|
|
|
// status value published to the Amazon SNS topic is SUCCEEDED. If so, call
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// GetContentModeration and pass the job identifier (JobId) from the initial
|
|
|
|
|
// call to StartContentModeration.
|
|
|
|
|
//
|
|
|
|
|
// For more information, see Detecting Unsafe Content in the Amazon Rekognition
|
|
|
|
|
// Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation StartContentModeration for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException"
|
|
|
|
|
// A ClientRequestToken input parameter was reused with an operation, but at
|
|
|
|
|
// least one of the other input parameters is different from the previous call
|
|
|
|
|
// to the operation.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
|
|
|
|
|
// Amazon Rekognition is unable to access the S3 object specified in the request.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeVideoTooLargeException "VideoTooLargeException"
|
|
|
|
|
// The file size or duration of the supplied media is too large. The maximum
|
|
|
|
|
// file size is 8GB. The maximum duration is 2 hours.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeLimitExceededException "LimitExceededException"
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// An Amazon Rekognition service limit was exceeded. For example, if you start
|
|
|
|
|
// too many Amazon Rekognition Video jobs concurrently, calls to start operations
|
|
|
|
|
// (StartLabelDetection, for example) will raise a LimitExceededException exception
|
|
|
|
|
// (HTTP status code: 400) until the number of concurrently running jobs is
|
|
|
|
|
// below the Amazon Rekognition service limit.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) StartContentModeration(input *StartContentModerationInput) (*StartContentModerationOutput, error) {
|
|
|
|
|
req, out := c.StartContentModerationRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartContentModerationWithContext is the same as StartContentModeration with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See StartContentModeration for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) StartContentModerationWithContext(ctx aws.Context, input *StartContentModerationInput, opts ...request.Option) (*StartContentModerationOutput, error) {
|
|
|
|
|
req, out := c.StartContentModerationRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opStartFaceDetection = "StartFaceDetection"
|
|
|
|
|
|
|
|
|
|
// StartFaceDetectionRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the StartFaceDetection operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See StartFaceDetection for more information on using the StartFaceDetection
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the StartFaceDetectionRequest method.
|
|
|
|
|
// req, resp := client.StartFaceDetectionRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) StartFaceDetectionRequest(input *StartFaceDetectionInput) (req *request.Request, output *StartFaceDetectionOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opStartFaceDetection,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &StartFaceDetectionInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &StartFaceDetectionOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartFaceDetection API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Starts asynchronous detection of faces in a stored video.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Amazon Rekognition Video can detect faces in a video stored in an Amazon
|
|
|
|
|
// S3 bucket. Use Video to specify the bucket name and the filename of the video.
|
|
|
|
|
// StartFaceDetection returns a job identifier (JobId) that you use to get the
|
|
|
|
|
// results of the operation. When face detection is finished, Amazon Rekognition
|
|
|
|
|
// Video publishes a completion status to the Amazon Simple Notification Service
|
|
|
|
|
// topic that you specify in NotificationChannel. To get the results of the
|
|
|
|
|
// face detection operation, first check that the status value published to
|
|
|
|
|
// the Amazon SNS topic is SUCCEEDED. If so, call GetFaceDetection and pass
|
|
|
|
|
// the job identifier (JobId) from the initial call to StartFaceDetection.
|
|
|
|
|
//
|
|
|
|
|
// For more information, see Detecting Faces in a Stored Video in the Amazon
|
|
|
|
|
// Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation StartFaceDetection for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException"
|
|
|
|
|
// A ClientRequestToken input parameter was reused with an operation, but at
|
|
|
|
|
// least one of the other input parameters is different from the previous call
|
|
|
|
|
// to the operation.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
|
|
|
|
|
// Amazon Rekognition is unable to access the S3 object specified in the request.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeVideoTooLargeException "VideoTooLargeException"
|
|
|
|
|
// The file size or duration of the supplied media is too large. The maximum
|
|
|
|
|
// file size is 8GB. The maximum duration is 2 hours.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeLimitExceededException "LimitExceededException"
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// An Amazon Rekognition service limit was exceeded. For example, if you start
|
|
|
|
|
// too many Amazon Rekognition Video jobs concurrently, calls to start operations
|
|
|
|
|
// (StartLabelDetection, for example) will raise a LimitExceededException exception
|
|
|
|
|
// (HTTP status code: 400) until the number of concurrently running jobs is
|
|
|
|
|
// below the Amazon Rekognition service limit.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) StartFaceDetection(input *StartFaceDetectionInput) (*StartFaceDetectionOutput, error) {
|
|
|
|
|
req, out := c.StartFaceDetectionRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartFaceDetectionWithContext is the same as StartFaceDetection with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See StartFaceDetection for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) StartFaceDetectionWithContext(ctx aws.Context, input *StartFaceDetectionInput, opts ...request.Option) (*StartFaceDetectionOutput, error) {
|
|
|
|
|
req, out := c.StartFaceDetectionRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opStartFaceSearch = "StartFaceSearch"
|
|
|
|
|
|
|
|
|
|
// StartFaceSearchRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the StartFaceSearch operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See StartFaceSearch for more information on using the StartFaceSearch
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the StartFaceSearchRequest method.
|
|
|
|
|
// req, resp := client.StartFaceSearchRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) StartFaceSearchRequest(input *StartFaceSearchInput) (req *request.Request, output *StartFaceSearchOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opStartFaceSearch,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &StartFaceSearchInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &StartFaceSearchOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartFaceSearch API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Starts the asynchronous search for faces in a collection that match the faces
|
|
|
|
|
// of persons detected in a stored video.
|
|
|
|
|
//
|
|
|
|
|
// The video must be stored in an Amazon S3 bucket. Use Video to specify the
|
|
|
|
|
// bucket name and the filename of the video. StartFaceSearch returns a job
|
|
|
|
|
// identifier (JobId) which you use to get the search results once the search
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// has completed. When searching is finished, Amazon Rekognition Video publishes
|
|
|
|
|
// a completion status to the Amazon Simple Notification Service topic that
|
|
|
|
|
// you specify in NotificationChannel. To get the search results, first check
|
|
|
|
|
// that the status value published to the Amazon SNS topic is SUCCEEDED. If
|
|
|
|
|
// so, call GetFaceSearch and pass the job identifier (JobId) from the initial
|
|
|
|
|
// call to StartFaceSearch. For more information, see procedure-person-search-videos.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation StartFaceSearch for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException"
|
|
|
|
|
// A ClientRequestToken input parameter was reused with an operation, but at
|
|
|
|
|
// least one of the other input parameters is different from the previous call
|
|
|
|
|
// to the operation.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
|
|
|
|
|
// Amazon Rekognition is unable to access the S3 object specified in the request.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeVideoTooLargeException "VideoTooLargeException"
|
|
|
|
|
// The file size or duration of the supplied media is too large. The maximum
|
|
|
|
|
// file size is 8GB. The maximum duration is 2 hours.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeLimitExceededException "LimitExceededException"
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// An Amazon Rekognition service limit was exceeded. For example, if you start
|
|
|
|
|
// too many Amazon Rekognition Video jobs concurrently, calls to start operations
|
|
|
|
|
// (StartLabelDetection, for example) will raise a LimitExceededException exception
|
|
|
|
|
// (HTTP status code: 400) until the number of concurrently running jobs is
|
|
|
|
|
// below the Amazon Rekognition service limit.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) StartFaceSearch(input *StartFaceSearchInput) (*StartFaceSearchOutput, error) {
|
|
|
|
|
req, out := c.StartFaceSearchRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartFaceSearchWithContext is the same as StartFaceSearch with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See StartFaceSearch for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) StartFaceSearchWithContext(ctx aws.Context, input *StartFaceSearchInput, opts ...request.Option) (*StartFaceSearchOutput, error) {
|
|
|
|
|
req, out := c.StartFaceSearchRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opStartLabelDetection = "StartLabelDetection"
|
|
|
|
|
|
|
|
|
|
// StartLabelDetectionRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the StartLabelDetection operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See StartLabelDetection for more information on using the StartLabelDetection
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the StartLabelDetectionRequest method.
|
|
|
|
|
// req, resp := client.StartLabelDetectionRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) StartLabelDetectionRequest(input *StartLabelDetectionInput) (req *request.Request, output *StartLabelDetectionOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opStartLabelDetection,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &StartLabelDetectionInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &StartLabelDetectionOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartLabelDetection API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Starts asynchronous detection of labels in a stored video.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Amazon Rekognition Video can detect labels in a video. Labels are instances
|
|
|
|
|
// of real-world entities. This includes objects like flower, tree, and table;
|
|
|
|
|
// events like wedding, graduation, and birthday party; concepts like landscape,
|
|
|
|
|
// evening, and nature; and activities like a person getting out of a car or
|
|
|
|
|
// a person skiing.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// The video must be stored in an Amazon S3 bucket. Use Video to specify the
|
|
|
|
|
// bucket name and the filename of the video. StartLabelDetection returns a
|
|
|
|
|
// job identifier (JobId) which you use to get the results of the operation.
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// When label detection is finished, Amazon Rekognition Video publishes a completion
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// status to the Amazon Simple Notification Service topic that you specify in
|
|
|
|
|
// NotificationChannel.
|
|
|
|
|
//
|
|
|
|
|
// To get the results of the label detection operation, first check that the
|
|
|
|
|
// status value published to the Amazon SNS topic is SUCCEEDED. If so, call
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// GetLabelDetection and pass the job identifier (JobId) from the initial call
|
|
|
|
|
// to StartLabelDetection.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation StartLabelDetection for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException"
|
|
|
|
|
// A ClientRequestToken input parameter was reused with an operation, but at
|
|
|
|
|
// least one of the other input parameters is different from the previous call
|
|
|
|
|
// to the operation.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
|
|
|
|
|
// Amazon Rekognition is unable to access the S3 object specified in the request.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeVideoTooLargeException "VideoTooLargeException"
|
|
|
|
|
// The file size or duration of the supplied media is too large. The maximum
|
|
|
|
|
// file size is 8GB. The maximum duration is 2 hours.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeLimitExceededException "LimitExceededException"
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// An Amazon Rekognition service limit was exceeded. For example, if you start
|
|
|
|
|
// too many Amazon Rekognition Video jobs concurrently, calls to start operations
|
|
|
|
|
// (StartLabelDetection, for example) will raise a LimitExceededException exception
|
|
|
|
|
// (HTTP status code: 400) until the number of concurrently running jobs is
|
|
|
|
|
// below the Amazon Rekognition service limit.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) StartLabelDetection(input *StartLabelDetectionInput) (*StartLabelDetectionOutput, error) {
|
|
|
|
|
req, out := c.StartLabelDetectionRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartLabelDetectionWithContext is the same as StartLabelDetection with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See StartLabelDetection for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) StartLabelDetectionWithContext(ctx aws.Context, input *StartLabelDetectionInput, opts ...request.Option) (*StartLabelDetectionOutput, error) {
|
|
|
|
|
req, out := c.StartLabelDetectionRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opStartPersonTracking = "StartPersonTracking"
|
|
|
|
|
|
|
|
|
|
// StartPersonTrackingRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the StartPersonTracking operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See StartPersonTracking for more information on using the StartPersonTracking
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the StartPersonTrackingRequest method.
|
|
|
|
|
// req, resp := client.StartPersonTrackingRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) StartPersonTrackingRequest(input *StartPersonTrackingInput) (req *request.Request, output *StartPersonTrackingOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opStartPersonTracking,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &StartPersonTrackingInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &StartPersonTrackingOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartPersonTracking API operation for Amazon Rekognition.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Starts the asynchronous tracking of a person's path in a stored video.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Amazon Rekognition Video can track the path of people in a video stored in
|
|
|
|
|
// an Amazon S3 bucket. Use Video to specify the bucket name and the filename
|
|
|
|
|
// of the video. StartPersonTracking returns a job identifier (JobId) which
|
|
|
|
|
// you use to get the results of the operation. When label detection is finished,
|
|
|
|
|
// Amazon Rekognition publishes a completion status to the Amazon Simple Notification
|
|
|
|
|
// Service topic that you specify in NotificationChannel.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// To get the results of the person detection operation, first check that the
|
|
|
|
|
// status value published to the Amazon SNS topic is SUCCEEDED. If so, call
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// GetPersonTracking and pass the job identifier (JobId) from the initial call
|
|
|
|
|
// to StartPersonTracking.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation StartPersonTracking for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeIdempotentParameterMismatchException "IdempotentParameterMismatchException"
|
|
|
|
|
// A ClientRequestToken input parameter was reused with an operation, but at
|
|
|
|
|
// least one of the other input parameters is different from the previous call
|
|
|
|
|
// to the operation.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException"
|
|
|
|
|
// Amazon Rekognition is unable to access the S3 object specified in the request.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeVideoTooLargeException "VideoTooLargeException"
|
|
|
|
|
// The file size or duration of the supplied media is too large. The maximum
|
|
|
|
|
// file size is 8GB. The maximum duration is 2 hours.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeLimitExceededException "LimitExceededException"
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// An Amazon Rekognition service limit was exceeded. For example, if you start
|
|
|
|
|
// too many Amazon Rekognition Video jobs concurrently, calls to start operations
|
|
|
|
|
// (StartLabelDetection, for example) will raise a LimitExceededException exception
|
|
|
|
|
// (HTTP status code: 400) until the number of concurrently running jobs is
|
|
|
|
|
// below the Amazon Rekognition service limit.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) StartPersonTracking(input *StartPersonTrackingInput) (*StartPersonTrackingOutput, error) {
|
|
|
|
|
req, out := c.StartPersonTrackingRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartPersonTrackingWithContext is the same as StartPersonTracking with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See StartPersonTracking for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) StartPersonTrackingWithContext(ctx aws.Context, input *StartPersonTrackingInput, opts ...request.Option) (*StartPersonTrackingOutput, error) {
|
|
|
|
|
req, out := c.StartPersonTrackingRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opStartStreamProcessor = "StartStreamProcessor"
|
|
|
|
|
|
|
|
|
|
// StartStreamProcessorRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the StartStreamProcessor operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See StartStreamProcessor for more information on using the StartStreamProcessor
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the StartStreamProcessorRequest method.
|
|
|
|
|
// req, resp := client.StartStreamProcessorRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) StartStreamProcessorRequest(input *StartStreamProcessorInput) (req *request.Request, output *StartStreamProcessorOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opStartStreamProcessor,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &StartStreamProcessorInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &StartStreamProcessorOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
2019-01-21 14:27:20 +00:00
|
|
|
|
req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
|
2017-12-08 12:03:10 +00:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartStreamProcessor API operation for Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
// Starts processing a stream processor. You create a stream processor by calling
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// CreateStreamProcessor. To tell StartStreamProcessor which stream processor
|
|
|
|
|
// to start, use the value of the Name field specified in the call to CreateStreamProcessor.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation StartStreamProcessor for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceInUseException "ResourceInUseException"
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) StartStreamProcessor(input *StartStreamProcessorInput) (*StartStreamProcessorOutput, error) {
|
|
|
|
|
req, out := c.StartStreamProcessorRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartStreamProcessorWithContext is the same as StartStreamProcessor with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See StartStreamProcessor for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) StartStreamProcessorWithContext(ctx aws.Context, input *StartStreamProcessorInput, opts ...request.Option) (*StartStreamProcessorOutput, error) {
|
|
|
|
|
req, out := c.StartStreamProcessorRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
const opStopStreamProcessor = "StopStreamProcessor"
|
|
|
|
|
|
|
|
|
|
// StopStreamProcessorRequest generates a "aws/request.Request" representing the
|
|
|
|
|
// client's request for the StopStreamProcessor operation. The "output" return
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// value will be populated with the request's response once the request completes
|
|
|
|
|
// successfully.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Use "Send" method on the returned Request to send the API call to the service.
|
|
|
|
|
// the "output" return value is not valid until after Send returns without error.
|
|
|
|
|
//
|
|
|
|
|
// See StopStreamProcessor for more information on using the StopStreamProcessor
|
|
|
|
|
// API call, and error handling.
|
|
|
|
|
//
|
|
|
|
|
// This method is useful when you want to inject custom logic or configuration
|
|
|
|
|
// into the SDK's request lifecycle. Such as custom headers, or retry logic.
|
|
|
|
|
//
|
|
|
|
|
//
|
|
|
|
|
// // Example sending a request using the StopStreamProcessorRequest method.
|
|
|
|
|
// req, resp := client.StopStreamProcessorRequest(params)
|
|
|
|
|
//
|
|
|
|
|
// err := req.Send()
|
|
|
|
|
// if err == nil { // resp is now filled
|
|
|
|
|
// fmt.Println(resp)
|
|
|
|
|
// }
|
|
|
|
|
func (c *Rekognition) StopStreamProcessorRequest(input *StopStreamProcessorInput) (req *request.Request, output *StopStreamProcessorOutput) {
|
|
|
|
|
op := &request.Operation{
|
|
|
|
|
Name: opStopStreamProcessor,
|
|
|
|
|
HTTPMethod: "POST",
|
|
|
|
|
HTTPPath: "/",
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if input == nil {
|
|
|
|
|
input = &StopStreamProcessorInput{}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
output = &StopStreamProcessorOutput{}
|
|
|
|
|
req = c.newRequest(op, input, output)
|
2019-01-21 14:27:20 +00:00
|
|
|
|
req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler)
|
2017-12-08 12:03:10 +00:00
|
|
|
|
return
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StopStreamProcessor API operation for Amazon Rekognition.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Stops a running stream processor that was created by CreateStreamProcessor.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
|
|
|
|
|
// with awserr.Error's Code and Message methods to get detailed information about
|
|
|
|
|
// the error.
|
|
|
|
|
//
|
|
|
|
|
// See the AWS API reference guide for Amazon Rekognition's
|
|
|
|
|
// API operation StopStreamProcessor for usage and error information.
|
|
|
|
|
//
|
|
|
|
|
// Returned Error Codes:
|
|
|
|
|
// * ErrCodeAccessDeniedException "AccessDeniedException"
|
|
|
|
|
// You are not authorized to perform the action.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInternalServerError "InternalServerError"
|
|
|
|
|
// Amazon Rekognition experienced a service issue. Try your call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeThrottlingException "ThrottlingException"
|
|
|
|
|
// Amazon Rekognition is temporarily unable to process the request. Try your
|
|
|
|
|
// call again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeInvalidParameterException "InvalidParameterException"
|
|
|
|
|
// Input parameter violated a constraint. Validate your parameter before calling
|
|
|
|
|
// the API operation again.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceNotFoundException "ResourceNotFoundException"
|
|
|
|
|
// The collection specified in the request cannot be found.
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeResourceInUseException "ResourceInUseException"
|
|
|
|
|
//
|
|
|
|
|
// * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException"
|
|
|
|
|
// The number of requests exceeded your throughput limit. If you want to increase
|
|
|
|
|
// this limit, contact Amazon Rekognition.
|
|
|
|
|
//
|
|
|
|
|
func (c *Rekognition) StopStreamProcessor(input *StopStreamProcessorInput) (*StopStreamProcessorOutput, error) {
|
|
|
|
|
req, out := c.StopStreamProcessorRequest(input)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StopStreamProcessorWithContext is the same as StopStreamProcessor with the addition of
|
|
|
|
|
// the ability to pass a context and additional request options.
|
|
|
|
|
//
|
|
|
|
|
// See StopStreamProcessor for details on how to use this API operation.
|
|
|
|
|
//
|
|
|
|
|
// The context must be non-nil and will be used for request cancellation. If
|
|
|
|
|
// the context is nil a panic will occur. In the future the SDK may create
|
|
|
|
|
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
|
|
|
|
|
// for more information on using Contexts.
|
|
|
|
|
func (c *Rekognition) StopStreamProcessorWithContext(ctx aws.Context, input *StopStreamProcessorInput, opts ...request.Option) (*StopStreamProcessorOutput, error) {
|
|
|
|
|
req, out := c.StopStreamProcessorRequest(input)
|
|
|
|
|
req.SetContext(ctx)
|
|
|
|
|
req.ApplyOptions(opts...)
|
|
|
|
|
return out, req.Send()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Structure containing the estimated age range, in years, for a face.
|
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Amazon Rekognition estimates an age range for faces detected in the input
|
|
|
|
|
// image. Estimated age ranges can overlap. A face of a 5-year-old might have
|
|
|
|
|
// an estimated range of 4-6, while the face of a 6-year-old might have an estimated
|
|
|
|
|
// range of 4-8.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
type AgeRange struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The highest estimated age.
|
|
|
|
|
High *int64 `type:"integer"`
|
|
|
|
|
|
|
|
|
|
// The lowest estimated age.
|
|
|
|
|
Low *int64 `type:"integer"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s AgeRange) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s AgeRange) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetHigh sets the High field's value.
|
|
|
|
|
func (s *AgeRange) SetHigh(v int64) *AgeRange {
|
|
|
|
|
s.High = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetLow sets the Low field's value.
|
|
|
|
|
func (s *AgeRange) SetLow(v int64) *AgeRange {
|
|
|
|
|
s.Low = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Indicates whether or not the face has a beard, and the confidence level in
|
|
|
|
|
// the determination.
|
|
|
|
|
type Beard struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Level of confidence in the determination.
|
|
|
|
|
Confidence *float64 `type:"float"`
|
|
|
|
|
|
|
|
|
|
// Boolean value that indicates whether the face has beard or not.
|
|
|
|
|
Value *bool `type:"boolean"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s Beard) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s Beard) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetConfidence sets the Confidence field's value.
|
|
|
|
|
func (s *Beard) SetConfidence(v float64) *Beard {
|
|
|
|
|
s.Confidence = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetValue sets the Value field's value.
|
|
|
|
|
func (s *Beard) SetValue(v bool) *Beard {
|
|
|
|
|
s.Value = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Identifies the bounding box around the label, face, or text. The left (x-coordinate)
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// and top (y-coordinate) are coordinates representing the top and left sides
|
|
|
|
|
// of the bounding box. Note that the upper-left corner of the image is the
|
|
|
|
|
// origin (0,0).
|
|
|
|
|
//
|
|
|
|
|
// The top and left values returned are ratios of the overall image size. For
|
|
|
|
|
// example, if the input image is 700x200 pixels, and the top-left coordinate
|
|
|
|
|
// of the bounding box is 350x50 pixels, the API returns a left value of 0.5
|
|
|
|
|
// (350/700) and a top value of 0.25 (50/200).
|
|
|
|
|
//
|
|
|
|
|
// The width and height values represent the dimensions of the bounding box
|
|
|
|
|
// as a ratio of the overall image dimension. For example, if the input image
|
|
|
|
|
// is 700x200 pixels, and the bounding box width is 70 pixels, the width returned
|
|
|
|
|
// is 0.1.
|
|
|
|
|
//
|
|
|
|
|
// The bounding box coordinates can have negative values. For example, if Amazon
|
|
|
|
|
// Rekognition is able to detect a face that is at the image edge and is only
|
|
|
|
|
// partially visible, the service can return coordinates that are outside the
|
|
|
|
|
// image bounds and, depending on the image edge, you might get negative values
|
|
|
|
|
// or values greater than 1 for the left or top values.
|
|
|
|
|
type BoundingBox struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Height of the bounding box as a ratio of the overall image height.
|
|
|
|
|
Height *float64 `type:"float"`
|
|
|
|
|
|
|
|
|
|
// Left coordinate of the bounding box as a ratio of overall image width.
|
|
|
|
|
Left *float64 `type:"float"`
|
|
|
|
|
|
|
|
|
|
// Top coordinate of the bounding box as a ratio of overall image height.
|
|
|
|
|
Top *float64 `type:"float"`
|
|
|
|
|
|
|
|
|
|
// Width of the bounding box as a ratio of the overall image width.
|
|
|
|
|
Width *float64 `type:"float"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s BoundingBox) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s BoundingBox) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetHeight sets the Height field's value.
|
|
|
|
|
func (s *BoundingBox) SetHeight(v float64) *BoundingBox {
|
|
|
|
|
s.Height = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetLeft sets the Left field's value.
|
|
|
|
|
func (s *BoundingBox) SetLeft(v float64) *BoundingBox {
|
|
|
|
|
s.Left = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetTop sets the Top field's value.
|
|
|
|
|
func (s *BoundingBox) SetTop(v float64) *BoundingBox {
|
|
|
|
|
s.Top = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetWidth sets the Width field's value.
|
|
|
|
|
func (s *BoundingBox) SetWidth(v float64) *BoundingBox {
|
|
|
|
|
s.Width = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Provides information about a celebrity recognized by the RecognizeCelebrities
|
|
|
|
|
// operation.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
type Celebrity struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Provides information about the celebrity's face, such as its location on
|
|
|
|
|
// the image.
|
|
|
|
|
Face *ComparedFace `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// A unique identifier for the celebrity.
|
|
|
|
|
Id *string `type:"string"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// The confidence, in percentage, that Amazon Rekognition has that the recognized
|
|
|
|
|
// face is the celebrity.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
MatchConfidence *float64 `type:"float"`
|
|
|
|
|
|
|
|
|
|
// The name of the celebrity.
|
|
|
|
|
Name *string `type:"string"`
|
|
|
|
|
|
|
|
|
|
// An array of URLs pointing to additional information about the celebrity.
|
|
|
|
|
// If there is no additional information about the celebrity, this list is empty.
|
|
|
|
|
Urls []*string `type:"list"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s Celebrity) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s Celebrity) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetFace sets the Face field's value.
|
|
|
|
|
func (s *Celebrity) SetFace(v *ComparedFace) *Celebrity {
|
|
|
|
|
s.Face = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetId sets the Id field's value.
|
|
|
|
|
func (s *Celebrity) SetId(v string) *Celebrity {
|
|
|
|
|
s.Id = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetMatchConfidence sets the MatchConfidence field's value.
|
|
|
|
|
func (s *Celebrity) SetMatchConfidence(v float64) *Celebrity {
|
|
|
|
|
s.MatchConfidence = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetName sets the Name field's value.
|
|
|
|
|
func (s *Celebrity) SetName(v string) *Celebrity {
|
|
|
|
|
s.Name = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetUrls sets the Urls field's value.
|
|
|
|
|
func (s *Celebrity) SetUrls(v []*string) *Celebrity {
|
|
|
|
|
s.Urls = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Information about a recognized celebrity.
|
|
|
|
|
type CelebrityDetail struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Bounding box around the body of a celebrity.
|
|
|
|
|
BoundingBox *BoundingBox `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The confidence, in percentage, that Amazon Rekognition has that the recognized
|
|
|
|
|
// face is the celebrity.
|
|
|
|
|
Confidence *float64 `type:"float"`
|
|
|
|
|
|
|
|
|
|
// Face details for the recognized celebrity.
|
|
|
|
|
Face *FaceDetail `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The unique identifier for the celebrity.
|
|
|
|
|
Id *string `type:"string"`
|
|
|
|
|
|
|
|
|
|
// The name of the celebrity.
|
|
|
|
|
Name *string `type:"string"`
|
|
|
|
|
|
|
|
|
|
// An array of URLs pointing to additional celebrity information.
|
|
|
|
|
Urls []*string `type:"list"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s CelebrityDetail) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s CelebrityDetail) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetBoundingBox sets the BoundingBox field's value.
|
|
|
|
|
func (s *CelebrityDetail) SetBoundingBox(v *BoundingBox) *CelebrityDetail {
|
|
|
|
|
s.BoundingBox = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetConfidence sets the Confidence field's value.
|
|
|
|
|
func (s *CelebrityDetail) SetConfidence(v float64) *CelebrityDetail {
|
|
|
|
|
s.Confidence = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetFace sets the Face field's value.
|
|
|
|
|
func (s *CelebrityDetail) SetFace(v *FaceDetail) *CelebrityDetail {
|
|
|
|
|
s.Face = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetId sets the Id field's value.
|
|
|
|
|
func (s *CelebrityDetail) SetId(v string) *CelebrityDetail {
|
|
|
|
|
s.Id = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetName sets the Name field's value.
|
|
|
|
|
func (s *CelebrityDetail) SetName(v string) *CelebrityDetail {
|
|
|
|
|
s.Name = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetUrls sets the Urls field's value.
|
|
|
|
|
func (s *CelebrityDetail) SetUrls(v []*string) *CelebrityDetail {
|
|
|
|
|
s.Urls = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Information about a detected celebrity and the time the celebrity was detected
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// in a stored video. For more information, see GetCelebrityRecognition in the
|
|
|
|
|
// Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
type CelebrityRecognition struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Information about a recognized celebrity.
|
|
|
|
|
Celebrity *CelebrityDetail `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The time, in milliseconds from the start of the video, that the celebrity
|
|
|
|
|
// was recognized.
|
|
|
|
|
Timestamp *int64 `type:"long"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s CelebrityRecognition) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s CelebrityRecognition) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetCelebrity sets the Celebrity field's value.
|
|
|
|
|
func (s *CelebrityRecognition) SetCelebrity(v *CelebrityDetail) *CelebrityRecognition {
|
|
|
|
|
s.Celebrity = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetTimestamp sets the Timestamp field's value.
|
|
|
|
|
func (s *CelebrityRecognition) SetTimestamp(v int64) *CelebrityRecognition {
|
|
|
|
|
s.Timestamp = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// CompareFacesInput carries the request parameters for the CompareFaces
// operation: a source image, a target image, and an optional similarity
// threshold.
type CompareFacesInput struct {
	_ struct{} `type:"structure"`

	// The minimum level of confidence in the face matches that a match must meet
	// to be included in the FaceMatches array.
	SimilarityThreshold *float64 `type:"float"`

	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
	// is not supported.
	//
	// SourceImage is a required field
	SourceImage *Image `type:"structure" required:"true"`

	// The target image as base64-encoded bytes or an S3 object. If you use the
	// AWS CLI to call Amazon Rekognition operations, passing base64-encoded image
	// bytes is not supported.
	//
	// TargetImage is a required field
	TargetImage *Image `type:"structure" required:"true"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s CompareFacesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s CompareFacesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It verifies both required images are present and recursively validates
// each nested Image; it returns a request.ErrInvalidParams listing every
// violation found, or nil when the input passes client-side checks.
func (s *CompareFacesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CompareFacesInput"}
	if s.SourceImage == nil {
		invalidParams.Add(request.NewErrParamRequired("SourceImage"))
	}
	if s.TargetImage == nil {
		invalidParams.Add(request.NewErrParamRequired("TargetImage"))
	}
	if s.SourceImage != nil {
		if err := s.SourceImage.Validate(); err != nil {
			invalidParams.AddNested("SourceImage", err.(request.ErrInvalidParams))
		}
	}
	if s.TargetImage != nil {
		if err := s.TargetImage.Validate(); err != nil {
			invalidParams.AddNested("TargetImage", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetSimilarityThreshold sets the SimilarityThreshold field's value and
// returns the receiver to allow fluent chaining.
func (s *CompareFacesInput) SetSimilarityThreshold(v float64) *CompareFacesInput {
	s.SimilarityThreshold = &v
	return s
}

// SetSourceImage sets the SourceImage field's value and returns the
// receiver to allow fluent chaining.
func (s *CompareFacesInput) SetSourceImage(v *Image) *CompareFacesInput {
	s.SourceImage = v
	return s
}

// SetTargetImage sets the TargetImage field's value and returns the
// receiver to allow fluent chaining.
func (s *CompareFacesInput) SetTargetImage(v *Image) *CompareFacesInput {
	s.TargetImage = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Provides information about a face in a target image that matches the source
// image face analyzed by CompareFaces. The Face property contains the bounding
// box of the face in the target image. The Similarity property is the confidence
// that the source image face matches the face in the bounding box.
type CompareFacesMatch struct {
	_ struct{} `type:"structure"`

	// Provides face metadata (bounding box and confidence that the bounding box
	// actually contains a face).
	Face *ComparedFace `type:"structure"`

	// Level of confidence that the faces match.
	Similarity *float64 `type:"float"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s CompareFacesMatch) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s CompareFacesMatch) GoString() string {
	return s.String()
}

// SetFace sets the Face field's value and returns the receiver to allow
// fluent chaining.
func (s *CompareFacesMatch) SetFace(v *ComparedFace) *CompareFacesMatch {
	s.Face = v
	return s
}

// SetSimilarity sets the Similarity field's value and returns the receiver
// to allow fluent chaining.
func (s *CompareFacesMatch) SetSimilarity(v float64) *CompareFacesMatch {
	s.Similarity = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// CompareFacesOutput carries the response of the CompareFaces operation:
// the matched and unmatched target-image faces, the source face used for
// comparison, and (legacy, always-null) orientation-correction fields.
type CompareFacesOutput struct {
	_ struct{} `type:"structure"`

	// An array of faces in the target image that match the source image face. Each
	// CompareFacesMatch object provides the bounding box, the confidence level
	// that the bounding box contains a face, and the similarity score for the face
	// in the bounding box and the face in the source image.
	FaceMatches []*CompareFacesMatch `type:"list"`

	// The face in the source image that was used for comparison.
	SourceImageFace *ComparedSourceImageFace `type:"structure"`

	// The value of SourceImageOrientationCorrection is always null.
	//
	// If the input image is in .jpeg format, it might contain exchangeable image
	// file format (Exif) metadata that includes the image's orientation. Amazon
	// Rekognition uses this orientation information to perform image correction.
	// The bounding box coordinates are translated to represent object locations
	// after the orientation information in the Exif metadata is used to correct
	// the image orientation. Images in .png format don't contain Exif metadata.
	//
	// Amazon Rekognition doesn’t perform image correction for images in .png format
	// and .jpeg images without orientation information in the image Exif metadata.
	// The bounding box coordinates aren't translated and represent the object locations
	// before the image is rotated.
	SourceImageOrientationCorrection *string `type:"string" enum:"OrientationCorrection"`

	// The value of TargetImageOrientationCorrection is always null.
	//
	// If the input image is in .jpeg format, it might contain exchangeable image
	// file format (Exif) metadata that includes the image's orientation. Amazon
	// Rekognition uses this orientation information to perform image correction.
	// The bounding box coordinates are translated to represent object locations
	// after the orientation information in the Exif metadata is used to correct
	// the image orientation. Images in .png format don't contain Exif metadata.
	//
	// Amazon Rekognition doesn’t perform image correction for images in .png format
	// and .jpeg images without orientation information in the image Exif metadata.
	// The bounding box coordinates aren't translated and represent the object locations
	// before the image is rotated.
	TargetImageOrientationCorrection *string `type:"string" enum:"OrientationCorrection"`

	// An array of faces in the target image that did not match the source image
	// face.
	UnmatchedFaces []*ComparedFace `type:"list"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s CompareFacesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s CompareFacesOutput) GoString() string {
	return s.String()
}

// SetFaceMatches sets the FaceMatches field's value and returns the
// receiver to allow fluent chaining.
func (s *CompareFacesOutput) SetFaceMatches(v []*CompareFacesMatch) *CompareFacesOutput {
	s.FaceMatches = v
	return s
}

// SetSourceImageFace sets the SourceImageFace field's value and returns the
// receiver to allow fluent chaining.
func (s *CompareFacesOutput) SetSourceImageFace(v *ComparedSourceImageFace) *CompareFacesOutput {
	s.SourceImageFace = v
	return s
}

// SetSourceImageOrientationCorrection sets the SourceImageOrientationCorrection
// field's value and returns the receiver to allow fluent chaining.
func (s *CompareFacesOutput) SetSourceImageOrientationCorrection(v string) *CompareFacesOutput {
	s.SourceImageOrientationCorrection = &v
	return s
}

// SetTargetImageOrientationCorrection sets the TargetImageOrientationCorrection
// field's value and returns the receiver to allow fluent chaining.
func (s *CompareFacesOutput) SetTargetImageOrientationCorrection(v string) *CompareFacesOutput {
	s.TargetImageOrientationCorrection = &v
	return s
}

// SetUnmatchedFaces sets the UnmatchedFaces field's value and returns the
// receiver to allow fluent chaining.
func (s *CompareFacesOutput) SetUnmatchedFaces(v []*ComparedFace) *CompareFacesOutput {
	s.UnmatchedFaces = v
	return s
}
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Provides face metadata for target image faces that are analyzed by CompareFaces
// and RecognizeCelebrities.
type ComparedFace struct {
	_ struct{} `type:"structure"`

	// Bounding box of the face.
	BoundingBox *BoundingBox `type:"structure"`

	// Level of confidence that what the bounding box contains is a face.
	Confidence *float64 `type:"float"`

	// An array of facial landmarks.
	Landmarks []*Landmark `type:"list"`

	// Indicates the pose of the face as determined by its pitch, roll, and yaw.
	Pose *Pose `type:"structure"`

	// Identifies face image brightness and sharpness.
	Quality *ImageQuality `type:"structure"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s ComparedFace) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s ComparedFace) GoString() string {
	return s.String()
}

// SetBoundingBox sets the BoundingBox field's value and returns the
// receiver to allow fluent chaining.
func (s *ComparedFace) SetBoundingBox(v *BoundingBox) *ComparedFace {
	s.BoundingBox = v
	return s
}

// SetConfidence sets the Confidence field's value and returns the receiver
// to allow fluent chaining.
func (s *ComparedFace) SetConfidence(v float64) *ComparedFace {
	s.Confidence = &v
	return s
}

// SetLandmarks sets the Landmarks field's value and returns the receiver
// to allow fluent chaining.
func (s *ComparedFace) SetLandmarks(v []*Landmark) *ComparedFace {
	s.Landmarks = v
	return s
}

// SetPose sets the Pose field's value and returns the receiver to allow
// fluent chaining.
func (s *ComparedFace) SetPose(v *Pose) *ComparedFace {
	s.Pose = v
	return s
}

// SetQuality sets the Quality field's value and returns the receiver to
// allow fluent chaining.
func (s *ComparedFace) SetQuality(v *ImageQuality) *ComparedFace {
	s.Quality = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Type that describes the face Amazon Rekognition chose to compare with the
// faces in the target. This contains a bounding box for the selected face and
// confidence level that the bounding box contains a face. Note that Amazon
// Rekognition selects the largest face in the source image for this comparison.
type ComparedSourceImageFace struct {
	_ struct{} `type:"structure"`

	// Bounding box of the face.
	BoundingBox *BoundingBox `type:"structure"`

	// Confidence level that the selected bounding box contains a face.
	Confidence *float64 `type:"float"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s ComparedSourceImageFace) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s ComparedSourceImageFace) GoString() string {
	return s.String()
}

// SetBoundingBox sets the BoundingBox field's value and returns the
// receiver to allow fluent chaining.
func (s *ComparedSourceImageFace) SetBoundingBox(v *BoundingBox) *ComparedSourceImageFace {
	s.BoundingBox = v
	return s
}

// SetConfidence sets the Confidence field's value and returns the receiver
// to allow fluent chaining.
func (s *ComparedSourceImageFace) SetConfidence(v float64) *ComparedSourceImageFace {
	s.Confidence = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Information about a moderation label detection in a stored video.
type ContentModerationDetection struct {
	_ struct{} `type:"structure"`

	// The moderation label detected by in the stored video.
	ModerationLabel *ModerationLabel `type:"structure"`

	// Time, in milliseconds from the beginning of the video, that the moderation
	// label was detected.
	Timestamp *int64 `type:"long"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s ContentModerationDetection) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s ContentModerationDetection) GoString() string {
	return s.String()
}

// SetModerationLabel sets the ModerationLabel field's value and returns the
// receiver to allow fluent chaining.
func (s *ContentModerationDetection) SetModerationLabel(v *ModerationLabel) *ContentModerationDetection {
	s.ModerationLabel = v
	return s
}

// SetTimestamp sets the Timestamp field's value. The value is copied to a
// fresh address so the caller's variable is not aliased; the receiver is
// returned for chaining.
func (s *ContentModerationDetection) SetTimestamp(v int64) *ContentModerationDetection {
	s.Timestamp = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// CreateCollectionInput carries the request parameters for the
// CreateCollection operation.
type CreateCollectionInput struct {
	_ struct{} `type:"structure"`

	// ID for the collection that you are creating.
	//
	// CollectionId is a required field
	CollectionId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s CreateCollectionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s CreateCollectionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the presence and minimum length of CollectionId; it returns
// a request.ErrInvalidParams listing the violations, or nil when valid.
func (s *CreateCollectionInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateCollectionInput"}
	if s.CollectionId == nil {
		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
	}
	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCollectionId sets the CollectionId field's value and returns the
// receiver to allow fluent chaining.
func (s *CreateCollectionInput) SetCollectionId(v string) *CreateCollectionInput {
	s.CollectionId = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// CreateCollectionOutput carries the response of the CreateCollection
// operation.
type CreateCollectionOutput struct {
	_ struct{} `type:"structure"`

	// Amazon Resource Name (ARN) of the collection. You can use this to manage
	// permissions on your resources.
	CollectionArn *string `type:"string"`

	// Version number of the face detection model associated with the collection
	// you are creating.
	FaceModelVersion *string `type:"string"`

	// HTTP status code indicating the result of the operation.
	StatusCode *int64 `type:"integer"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s CreateCollectionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s CreateCollectionOutput) GoString() string {
	return s.String()
}

// SetCollectionArn sets the CollectionArn field's value and returns the
// receiver to allow fluent chaining.
func (s *CreateCollectionOutput) SetCollectionArn(v string) *CreateCollectionOutput {
	s.CollectionArn = &v
	return s
}

// SetFaceModelVersion sets the FaceModelVersion field's value and returns
// the receiver to allow fluent chaining.
func (s *CreateCollectionOutput) SetFaceModelVersion(v string) *CreateCollectionOutput {
	s.FaceModelVersion = &v
	return s
}

// SetStatusCode sets the StatusCode field's value and returns the receiver
// to allow fluent chaining.
func (s *CreateCollectionOutput) SetStatusCode(v int64) *CreateCollectionOutput {
	s.StatusCode = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// CreateStreamProcessorInput carries the request parameters for the
// CreateStreamProcessor operation: the video source, the results sink, the
// IAM role, face-search settings, and a name for the processor.
type CreateStreamProcessorInput struct {
	_ struct{} `type:"structure"`

	// Kinesis video stream stream that provides the source streaming video. If
	// you are using the AWS CLI, the parameter name is StreamProcessorInput.
	//
	// Input is a required field
	Input *StreamProcessorInput `type:"structure" required:"true"`

	// An identifier you assign to the stream processor. You can use Name to manage
	// the stream processor. For example, you can get the current status of the
	// stream processor by calling DescribeStreamProcessor. Name is idempotent.
	//
	// Name is a required field
	Name *string `min:"1" type:"string" required:"true"`

	// Kinesis data stream stream to which Amazon Rekognition Video puts the analysis
	// results. If you are using the AWS CLI, the parameter name is StreamProcessorOutput.
	//
	// Output is a required field
	Output *StreamProcessorOutput `type:"structure" required:"true"`

	// ARN of the IAM role that allows access to the stream processor.
	//
	// RoleArn is a required field
	RoleArn *string `type:"string" required:"true"`

	// Face recognition input parameters to be used by the stream processor. Includes
	// the collection to use for face recognition and the face attributes to detect.
	//
	// Settings is a required field
	Settings *StreamProcessorSettings `type:"structure" required:"true"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s CreateStreamProcessorInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s CreateStreamProcessorInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It checks every required field, the minimum length of Name, and
// recursively validates Settings; it returns a request.ErrInvalidParams
// listing all violations found, or nil when the input is valid.
func (s *CreateStreamProcessorInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "CreateStreamProcessorInput"}
	if s.Input == nil {
		invalidParams.Add(request.NewErrParamRequired("Input"))
	}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}
	if s.Output == nil {
		invalidParams.Add(request.NewErrParamRequired("Output"))
	}
	if s.RoleArn == nil {
		invalidParams.Add(request.NewErrParamRequired("RoleArn"))
	}
	if s.Settings == nil {
		invalidParams.Add(request.NewErrParamRequired("Settings"))
	}
	if s.Settings != nil {
		if err := s.Settings.Validate(); err != nil {
			invalidParams.AddNested("Settings", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetInput sets the Input field's value and returns the receiver to allow
// fluent chaining.
func (s *CreateStreamProcessorInput) SetInput(v *StreamProcessorInput) *CreateStreamProcessorInput {
	s.Input = v
	return s
}

// SetName sets the Name field's value and returns the receiver to allow
// fluent chaining.
func (s *CreateStreamProcessorInput) SetName(v string) *CreateStreamProcessorInput {
	s.Name = &v
	return s
}

// SetOutput sets the Output field's value and returns the receiver to
// allow fluent chaining.
func (s *CreateStreamProcessorInput) SetOutput(v *StreamProcessorOutput) *CreateStreamProcessorInput {
	s.Output = v
	return s
}

// SetRoleArn sets the RoleArn field's value and returns the receiver to
// allow fluent chaining.
func (s *CreateStreamProcessorInput) SetRoleArn(v string) *CreateStreamProcessorInput {
	s.RoleArn = &v
	return s
}

// SetSettings sets the Settings field's value and returns the receiver to
// allow fluent chaining.
func (s *CreateStreamProcessorInput) SetSettings(v *StreamProcessorSettings) *CreateStreamProcessorInput {
	s.Settings = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// CreateStreamProcessorOutput carries the response of the
// CreateStreamProcessor operation.
type CreateStreamProcessorOutput struct {
	_ struct{} `type:"structure"`

	// ARN for the newly create stream processor.
	StreamProcessorArn *string `type:"string"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s CreateStreamProcessorOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s CreateStreamProcessorOutput) GoString() string {
	return s.String()
}

// SetStreamProcessorArn sets the StreamProcessorArn field's value and
// returns the receiver to allow fluent chaining.
func (s *CreateStreamProcessorOutput) SetStreamProcessorArn(v string) *CreateStreamProcessorOutput {
	s.StreamProcessorArn = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DeleteCollectionInput carries the request parameters for the
// DeleteCollection operation.
type DeleteCollectionInput struct {
	_ struct{} `type:"structure"`

	// ID of the collection to delete.
	//
	// CollectionId is a required field
	CollectionId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s DeleteCollectionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s DeleteCollectionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the presence and minimum length of CollectionId; it returns
// a request.ErrInvalidParams listing the violations, or nil when valid.
func (s *DeleteCollectionInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DeleteCollectionInput"}
	if s.CollectionId == nil {
		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
	}
	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCollectionId sets the CollectionId field's value and returns the
// receiver to allow fluent chaining.
func (s *DeleteCollectionInput) SetCollectionId(v string) *DeleteCollectionInput {
	s.CollectionId = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DeleteCollectionOutput carries the response of the DeleteCollection
// operation.
type DeleteCollectionOutput struct {
	_ struct{} `type:"structure"`

	// HTTP status code that indicates the result of the operation.
	StatusCode *int64 `type:"integer"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s DeleteCollectionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s DeleteCollectionOutput) GoString() string {
	return s.String()
}

// SetStatusCode sets the StatusCode field's value and returns the receiver
// to allow fluent chaining.
func (s *DeleteCollectionOutput) SetStatusCode(v int64) *DeleteCollectionOutput {
	s.StatusCode = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DeleteFacesInput carries the request parameters for the DeleteFaces
// operation: the collection and the IDs of the faces to remove from it.
type DeleteFacesInput struct {
	_ struct{} `type:"structure"`

	// Collection from which to remove the specific faces.
	//
	// CollectionId is a required field
	CollectionId *string `min:"1" type:"string" required:"true"`

	// An array of face IDs to delete.
	//
	// FaceIds is a required field
	FaceIds []*string `min:"1" type:"list" required:"true"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s DeleteFacesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s DeleteFacesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces presence and minimum lengths of CollectionId and FaceIds; it
// returns a request.ErrInvalidParams listing the violations, or nil when
// the input is valid.
func (s *DeleteFacesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DeleteFacesInput"}
	if s.CollectionId == nil {
		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
	}
	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
	}
	if s.FaceIds == nil {
		invalidParams.Add(request.NewErrParamRequired("FaceIds"))
	}
	if s.FaceIds != nil && len(s.FaceIds) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("FaceIds", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCollectionId sets the CollectionId field's value and returns the
// receiver to allow fluent chaining.
func (s *DeleteFacesInput) SetCollectionId(v string) *DeleteFacesInput {
	s.CollectionId = &v
	return s
}

// SetFaceIds sets the FaceIds field's value and returns the receiver to
// allow fluent chaining.
func (s *DeleteFacesInput) SetFaceIds(v []*string) *DeleteFacesInput {
	s.FaceIds = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DeleteFacesOutput carries the response of the DeleteFaces operation.
type DeleteFacesOutput struct {
	_ struct{} `type:"structure"`

	// An array of strings (face IDs) of the faces that were deleted.
	DeletedFaces []*string `min:"1" type:"list"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s DeleteFacesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s DeleteFacesOutput) GoString() string {
	return s.String()
}

// SetDeletedFaces sets the DeletedFaces field's value and returns the
// receiver to allow fluent chaining.
func (s *DeleteFacesOutput) SetDeletedFaces(v []*string) *DeleteFacesOutput {
	s.DeletedFaces = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DeleteStreamProcessorInput carries the request parameters for the
// DeleteStreamProcessor operation.
type DeleteStreamProcessorInput struct {
	_ struct{} `type:"structure"`

	// The name of the stream processor you want to delete.
	//
	// Name is a required field
	Name *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s DeleteStreamProcessorInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s DeleteStreamProcessorInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the presence and minimum length of Name; it returns a
// request.ErrInvalidParams listing the violations, or nil when valid.
func (s *DeleteStreamProcessorInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DeleteStreamProcessorInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetName sets the Name field's value and returns the receiver to allow
// fluent chaining.
func (s *DeleteStreamProcessorInput) SetName(v string) *DeleteStreamProcessorInput {
	s.Name = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DeleteStreamProcessorOutput is the (empty) response of the
// DeleteStreamProcessor operation.
type DeleteStreamProcessorOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s DeleteStreamProcessorOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s DeleteStreamProcessorOutput) GoString() string {
	return s.String()
}
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// DescribeCollectionInput carries the request parameters for the
// DescribeCollection operation.
type DescribeCollectionInput struct {
	_ struct{} `type:"structure"`

	// The ID of the collection to describe.
	//
	// CollectionId is a required field
	CollectionId *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s DescribeCollectionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s DescribeCollectionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the presence and minimum length of CollectionId; it returns
// a request.ErrInvalidParams listing the violations, or nil when valid.
func (s *DescribeCollectionInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeCollectionInput"}
	if s.CollectionId == nil {
		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
	}
	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCollectionId sets the CollectionId field's value and returns the
// receiver to allow fluent chaining.
func (s *DescribeCollectionInput) SetCollectionId(v string) *DescribeCollectionInput {
	s.CollectionId = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DescribeCollectionOutput carries the response of the DescribeCollection
// operation: the collection's ARN, creation time, indexed-face count, and
// face model version.
type DescribeCollectionOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Resource Name (ARN) of the collection.
	CollectionARN *string `type:"string"`

	// The number of milliseconds since the Unix epoch time until the creation of
	// the collection. The Unix epoch time is 00:00:00 Coordinated Universal Time
	// (UTC), Thursday, 1 January 1970.
	CreationTimestamp *time.Time `type:"timestamp"`

	// The number of faces that are indexed into the collection. To index faces
	// into a collection, use IndexFaces.
	FaceCount *int64 `type:"long"`

	// The version of the face model that's used by the collection for face detection.
	//
	// For more information, see Model Versioning in the Amazon Rekognition Developer
	// Guide.
	FaceModelVersion *string `type:"string"`
}

// String returns the string representation, pretty-printing all exported
// fields via awsutil.
func (s DescribeCollectionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation. It satisfies fmt.GoStringer
// by delegating to String.
func (s DescribeCollectionOutput) GoString() string {
	return s.String()
}

// SetCollectionARN sets the CollectionARN field's value and returns the
// receiver to allow fluent chaining.
func (s *DescribeCollectionOutput) SetCollectionARN(v string) *DescribeCollectionOutput {
	s.CollectionARN = &v
	return s
}

// SetCreationTimestamp sets the CreationTimestamp field's value. The value
// is copied to a fresh address so the caller's variable is not aliased;
// the receiver is returned for chaining.
func (s *DescribeCollectionOutput) SetCreationTimestamp(v time.Time) *DescribeCollectionOutput {
	s.CreationTimestamp = &v
	return s
}

// SetFaceCount sets the FaceCount field's value and returns the receiver
// to allow fluent chaining.
func (s *DescribeCollectionOutput) SetFaceCount(v int64) *DescribeCollectionOutput {
	s.FaceCount = &v
	return s
}

// SetFaceModelVersion sets the FaceModelVersion field's value and returns
// the receiver to allow fluent chaining.
func (s *DescribeCollectionOutput) SetFaceModelVersion(v string) *DescribeCollectionOutput {
	s.FaceModelVersion = &v
	return s
}
|
|
|
|
|
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// DescribeStreamProcessorInput is the request for the DescribeStreamProcessor
// API operation.
type DescribeStreamProcessorInput struct {
	_ struct{} `type:"structure"`

	// Name of the stream processor for which you want information.
	//
	// Name is a required field
	Name *string `min:"1" type:"string" required:"true"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s DescribeStreamProcessorInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s DescribeStreamProcessorInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns an aggregated request.ErrInvalidParams when the required Name
// field is missing or shorter than 1 character, or nil when all constraints
// are satisfied.
func (s *DescribeStreamProcessorInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DescribeStreamProcessorInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetName sets the Name field's value; it returns the receiver to allow
// method chaining.
func (s *DescribeStreamProcessorInput) SetName(v string) *DescribeStreamProcessorInput {
	s.Name = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DescribeStreamProcessorOutput describes a stream processor as returned by
// the DescribeStreamProcessor API operation.
type DescribeStreamProcessorOutput struct {
	_ struct{} `type:"structure"`

	// Date and time the stream processor was created
	CreationTimestamp *time.Time `type:"timestamp"`

	// Kinesis video stream that provides the source streaming video.
	Input *StreamProcessorInput `type:"structure"`

	// The time, in Unix format, the stream processor was last updated. For example,
	// when the stream processor moves from a running state to a failed state, or
	// when the user starts or stops the stream processor.
	LastUpdateTimestamp *time.Time `type:"timestamp"`

	// Name of the stream processor.
	Name *string `min:"1" type:"string"`

	// Kinesis data stream to which Amazon Rekognition Video puts the analysis results.
	Output *StreamProcessorOutput `type:"structure"`

	// ARN of the IAM role that allows access to the stream processor.
	RoleArn *string `type:"string"`

	// Face recognition input parameters that are being used by the stream processor.
	// Includes the collection to use for face recognition and the face attributes
	// to detect.
	Settings *StreamProcessorSettings `type:"structure"`

	// Current status of the stream processor.
	Status *string `type:"string" enum:"StreamProcessorStatus"`

	// Detailed status message about the stream processor.
	StatusMessage *string `type:"string"`

	// ARN of the stream processor.
	StreamProcessorArn *string `type:"string"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s DescribeStreamProcessorOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s DescribeStreamProcessorOutput) GoString() string {
	return s.String()
}

// SetCreationTimestamp sets the CreationTimestamp field's value; it returns
// the receiver to allow method chaining.
func (s *DescribeStreamProcessorOutput) SetCreationTimestamp(v time.Time) *DescribeStreamProcessorOutput {
	s.CreationTimestamp = &v
	return s
}

// SetInput sets the Input field's value; it returns the receiver to allow
// method chaining.
func (s *DescribeStreamProcessorOutput) SetInput(v *StreamProcessorInput) *DescribeStreamProcessorOutput {
	s.Input = v
	return s
}

// SetLastUpdateTimestamp sets the LastUpdateTimestamp field's value; it
// returns the receiver to allow method chaining.
func (s *DescribeStreamProcessorOutput) SetLastUpdateTimestamp(v time.Time) *DescribeStreamProcessorOutput {
	s.LastUpdateTimestamp = &v
	return s
}

// SetName sets the Name field's value; it returns the receiver to allow
// method chaining.
func (s *DescribeStreamProcessorOutput) SetName(v string) *DescribeStreamProcessorOutput {
	s.Name = &v
	return s
}

// SetOutput sets the Output field's value; it returns the receiver to allow
// method chaining.
func (s *DescribeStreamProcessorOutput) SetOutput(v *StreamProcessorOutput) *DescribeStreamProcessorOutput {
	s.Output = v
	return s
}

// SetRoleArn sets the RoleArn field's value; it returns the receiver to allow
// method chaining.
func (s *DescribeStreamProcessorOutput) SetRoleArn(v string) *DescribeStreamProcessorOutput {
	s.RoleArn = &v
	return s
}

// SetSettings sets the Settings field's value; it returns the receiver to
// allow method chaining.
func (s *DescribeStreamProcessorOutput) SetSettings(v *StreamProcessorSettings) *DescribeStreamProcessorOutput {
	s.Settings = v
	return s
}

// SetStatus sets the Status field's value; it returns the receiver to allow
// method chaining.
func (s *DescribeStreamProcessorOutput) SetStatus(v string) *DescribeStreamProcessorOutput {
	s.Status = &v
	return s
}

// SetStatusMessage sets the StatusMessage field's value; it returns the
// receiver to allow method chaining.
func (s *DescribeStreamProcessorOutput) SetStatusMessage(v string) *DescribeStreamProcessorOutput {
	s.StatusMessage = &v
	return s
}

// SetStreamProcessorArn sets the StreamProcessorArn field's value; it returns
// the receiver to allow method chaining.
func (s *DescribeStreamProcessorOutput) SetStreamProcessorArn(v string) *DescribeStreamProcessorOutput {
	s.StreamProcessorArn = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DetectFacesInput is the request for the DetectFaces API operation.
type DetectFacesInput struct {
	_ struct{} `type:"structure"`

	// An array of facial attributes you want to be returned. This can be the default
	// list of attributes or all attributes. If you don't specify a value for Attributes
	// or if you specify ["DEFAULT"], the API returns the following subset of facial
	// attributes: BoundingBox, Confidence, Pose, Quality, and Landmarks. If you
	// provide ["ALL"], all facial attributes are returned, but the operation takes
	// longer to complete.
	//
	// If you provide both, ["ALL", "DEFAULT"], the service uses a logical AND operator
	// to determine which attributes to return (in this case, all attributes).
	Attributes []*string `type:"list"`

	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
	// is not supported.
	//
	// Image is a required field
	Image *Image `type:"structure" required:"true"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s DetectFacesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s DetectFacesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns an aggregated request.ErrInvalidParams when the required Image
// field is missing or fails its own nested validation, or nil when all
// constraints are satisfied.
func (s *DetectFacesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DetectFacesInput"}
	if s.Image == nil {
		invalidParams.Add(request.NewErrParamRequired("Image"))
	}
	if s.Image != nil {
		if err := s.Image.Validate(); err != nil {
			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetAttributes sets the Attributes field's value; it returns the receiver to
// allow method chaining.
func (s *DetectFacesInput) SetAttributes(v []*string) *DetectFacesInput {
	s.Attributes = v
	return s
}

// SetImage sets the Image field's value; it returns the receiver to allow
// method chaining.
func (s *DetectFacesInput) SetImage(v *Image) *DetectFacesInput {
	s.Image = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DetectFacesOutput is the response returned by the DetectFaces API operation.
type DetectFacesOutput struct {
	_ struct{} `type:"structure"`

	// Details of each face found in the image.
	FaceDetails []*FaceDetail `type:"list"`

	// The value of OrientationCorrection is always null.
	//
	// If the input image is in .jpeg format, it might contain exchangeable image
	// file format (Exif) metadata that includes the image's orientation. Amazon
	// Rekognition uses this orientation information to perform image correction.
	// The bounding box coordinates are translated to represent object locations
	// after the orientation information in the Exif metadata is used to correct
	// the image orientation. Images in .png format don't contain Exif metadata.
	//
	// Amazon Rekognition doesn’t perform image correction for images in .png format
	// and .jpeg images without orientation information in the image Exif metadata.
	// The bounding box coordinates aren't translated and represent the object locations
	// before the image is rotated.
	OrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s DetectFacesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s DetectFacesOutput) GoString() string {
	return s.String()
}

// SetFaceDetails sets the FaceDetails field's value; it returns the receiver
// to allow method chaining.
func (s *DetectFacesOutput) SetFaceDetails(v []*FaceDetail) *DetectFacesOutput {
	s.FaceDetails = v
	return s
}

// SetOrientationCorrection sets the OrientationCorrection field's value; it
// returns the receiver to allow method chaining.
func (s *DetectFacesOutput) SetOrientationCorrection(v string) *DetectFacesOutput {
	s.OrientationCorrection = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DetectLabelsInput is the request for the DetectLabels API operation.
type DetectLabelsInput struct {
	_ struct{} `type:"structure"`

	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
	// is not supported.
	//
	// Image is a required field
	Image *Image `type:"structure" required:"true"`

	// Maximum number of labels you want the service to return in the response.
	// The service returns the specified number of highest confidence labels.
	MaxLabels *int64 `type:"integer"`

	// Specifies the minimum confidence level for the labels to return. Amazon Rekognition
	// doesn't return any labels with confidence lower than this specified value.
	//
	// If MinConfidence is not specified, the operation returns labels with a confidence
	// value greater than or equal to 55 percent.
	MinConfidence *float64 `type:"float"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s DetectLabelsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s DetectLabelsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns an aggregated request.ErrInvalidParams when the required Image
// field is missing or fails its own nested validation, or nil when all
// constraints are satisfied.
func (s *DetectLabelsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DetectLabelsInput"}
	if s.Image == nil {
		invalidParams.Add(request.NewErrParamRequired("Image"))
	}
	if s.Image != nil {
		if err := s.Image.Validate(); err != nil {
			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetImage sets the Image field's value; it returns the receiver to allow
// method chaining.
func (s *DetectLabelsInput) SetImage(v *Image) *DetectLabelsInput {
	s.Image = v
	return s
}

// SetMaxLabels sets the MaxLabels field's value; it returns the receiver to
// allow method chaining.
func (s *DetectLabelsInput) SetMaxLabels(v int64) *DetectLabelsInput {
	s.MaxLabels = &v
	return s
}

// SetMinConfidence sets the MinConfidence field's value; it returns the
// receiver to allow method chaining.
func (s *DetectLabelsInput) SetMinConfidence(v float64) *DetectLabelsInput {
	s.MinConfidence = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DetectLabelsOutput is the response returned by the DetectLabels API operation.
type DetectLabelsOutput struct {
	_ struct{} `type:"structure"`

	// Version number of the label detection model that was used to detect labels.
	LabelModelVersion *string `type:"string"`

	// An array of labels for the real-world objects detected.
	Labels []*Label `type:"list"`

	// The value of OrientationCorrection is always null.
	//
	// If the input image is in .jpeg format, it might contain exchangeable image
	// file format (Exif) metadata that includes the image's orientation. Amazon
	// Rekognition uses this orientation information to perform image correction.
	// The bounding box coordinates are translated to represent object locations
	// after the orientation information in the Exif metadata is used to correct
	// the image orientation. Images in .png format don't contain Exif metadata.
	//
	// Amazon Rekognition doesn’t perform image correction for images in .png format
	// and .jpeg images without orientation information in the image Exif metadata.
	// The bounding box coordinates aren't translated and represent the object locations
	// before the image is rotated.
	OrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s DetectLabelsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s DetectLabelsOutput) GoString() string {
	return s.String()
}

// SetLabelModelVersion sets the LabelModelVersion field's value; it returns
// the receiver to allow method chaining.
func (s *DetectLabelsOutput) SetLabelModelVersion(v string) *DetectLabelsOutput {
	s.LabelModelVersion = &v
	return s
}

// SetLabels sets the Labels field's value; it returns the receiver to allow
// method chaining.
func (s *DetectLabelsOutput) SetLabels(v []*Label) *DetectLabelsOutput {
	s.Labels = v
	return s
}

// SetOrientationCorrection sets the OrientationCorrection field's value; it
// returns the receiver to allow method chaining.
func (s *DetectLabelsOutput) SetOrientationCorrection(v string) *DetectLabelsOutput {
	s.OrientationCorrection = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DetectModerationLabelsInput is the request for the DetectModerationLabels
// API operation.
type DetectModerationLabelsInput struct {
	_ struct{} `type:"structure"`

	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
	// is not supported.
	//
	// Image is a required field
	Image *Image `type:"structure" required:"true"`

	// Specifies the minimum confidence level for the labels to return. Amazon Rekognition
	// doesn't return any labels with a confidence level lower than this specified
	// value.
	//
	// If you don't specify MinConfidence, the operation returns labels with confidence
	// values greater than or equal to 50 percent.
	MinConfidence *float64 `type:"float"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s DetectModerationLabelsInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s DetectModerationLabelsInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns an aggregated request.ErrInvalidParams when the required Image
// field is missing or fails its own nested validation, or nil when all
// constraints are satisfied.
func (s *DetectModerationLabelsInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DetectModerationLabelsInput"}
	if s.Image == nil {
		invalidParams.Add(request.NewErrParamRequired("Image"))
	}
	if s.Image != nil {
		if err := s.Image.Validate(); err != nil {
			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetImage sets the Image field's value; it returns the receiver to allow
// method chaining.
func (s *DetectModerationLabelsInput) SetImage(v *Image) *DetectModerationLabelsInput {
	s.Image = v
	return s
}

// SetMinConfidence sets the MinConfidence field's value; it returns the
// receiver to allow method chaining.
func (s *DetectModerationLabelsInput) SetMinConfidence(v float64) *DetectModerationLabelsInput {
	s.MinConfidence = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DetectModerationLabelsOutput is the response returned by the
// DetectModerationLabels API operation.
type DetectModerationLabelsOutput struct {
	_ struct{} `type:"structure"`

	// Array of detected Moderation labels and the time, in milliseconds from the
	// start of the video, they were detected.
	ModerationLabels []*ModerationLabel `type:"list"`

	// Version number of the moderation detection model that was used to detect
	// unsafe content.
	ModerationModelVersion *string `type:"string"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s DetectModerationLabelsOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s DetectModerationLabelsOutput) GoString() string {
	return s.String()
}

// SetModerationLabels sets the ModerationLabels field's value; it returns the
// receiver to allow method chaining.
func (s *DetectModerationLabelsOutput) SetModerationLabels(v []*ModerationLabel) *DetectModerationLabelsOutput {
	s.ModerationLabels = v
	return s
}

// SetModerationModelVersion sets the ModerationModelVersion field's value; it
// returns the receiver to allow method chaining.
func (s *DetectModerationLabelsOutput) SetModerationModelVersion(v string) *DetectModerationLabelsOutput {
	s.ModerationModelVersion = &v
	return s
}
|
|
|
|
|
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// DetectTextInput is the request for the DetectText API operation.
type DetectTextInput struct {
	_ struct{} `type:"structure"`

	// The input image as base64-encoded bytes or an Amazon S3 object. If you use
	// the AWS CLI to call Amazon Rekognition operations, you can't pass image bytes.
	//
	// Image is a required field
	Image *Image `type:"structure" required:"true"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s DetectTextInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s DetectTextInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns an aggregated request.ErrInvalidParams when the required Image
// field is missing or fails its own nested validation, or nil when all
// constraints are satisfied.
func (s *DetectTextInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "DetectTextInput"}
	if s.Image == nil {
		invalidParams.Add(request.NewErrParamRequired("Image"))
	}
	if s.Image != nil {
		if err := s.Image.Validate(); err != nil {
			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetImage sets the Image field's value; it returns the receiver to allow
// method chaining.
func (s *DetectTextInput) SetImage(v *Image) *DetectTextInput {
	s.Image = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// DetectTextOutput is the response returned by the DetectText API operation.
type DetectTextOutput struct {
	_ struct{} `type:"structure"`

	// An array of text that was detected in the input image.
	TextDetections []*TextDetection `type:"list"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s DetectTextOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s DetectTextOutput) GoString() string {
	return s.String()
}

// SetTextDetections sets the TextDetections field's value; it returns the
// receiver to allow method chaining.
func (s *DetectTextOutput) SetTextDetections(v []*TextDetection) *DetectTextOutput {
	s.TextDetections = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// The emotions detected on the face, and the confidence level in the determination.
// For example, HAPPY, SAD, and ANGRY.
type Emotion struct {
	_ struct{} `type:"structure"`

	// Level of confidence in the determination.
	Confidence *float64 `type:"float"`

	// Type of emotion detected.
	Type *string `type:"string" enum:"EmotionName"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s Emotion) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s Emotion) GoString() string {
	return s.String()
}

// SetConfidence sets the Confidence field's value; it returns the receiver to
// allow method chaining.
func (s *Emotion) SetConfidence(v float64) *Emotion {
	s.Confidence = &v
	return s
}

// SetType sets the Type field's value; it returns the receiver to allow
// method chaining.
func (s *Emotion) SetType(v string) *Emotion {
	s.Type = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Indicates whether or not the eyes on the face are open, and the confidence
// level in the determination.
type EyeOpen struct {
	_ struct{} `type:"structure"`

	// Level of confidence in the determination.
	Confidence *float64 `type:"float"`

	// Boolean value that indicates whether the eyes on the face are open.
	Value *bool `type:"boolean"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s EyeOpen) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s EyeOpen) GoString() string {
	return s.String()
}

// SetConfidence sets the Confidence field's value; it returns the receiver to
// allow method chaining.
func (s *EyeOpen) SetConfidence(v float64) *EyeOpen {
	s.Confidence = &v
	return s
}

// SetValue sets the Value field's value; it returns the receiver to allow
// method chaining.
func (s *EyeOpen) SetValue(v bool) *EyeOpen {
	s.Value = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Indicates whether or not the face is wearing eye glasses, and the confidence
// level in the determination.
type Eyeglasses struct {
	_ struct{} `type:"structure"`

	// Level of confidence in the determination.
	Confidence *float64 `type:"float"`

	// Boolean value that indicates whether the face is wearing eye glasses or not.
	Value *bool `type:"boolean"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s Eyeglasses) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s Eyeglasses) GoString() string {
	return s.String()
}

// SetConfidence sets the Confidence field's value; it returns the receiver to
// allow method chaining.
func (s *Eyeglasses) SetConfidence(v float64) *Eyeglasses {
	s.Confidence = &v
	return s
}

// SetValue sets the Value field's value; it returns the receiver to allow
// method chaining.
func (s *Eyeglasses) SetValue(v bool) *Eyeglasses {
	s.Value = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Describes the face properties such as the bounding box, face ID, image ID
// of the input image, and external image ID that you assigned.
type Face struct {
	_ struct{} `type:"structure"`

	// Bounding box of the face.
	BoundingBox *BoundingBox `type:"structure"`

	// Confidence level that the bounding box contains a face (and not a different
	// object such as a tree).
	Confidence *float64 `type:"float"`

	// Identifier that you assign to all the faces in the input image.
	ExternalImageId *string `min:"1" type:"string"`

	// Unique identifier that Amazon Rekognition assigns to the face.
	FaceId *string `type:"string"`

	// Unique identifier that Amazon Rekognition assigns to the input image.
	ImageId *string `type:"string"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation, rendered by awsutil.Prettify.
func (s Face) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation; it delegates to String.
func (s Face) GoString() string {
	return s.String()
}

// SetBoundingBox sets the BoundingBox field's value; it returns the receiver
// to allow method chaining.
func (s *Face) SetBoundingBox(v *BoundingBox) *Face {
	s.BoundingBox = v
	return s
}

// SetConfidence sets the Confidence field's value; it returns the receiver to
// allow method chaining.
func (s *Face) SetConfidence(v float64) *Face {
	s.Confidence = &v
	return s
}

// SetExternalImageId sets the ExternalImageId field's value; it returns the
// receiver to allow method chaining.
func (s *Face) SetExternalImageId(v string) *Face {
	s.ExternalImageId = &v
	return s
}

// SetFaceId sets the FaceId field's value; it returns the receiver to allow
// method chaining.
func (s *Face) SetFaceId(v string) *Face {
	s.FaceId = &v
	return s
}

// SetImageId sets the ImageId field's value; it returns the receiver to allow
// method chaining.
func (s *Face) SetImageId(v string) *Face {
	s.ImageId = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Structure containing attributes of the face that the algorithm detected.
//
// A FaceDetail object contains either the default facial attributes or all
// facial attributes. The default attributes are BoundingBox, Confidence, Landmarks,
// Pose, and Quality.
//
// GetFaceDetection is the only Amazon Rekognition Video stored video operation
// that can return a FaceDetail object with all attributes. To specify which
// attributes to return, use the FaceAttributes input parameter for StartFaceDetection.
// The following Amazon Rekognition Video operations return only the default
// attributes. The corresponding Start operations don't have a FaceAttributes
// input parameter.
//
// * GetCelebrityRecognition
//
// * GetPersonTracking
//
// * GetFaceSearch
//
// The Amazon Rekognition Image DetectFaces and IndexFaces operations can return
// all facial attributes. To specify which attributes to return, use the Attributes
// input parameter for DetectFaces. For IndexFaces, use the DetectAttributes
// input parameter.
type FaceDetail struct {
	_ struct{} `type:"structure"`

	// The estimated age range, in years, for the face. Low represents the lowest
	// estimated age and High represents the highest estimated age.
	AgeRange *AgeRange `type:"structure"`

	// Indicates whether or not the face has a beard, and the confidence level in
	// the determination.
	Beard *Beard `type:"structure"`

	// Bounding box of the face. Default attribute.
	BoundingBox *BoundingBox `type:"structure"`

	// Confidence level that the bounding box contains a face (and not a different
	// object such as a tree). Default attribute.
	Confidence *float64 `type:"float"`

	// The emotions detected on the face, and the confidence level in the determination.
	// For example, HAPPY, SAD, and ANGRY.
	Emotions []*Emotion `type:"list"`

	// Indicates whether or not the face is wearing eye glasses, and the confidence
	// level in the determination.
	Eyeglasses *Eyeglasses `type:"structure"`

	// Indicates whether or not the eyes on the face are open, and the confidence
	// level in the determination.
	EyesOpen *EyeOpen `type:"structure"`

	// Gender of the face and the confidence level in the determination.
	Gender *Gender `type:"structure"`

	// Indicates the location of landmarks on the face. Default attribute.
	Landmarks []*Landmark `type:"list"`

	// Indicates whether or not the mouth on the face is open, and the confidence
	// level in the determination.
	MouthOpen *MouthOpen `type:"structure"`

	// Indicates whether or not the face has a mustache, and the confidence level
	// in the determination.
	Mustache *Mustache `type:"structure"`

	// Indicates the pose of the face as determined by its pitch, roll, and yaw.
	// Default attribute.
	Pose *Pose `type:"structure"`

	// Identifies image brightness and sharpness. Default attribute.
	Quality *ImageQuality `type:"structure"`

	// Indicates whether or not the face is smiling, and the confidence level in
	// the determination.
	Smile *Smile `type:"structure"`

	// Indicates whether or not the face is wearing sunglasses, and the confidence
	// level in the determination.
	Sunglasses *Sunglasses `type:"structure"`
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s FaceDetail) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s FaceDetail) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetAgeRange sets the AgeRange field's value.
|
|
|
|
|
func (s *FaceDetail) SetAgeRange(v *AgeRange) *FaceDetail {
|
|
|
|
|
s.AgeRange = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetBeard sets the Beard field's value.
|
|
|
|
|
func (s *FaceDetail) SetBeard(v *Beard) *FaceDetail {
|
|
|
|
|
s.Beard = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetBoundingBox sets the BoundingBox field's value.
|
|
|
|
|
func (s *FaceDetail) SetBoundingBox(v *BoundingBox) *FaceDetail {
|
|
|
|
|
s.BoundingBox = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetConfidence sets the Confidence field's value.
|
|
|
|
|
func (s *FaceDetail) SetConfidence(v float64) *FaceDetail {
|
|
|
|
|
s.Confidence = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetEmotions sets the Emotions field's value.
|
|
|
|
|
func (s *FaceDetail) SetEmotions(v []*Emotion) *FaceDetail {
|
|
|
|
|
s.Emotions = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetEyeglasses sets the Eyeglasses field's value.
|
|
|
|
|
func (s *FaceDetail) SetEyeglasses(v *Eyeglasses) *FaceDetail {
|
|
|
|
|
s.Eyeglasses = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetEyesOpen sets the EyesOpen field's value.
|
|
|
|
|
func (s *FaceDetail) SetEyesOpen(v *EyeOpen) *FaceDetail {
|
|
|
|
|
s.EyesOpen = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetGender sets the Gender field's value.
|
|
|
|
|
func (s *FaceDetail) SetGender(v *Gender) *FaceDetail {
|
|
|
|
|
s.Gender = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetLandmarks sets the Landmarks field's value.
|
|
|
|
|
func (s *FaceDetail) SetLandmarks(v []*Landmark) *FaceDetail {
|
|
|
|
|
s.Landmarks = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetMouthOpen sets the MouthOpen field's value.
|
|
|
|
|
func (s *FaceDetail) SetMouthOpen(v *MouthOpen) *FaceDetail {
|
|
|
|
|
s.MouthOpen = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetMustache sets the Mustache field's value.
|
|
|
|
|
func (s *FaceDetail) SetMustache(v *Mustache) *FaceDetail {
|
|
|
|
|
s.Mustache = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetPose sets the Pose field's value.
|
|
|
|
|
func (s *FaceDetail) SetPose(v *Pose) *FaceDetail {
|
|
|
|
|
s.Pose = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetQuality sets the Quality field's value.
|
|
|
|
|
func (s *FaceDetail) SetQuality(v *ImageQuality) *FaceDetail {
|
|
|
|
|
s.Quality = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetSmile sets the Smile field's value.
|
|
|
|
|
func (s *FaceDetail) SetSmile(v *Smile) *FaceDetail {
|
|
|
|
|
s.Smile = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetSunglasses sets the Sunglasses field's value.
|
|
|
|
|
func (s *FaceDetail) SetSunglasses(v *Sunglasses) *FaceDetail {
|
|
|
|
|
s.Sunglasses = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Information about a face detected in a video analysis request and the time
|
|
|
|
|
// the face was detected in the video.
|
|
|
|
|
type FaceDetection struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The face properties for the detected face.
|
|
|
|
|
Face *FaceDetail `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Time, in milliseconds from the start of the video, that the face was detected.
|
|
|
|
|
Timestamp *int64 `type:"long"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s FaceDetection) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s FaceDetection) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetFace sets the Face field's value.
|
|
|
|
|
func (s *FaceDetection) SetFace(v *FaceDetail) *FaceDetection {
|
|
|
|
|
s.Face = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetTimestamp sets the Timestamp field's value.
|
|
|
|
|
func (s *FaceDetection) SetTimestamp(v int64) *FaceDetection {
|
|
|
|
|
s.Timestamp = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Provides face metadata. In addition, it also provides the confidence in the
|
|
|
|
|
// match of this face with the input face.
|
|
|
|
|
type FaceMatch struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Describes the face properties such as the bounding box, face ID, image ID
|
|
|
|
|
// of the source image, and external image ID that you assigned.
|
|
|
|
|
Face *Face `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Confidence in the match of this face with the input face.
|
|
|
|
|
Similarity *float64 `type:"float"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s FaceMatch) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s FaceMatch) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetFace sets the Face field's value.
|
|
|
|
|
func (s *FaceMatch) SetFace(v *Face) *FaceMatch {
|
|
|
|
|
s.Face = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetSimilarity sets the Similarity field's value.
|
|
|
|
|
func (s *FaceMatch) SetSimilarity(v float64) *FaceMatch {
|
|
|
|
|
s.Similarity = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Object containing both the face metadata (stored in the backend database),
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// and facial attributes that are detected but aren't stored in the database.
|
|
|
|
|
type FaceRecord struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Describes the face properties such as the bounding box, face ID, image ID
|
|
|
|
|
// of the input image, and external image ID that you assigned.
|
|
|
|
|
Face *Face `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Structure containing attributes of the face that the algorithm detected.
|
|
|
|
|
FaceDetail *FaceDetail `type:"structure"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s FaceRecord) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s FaceRecord) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetFace sets the Face field's value.
|
|
|
|
|
func (s *FaceRecord) SetFace(v *Face) *FaceRecord {
|
|
|
|
|
s.Face = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetFaceDetail sets the FaceDetail field's value.
|
|
|
|
|
func (s *FaceRecord) SetFaceDetail(v *FaceDetail) *FaceRecord {
|
|
|
|
|
s.FaceDetail = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Input face recognition parameters for an Amazon Rekognition stream processor.
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// FaceRecognitionSettings is a request parameter for CreateStreamProcessor.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
type FaceSearchSettings struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The ID of a collection that contains faces that you want to search for.
|
|
|
|
|
CollectionId *string `min:"1" type:"string"`
|
|
|
|
|
|
|
|
|
|
// Minimum face match confidence score that must be met to return a result for
|
|
|
|
|
// a recognized face. Default is 70. 0 is the lowest confidence. 100 is the
|
|
|
|
|
// highest confidence.
|
|
|
|
|
FaceMatchThreshold *float64 `type:"float"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s FaceSearchSettings) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s FaceSearchSettings) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Validate inspects the fields of the type to determine if they are valid.
|
|
|
|
|
func (s *FaceSearchSettings) Validate() error {
|
|
|
|
|
invalidParams := request.ErrInvalidParams{Context: "FaceSearchSettings"}
|
|
|
|
|
if s.CollectionId != nil && len(*s.CollectionId) < 1 {
|
|
|
|
|
invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if invalidParams.Len() > 0 {
|
|
|
|
|
return invalidParams
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetCollectionId sets the CollectionId field's value.
|
|
|
|
|
func (s *FaceSearchSettings) SetCollectionId(v string) *FaceSearchSettings {
|
|
|
|
|
s.CollectionId = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetFaceMatchThreshold sets the FaceMatchThreshold field's value.
|
|
|
|
|
func (s *FaceSearchSettings) SetFaceMatchThreshold(v float64) *FaceSearchSettings {
|
|
|
|
|
s.FaceMatchThreshold = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Gender of the face and the confidence level in the determination.
|
|
|
|
|
type Gender struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Level of confidence in the determination.
|
|
|
|
|
Confidence *float64 `type:"float"`
|
|
|
|
|
|
|
|
|
|
// Gender of the face.
|
|
|
|
|
Value *string `type:"string" enum:"GenderType"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s Gender) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s Gender) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetConfidence sets the Confidence field's value.
|
|
|
|
|
func (s *Gender) SetConfidence(v float64) *Gender {
|
|
|
|
|
s.Confidence = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetValue sets the Value field's value.
|
|
|
|
|
func (s *Gender) SetValue(v string) *Gender {
|
|
|
|
|
s.Value = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Information about where the text detected by DetectText is located on an
|
|
|
|
|
// image.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
type Geometry struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// An axis-aligned coarse representation of the detected text's location on
|
|
|
|
|
// the image.
|
|
|
|
|
BoundingBox *BoundingBox `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Within the bounding box, a fine-grained polygon around the detected text.
|
|
|
|
|
Polygon []*Point `type:"list"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s Geometry) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s Geometry) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetBoundingBox sets the BoundingBox field's value.
|
|
|
|
|
func (s *Geometry) SetBoundingBox(v *BoundingBox) *Geometry {
|
|
|
|
|
s.BoundingBox = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetPolygon sets the Polygon field's value.
|
|
|
|
|
func (s *Geometry) SetPolygon(v []*Point) *Geometry {
|
|
|
|
|
s.Polygon = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type GetCelebrityInfoInput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// The ID for the celebrity. You get the celebrity ID from a call to the RecognizeCelebrities
|
|
|
|
|
// operation, which recognizes celebrities in an image.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
|
|
|
|
// Id is a required field
|
|
|
|
|
Id *string `type:"string" required:"true"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s GetCelebrityInfoInput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s GetCelebrityInfoInput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Validate inspects the fields of the type to determine if they are valid.
|
|
|
|
|
func (s *GetCelebrityInfoInput) Validate() error {
|
|
|
|
|
invalidParams := request.ErrInvalidParams{Context: "GetCelebrityInfoInput"}
|
|
|
|
|
if s.Id == nil {
|
|
|
|
|
invalidParams.Add(request.NewErrParamRequired("Id"))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if invalidParams.Len() > 0 {
|
|
|
|
|
return invalidParams
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetId sets the Id field's value.
|
|
|
|
|
func (s *GetCelebrityInfoInput) SetId(v string) *GetCelebrityInfoInput {
|
|
|
|
|
s.Id = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type GetCelebrityInfoOutput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The name of the celebrity.
|
|
|
|
|
Name *string `type:"string"`
|
|
|
|
|
|
|
|
|
|
// An array of URLs pointing to additional celebrity information.
|
|
|
|
|
Urls []*string `type:"list"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s GetCelebrityInfoOutput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s GetCelebrityInfoOutput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetName sets the Name field's value.
|
|
|
|
|
func (s *GetCelebrityInfoOutput) SetName(v string) *GetCelebrityInfoOutput {
|
|
|
|
|
s.Name = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetUrls sets the Urls field's value.
|
|
|
|
|
func (s *GetCelebrityInfoOutput) SetUrls(v []*string) *GetCelebrityInfoOutput {
|
|
|
|
|
s.Urls = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type GetCelebrityRecognitionInput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Job identifier for the required celebrity recognition analysis. You can get
|
|
|
|
|
// the job identifer from a call to StartCelebrityRecognition.
|
|
|
|
|
//
|
|
|
|
|
// JobId is a required field
|
|
|
|
|
JobId *string `min:"1" type:"string" required:"true"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Maximum number of results to return per paginated call. The largest value
|
|
|
|
|
// you can specify is 1000. If you specify a value greater than 1000, a maximum
|
|
|
|
|
// of 1000 results is returned. The default value is 1000.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
MaxResults *int64 `min:"1" type:"integer"`
|
|
|
|
|
|
|
|
|
|
// If the previous response was incomplete (because there is more recognized
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// celebrities to retrieve), Amazon Rekognition Video returns a pagination token
|
|
|
|
|
// in the response. You can use this pagination token to retrieve the next set
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// of celebrities.
|
|
|
|
|
NextToken *string `type:"string"`
|
|
|
|
|
|
|
|
|
|
// Sort to use for celebrities returned in Celebrities field. Specify ID to
|
|
|
|
|
// sort by the celebrity identifier, specify TIMESTAMP to sort by the time the
|
|
|
|
|
// celebrity was recognized.
|
|
|
|
|
SortBy *string `type:"string" enum:"CelebrityRecognitionSortBy"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s GetCelebrityRecognitionInput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s GetCelebrityRecognitionInput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Validate inspects the fields of the type to determine if they are valid.
|
|
|
|
|
func (s *GetCelebrityRecognitionInput) Validate() error {
|
|
|
|
|
invalidParams := request.ErrInvalidParams{Context: "GetCelebrityRecognitionInput"}
|
|
|
|
|
if s.JobId == nil {
|
|
|
|
|
invalidParams.Add(request.NewErrParamRequired("JobId"))
|
|
|
|
|
}
|
|
|
|
|
if s.JobId != nil && len(*s.JobId) < 1 {
|
|
|
|
|
invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
|
|
|
|
|
}
|
|
|
|
|
if s.MaxResults != nil && *s.MaxResults < 1 {
|
|
|
|
|
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if invalidParams.Len() > 0 {
|
|
|
|
|
return invalidParams
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetJobId sets the JobId field's value.
|
|
|
|
|
func (s *GetCelebrityRecognitionInput) SetJobId(v string) *GetCelebrityRecognitionInput {
|
|
|
|
|
s.JobId = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetMaxResults sets the MaxResults field's value.
|
|
|
|
|
func (s *GetCelebrityRecognitionInput) SetMaxResults(v int64) *GetCelebrityRecognitionInput {
|
|
|
|
|
s.MaxResults = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetNextToken sets the NextToken field's value.
|
|
|
|
|
func (s *GetCelebrityRecognitionInput) SetNextToken(v string) *GetCelebrityRecognitionInput {
|
|
|
|
|
s.NextToken = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetSortBy sets the SortBy field's value.
|
|
|
|
|
func (s *GetCelebrityRecognitionInput) SetSortBy(v string) *GetCelebrityRecognitionInput {
|
|
|
|
|
s.SortBy = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type GetCelebrityRecognitionOutput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Array of celebrities recognized in the video.
|
|
|
|
|
Celebrities []*CelebrityRecognition `type:"list"`
|
|
|
|
|
|
|
|
|
|
// The current status of the celebrity recognition job.
|
|
|
|
|
JobStatus *string `type:"string" enum:"VideoJobStatus"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// If the response is truncated, Amazon Rekognition Video returns this token
|
|
|
|
|
// that you can use in the subsequent request to retrieve the next set of celebrities.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
NextToken *string `type:"string"`
|
|
|
|
|
|
|
|
|
|
// If the job fails, StatusMessage provides a descriptive error message.
|
|
|
|
|
StatusMessage *string `type:"string"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Information about a video that Amazon Rekognition Video analyzed. Videometadata
|
|
|
|
|
// is returned in every page of paginated responses from a Amazon Rekognition
|
|
|
|
|
// Video operation.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
VideoMetadata *VideoMetadata `type:"structure"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s GetCelebrityRecognitionOutput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s GetCelebrityRecognitionOutput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetCelebrities sets the Celebrities field's value.
|
|
|
|
|
func (s *GetCelebrityRecognitionOutput) SetCelebrities(v []*CelebrityRecognition) *GetCelebrityRecognitionOutput {
|
|
|
|
|
s.Celebrities = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetJobStatus sets the JobStatus field's value.
|
|
|
|
|
func (s *GetCelebrityRecognitionOutput) SetJobStatus(v string) *GetCelebrityRecognitionOutput {
|
|
|
|
|
s.JobStatus = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetNextToken sets the NextToken field's value.
|
|
|
|
|
func (s *GetCelebrityRecognitionOutput) SetNextToken(v string) *GetCelebrityRecognitionOutput {
|
|
|
|
|
s.NextToken = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetStatusMessage sets the StatusMessage field's value.
|
|
|
|
|
func (s *GetCelebrityRecognitionOutput) SetStatusMessage(v string) *GetCelebrityRecognitionOutput {
|
|
|
|
|
s.StatusMessage = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetVideoMetadata sets the VideoMetadata field's value.
|
|
|
|
|
func (s *GetCelebrityRecognitionOutput) SetVideoMetadata(v *VideoMetadata) *GetCelebrityRecognitionOutput {
|
|
|
|
|
s.VideoMetadata = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type GetContentModerationInput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The identifier for the content moderation job. Use JobId to identify the
|
|
|
|
|
// job in a subsequent call to GetContentModeration.
|
|
|
|
|
//
|
|
|
|
|
// JobId is a required field
|
|
|
|
|
JobId *string `min:"1" type:"string" required:"true"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Maximum number of results to return per paginated call. The largest value
|
|
|
|
|
// you can specify is 1000. If you specify a value greater than 1000, a maximum
|
|
|
|
|
// of 1000 results is returned. The default value is 1000.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
MaxResults *int64 `min:"1" type:"integer"`
|
|
|
|
|
|
|
|
|
|
// If the previous response was incomplete (because there is more data to retrieve),
|
|
|
|
|
// Amazon Rekognition returns a pagination token in the response. You can use
|
|
|
|
|
// this pagination token to retrieve the next set of content moderation labels.
|
|
|
|
|
NextToken *string `type:"string"`
|
|
|
|
|
|
|
|
|
|
// Sort to use for elements in the ModerationLabelDetections array. Use TIMESTAMP
|
|
|
|
|
// to sort array elements by the time labels are detected. Use NAME to alphabetically
|
|
|
|
|
// group elements for a label together. Within each label group, the array element
|
|
|
|
|
// are sorted by detection confidence. The default sort is by TIMESTAMP.
|
|
|
|
|
SortBy *string `type:"string" enum:"ContentModerationSortBy"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s GetContentModerationInput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s GetContentModerationInput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Validate inspects the fields of the type to determine if they are valid.
|
|
|
|
|
func (s *GetContentModerationInput) Validate() error {
|
|
|
|
|
invalidParams := request.ErrInvalidParams{Context: "GetContentModerationInput"}
|
|
|
|
|
if s.JobId == nil {
|
|
|
|
|
invalidParams.Add(request.NewErrParamRequired("JobId"))
|
|
|
|
|
}
|
|
|
|
|
if s.JobId != nil && len(*s.JobId) < 1 {
|
|
|
|
|
invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
|
|
|
|
|
}
|
|
|
|
|
if s.MaxResults != nil && *s.MaxResults < 1 {
|
|
|
|
|
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if invalidParams.Len() > 0 {
|
|
|
|
|
return invalidParams
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetJobId sets the JobId field's value.
|
|
|
|
|
func (s *GetContentModerationInput) SetJobId(v string) *GetContentModerationInput {
|
|
|
|
|
s.JobId = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetMaxResults sets the MaxResults field's value.
|
|
|
|
|
func (s *GetContentModerationInput) SetMaxResults(v int64) *GetContentModerationInput {
|
|
|
|
|
s.MaxResults = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetNextToken sets the NextToken field's value.
|
|
|
|
|
func (s *GetContentModerationInput) SetNextToken(v string) *GetContentModerationInput {
|
|
|
|
|
s.NextToken = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetSortBy sets the SortBy field's value.
|
|
|
|
|
func (s *GetContentModerationInput) SetSortBy(v string) *GetContentModerationInput {
|
|
|
|
|
s.SortBy = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type GetContentModerationOutput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The current status of the content moderation job.
|
|
|
|
|
JobStatus *string `type:"string" enum:"VideoJobStatus"`
|
|
|
|
|
|
|
|
|
|
// The detected moderation labels and the time(s) they were detected.
|
|
|
|
|
ModerationLabels []*ContentModerationDetection `type:"list"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// If the response is truncated, Amazon Rekognition Video returns this token
|
|
|
|
|
// that you can use in the subsequent request to retrieve the next set of moderation
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// labels.
|
|
|
|
|
NextToken *string `type:"string"`
|
|
|
|
|
|
|
|
|
|
// If the job fails, StatusMessage provides a descriptive error message.
|
|
|
|
|
StatusMessage *string `type:"string"`
|
|
|
|
|
|
|
|
|
|
// Information about a video that Amazon Rekognition analyzed. Videometadata
|
|
|
|
|
// is returned in every page of paginated responses from GetContentModeration.
|
|
|
|
|
VideoMetadata *VideoMetadata `type:"structure"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s GetContentModerationOutput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s GetContentModerationOutput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetJobStatus sets the JobStatus field's value.
|
|
|
|
|
func (s *GetContentModerationOutput) SetJobStatus(v string) *GetContentModerationOutput {
|
|
|
|
|
s.JobStatus = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetModerationLabels sets the ModerationLabels field's value.
|
|
|
|
|
func (s *GetContentModerationOutput) SetModerationLabels(v []*ContentModerationDetection) *GetContentModerationOutput {
|
|
|
|
|
s.ModerationLabels = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetNextToken sets the NextToken field's value.
|
|
|
|
|
func (s *GetContentModerationOutput) SetNextToken(v string) *GetContentModerationOutput {
|
|
|
|
|
s.NextToken = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetStatusMessage sets the StatusMessage field's value.
|
|
|
|
|
func (s *GetContentModerationOutput) SetStatusMessage(v string) *GetContentModerationOutput {
|
|
|
|
|
s.StatusMessage = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetVideoMetadata sets the VideoMetadata field's value.
|
|
|
|
|
func (s *GetContentModerationOutput) SetVideoMetadata(v *VideoMetadata) *GetContentModerationOutput {
|
|
|
|
|
s.VideoMetadata = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type GetFaceDetectionInput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Unique identifier for the face detection job. The JobId is returned from
|
|
|
|
|
// StartFaceDetection.
|
|
|
|
|
//
|
|
|
|
|
// JobId is a required field
|
|
|
|
|
JobId *string `min:"1" type:"string" required:"true"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Maximum number of results to return per paginated call. The largest value
|
|
|
|
|
// you can specify is 1000. If you specify a value greater than 1000, a maximum
|
|
|
|
|
// of 1000 results is returned. The default value is 1000.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
MaxResults *int64 `min:"1" type:"integer"`
|
|
|
|
|
|
|
|
|
|
// If the previous response was incomplete (because there are more faces to
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// retrieve), Amazon Rekognition Video returns a pagination token in the response.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// You can use this pagination token to retrieve the next set of faces.
|
|
|
|
|
NextToken *string `type:"string"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s GetFaceDetectionInput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s GetFaceDetectionInput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Validate inspects the fields of the type to determine if they are valid.
|
|
|
|
|
func (s *GetFaceDetectionInput) Validate() error {
|
|
|
|
|
invalidParams := request.ErrInvalidParams{Context: "GetFaceDetectionInput"}
|
|
|
|
|
if s.JobId == nil {
|
|
|
|
|
invalidParams.Add(request.NewErrParamRequired("JobId"))
|
|
|
|
|
}
|
|
|
|
|
if s.JobId != nil && len(*s.JobId) < 1 {
|
|
|
|
|
invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
|
|
|
|
|
}
|
|
|
|
|
if s.MaxResults != nil && *s.MaxResults < 1 {
|
|
|
|
|
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if invalidParams.Len() > 0 {
|
|
|
|
|
return invalidParams
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetJobId sets the JobId field's value.
|
|
|
|
|
func (s *GetFaceDetectionInput) SetJobId(v string) *GetFaceDetectionInput {
|
|
|
|
|
s.JobId = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetMaxResults sets the MaxResults field's value.
|
|
|
|
|
func (s *GetFaceDetectionInput) SetMaxResults(v int64) *GetFaceDetectionInput {
|
|
|
|
|
s.MaxResults = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetNextToken sets the NextToken field's value.
|
|
|
|
|
func (s *GetFaceDetectionInput) SetNextToken(v string) *GetFaceDetectionInput {
|
|
|
|
|
s.NextToken = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type GetFaceDetectionOutput struct {
	_ struct{} `type:"structure"`

	// An array of faces detected in the video. Each element contains a detected
	// face's details and the time, in milliseconds from the start of the video,
	// the face was detected.
	Faces []*FaceDetection `type:"list"`

	// The current status of the face detection job.
	JobStatus *string `type:"string" enum:"VideoJobStatus"`

	// If the response is truncated, Amazon Rekognition returns this token that
	// you can use in the subsequent request to retrieve the next set of faces.
	NextToken *string `type:"string"`

	// If the job fails, StatusMessage provides a descriptive error message.
	StatusMessage *string `type:"string"`

	// Information about a video that Amazon Rekognition Video analyzed. Videometadata
	// is returned in every page of paginated responses from an Amazon Rekognition
	// video operation.
	VideoMetadata *VideoMetadata `type:"structure"`
}

// String returns the string representation
func (s GetFaceDetectionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetFaceDetectionOutput) GoString() string {
	return s.String()
}

// SetFaces sets the Faces field's value.
func (s *GetFaceDetectionOutput) SetFaces(v []*FaceDetection) *GetFaceDetectionOutput {
	s.Faces = v
	return s
}

// SetJobStatus sets the JobStatus field's value.
func (s *GetFaceDetectionOutput) SetJobStatus(v string) *GetFaceDetectionOutput {
	s.JobStatus = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *GetFaceDetectionOutput) SetNextToken(v string) *GetFaceDetectionOutput {
	s.NextToken = &v
	return s
}

// SetStatusMessage sets the StatusMessage field's value.
func (s *GetFaceDetectionOutput) SetStatusMessage(v string) *GetFaceDetectionOutput {
	s.StatusMessage = &v
	return s
}

// SetVideoMetadata sets the VideoMetadata field's value.
func (s *GetFaceDetectionOutput) SetVideoMetadata(v *VideoMetadata) *GetFaceDetectionOutput {
	s.VideoMetadata = v
	return s
}
|
|
|
|
|
|
|
|
|
|
type GetFaceSearchInput struct {
	_ struct{} `type:"structure"`

	// The job identifier for the search request. You get the job identifier from
	// an initial call to StartFaceSearch.
	//
	// JobId is a required field
	JobId *string `min:"1" type:"string" required:"true"`

	// Maximum number of results to return per paginated call. The largest value
	// you can specify is 1000. If you specify a value greater than 1000, a maximum
	// of 1000 results is returned. The default value is 1000.
	MaxResults *int64 `min:"1" type:"integer"`

	// If the previous response was incomplete (because there are more search results
	// to retrieve), Amazon Rekognition Video returns a pagination token in the
	// response. You can use this pagination token to retrieve the next set of search
	// results.
	NextToken *string `type:"string"`

	// Sort to use for grouping faces in the response. Use TIMESTAMP to group faces
	// by the time that they are recognized. Use INDEX to sort by recognized faces.
	SortBy *string `type:"string" enum:"FaceSearchSortBy"`
}

// String returns the string representation
func (s GetFaceSearchInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetFaceSearchInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Validation mirrors the API's required/min-length/min-value constraints.
func (s *GetFaceSearchInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetFaceSearchInput"}
	if s.JobId == nil {
		invalidParams.Add(request.NewErrParamRequired("JobId"))
	}
	if s.JobId != nil && len(*s.JobId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
	}
	if s.MaxResults != nil && *s.MaxResults < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetJobId sets the JobId field's value.
func (s *GetFaceSearchInput) SetJobId(v string) *GetFaceSearchInput {
	s.JobId = &v
	return s
}

// SetMaxResults sets the MaxResults field's value.
func (s *GetFaceSearchInput) SetMaxResults(v int64) *GetFaceSearchInput {
	s.MaxResults = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *GetFaceSearchInput) SetNextToken(v string) *GetFaceSearchInput {
	s.NextToken = &v
	return s
}

// SetSortBy sets the SortBy field's value.
func (s *GetFaceSearchInput) SetSortBy(v string) *GetFaceSearchInput {
	s.SortBy = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
type GetFaceSearchOutput struct {
	_ struct{} `type:"structure"`

	// The current status of the face search job.
	JobStatus *string `type:"string" enum:"VideoJobStatus"`

	// If the response is truncated, Amazon Rekognition Video returns this token
	// that you can use in the subsequent request to retrieve the next set of search
	// results.
	NextToken *string `type:"string"`

	// An array of persons, PersonMatch, in the video whose face(s) match the face(s)
	// in an Amazon Rekognition collection. It also includes time information for
	// when persons are matched in the video. You specify the input collection in
	// an initial call to StartFaceSearch. Each Persons element includes a time
	// the person was matched, face match details (FaceMatches) for matching faces
	// in the collection, and person information (Person) for the matched person.
	Persons []*PersonMatch `type:"list"`

	// If the job fails, StatusMessage provides a descriptive error message.
	StatusMessage *string `type:"string"`

	// Information about a video that Amazon Rekognition analyzed. Videometadata
	// is returned in every page of paginated responses from an Amazon Rekognition
	// Video operation.
	VideoMetadata *VideoMetadata `type:"structure"`
}

// String returns the string representation
func (s GetFaceSearchOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetFaceSearchOutput) GoString() string {
	return s.String()
}

// SetJobStatus sets the JobStatus field's value.
func (s *GetFaceSearchOutput) SetJobStatus(v string) *GetFaceSearchOutput {
	s.JobStatus = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *GetFaceSearchOutput) SetNextToken(v string) *GetFaceSearchOutput {
	s.NextToken = &v
	return s
}

// SetPersons sets the Persons field's value.
func (s *GetFaceSearchOutput) SetPersons(v []*PersonMatch) *GetFaceSearchOutput {
	s.Persons = v
	return s
}

// SetStatusMessage sets the StatusMessage field's value.
func (s *GetFaceSearchOutput) SetStatusMessage(v string) *GetFaceSearchOutput {
	s.StatusMessage = &v
	return s
}

// SetVideoMetadata sets the VideoMetadata field's value.
func (s *GetFaceSearchOutput) SetVideoMetadata(v *VideoMetadata) *GetFaceSearchOutput {
	s.VideoMetadata = v
	return s
}
|
|
|
|
|
|
|
|
|
|
type GetLabelDetectionInput struct {
	_ struct{} `type:"structure"`

	// Job identifier for the label detection operation for which you want results
	// returned. You get the job identifier from an initial call to StartLabelDetection.
	//
	// JobId is a required field
	JobId *string `min:"1" type:"string" required:"true"`

	// Maximum number of results to return per paginated call. The largest value
	// you can specify is 1000. If you specify a value greater than 1000, a maximum
	// of 1000 results is returned. The default value is 1000.
	MaxResults *int64 `min:"1" type:"integer"`

	// If the previous response was incomplete (because there are more labels to
	// retrieve), Amazon Rekognition Video returns a pagination token in the response.
	// You can use this pagination token to retrieve the next set of labels.
	NextToken *string `type:"string"`

	// Sort to use for elements in the Labels array. Use TIMESTAMP to sort array
	// elements by the time labels are detected. Use NAME to alphabetically group
	// elements for a label together. Within each label group, the array elements
	// are sorted by detection confidence. The default sort is by TIMESTAMP.
	SortBy *string `type:"string" enum:"LabelDetectionSortBy"`
}

// String returns the string representation
func (s GetLabelDetectionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetLabelDetectionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Validation mirrors the API's required/min-length/min-value constraints.
func (s *GetLabelDetectionInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetLabelDetectionInput"}
	if s.JobId == nil {
		invalidParams.Add(request.NewErrParamRequired("JobId"))
	}
	if s.JobId != nil && len(*s.JobId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
	}
	if s.MaxResults != nil && *s.MaxResults < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetJobId sets the JobId field's value.
func (s *GetLabelDetectionInput) SetJobId(v string) *GetLabelDetectionInput {
	s.JobId = &v
	return s
}

// SetMaxResults sets the MaxResults field's value.
func (s *GetLabelDetectionInput) SetMaxResults(v int64) *GetLabelDetectionInput {
	s.MaxResults = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *GetLabelDetectionInput) SetNextToken(v string) *GetLabelDetectionInput {
	s.NextToken = &v
	return s
}

// SetSortBy sets the SortBy field's value.
func (s *GetLabelDetectionInput) SetSortBy(v string) *GetLabelDetectionInput {
	s.SortBy = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
type GetLabelDetectionOutput struct {
	_ struct{} `type:"structure"`

	// The current status of the label detection job.
	JobStatus *string `type:"string" enum:"VideoJobStatus"`

	// Version number of the label detection model that was used to detect labels.
	LabelModelVersion *string `type:"string"`

	// An array of labels detected in the video. Each element contains the detected
	// label and the time, in milliseconds from the start of the video, that the
	// label was detected.
	Labels []*LabelDetection `type:"list"`

	// If the response is truncated, Amazon Rekognition Video returns this token
	// that you can use in the subsequent request to retrieve the next set of labels.
	NextToken *string `type:"string"`

	// If the job fails, StatusMessage provides a descriptive error message.
	StatusMessage *string `type:"string"`

	// Information about a video that Amazon Rekognition Video analyzed. Videometadata
	// is returned in every page of paginated responses from an Amazon Rekognition
	// video operation.
	VideoMetadata *VideoMetadata `type:"structure"`
}

// String returns the string representation
func (s GetLabelDetectionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetLabelDetectionOutput) GoString() string {
	return s.String()
}

// SetJobStatus sets the JobStatus field's value.
func (s *GetLabelDetectionOutput) SetJobStatus(v string) *GetLabelDetectionOutput {
	s.JobStatus = &v
	return s
}

// SetLabelModelVersion sets the LabelModelVersion field's value.
func (s *GetLabelDetectionOutput) SetLabelModelVersion(v string) *GetLabelDetectionOutput {
	s.LabelModelVersion = &v
	return s
}

// SetLabels sets the Labels field's value.
func (s *GetLabelDetectionOutput) SetLabels(v []*LabelDetection) *GetLabelDetectionOutput {
	s.Labels = v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *GetLabelDetectionOutput) SetNextToken(v string) *GetLabelDetectionOutput {
	s.NextToken = &v
	return s
}

// SetStatusMessage sets the StatusMessage field's value.
func (s *GetLabelDetectionOutput) SetStatusMessage(v string) *GetLabelDetectionOutput {
	s.StatusMessage = &v
	return s
}

// SetVideoMetadata sets the VideoMetadata field's value.
func (s *GetLabelDetectionOutput) SetVideoMetadata(v *VideoMetadata) *GetLabelDetectionOutput {
	s.VideoMetadata = v
	return s
}
|
|
|
|
|
|
|
|
|
|
type GetPersonTrackingInput struct {
	_ struct{} `type:"structure"`

	// The identifier for a job that tracks persons in a video. You get the JobId
	// from a call to StartPersonTracking.
	//
	// JobId is a required field
	JobId *string `min:"1" type:"string" required:"true"`

	// Maximum number of results to return per paginated call. The largest value
	// you can specify is 1000. If you specify a value greater than 1000, a maximum
	// of 1000 results is returned. The default value is 1000.
	MaxResults *int64 `min:"1" type:"integer"`

	// If the previous response was incomplete (because there are more persons to
	// retrieve), Amazon Rekognition Video returns a pagination token in the response.
	// You can use this pagination token to retrieve the next set of persons.
	NextToken *string `type:"string"`

	// Sort to use for elements in the Persons array. Use TIMESTAMP to sort array
	// elements by the time persons are detected. Use INDEX to sort by the tracked
	// persons. If you sort by INDEX, the array elements for each person are sorted
	// by detection confidence. The default sort is by TIMESTAMP.
	SortBy *string `type:"string" enum:"PersonTrackingSortBy"`
}

// String returns the string representation
func (s GetPersonTrackingInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetPersonTrackingInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Validation mirrors the API's required/min-length/min-value constraints.
func (s *GetPersonTrackingInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "GetPersonTrackingInput"}
	if s.JobId == nil {
		invalidParams.Add(request.NewErrParamRequired("JobId"))
	}
	if s.JobId != nil && len(*s.JobId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("JobId", 1))
	}
	if s.MaxResults != nil && *s.MaxResults < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetJobId sets the JobId field's value.
func (s *GetPersonTrackingInput) SetJobId(v string) *GetPersonTrackingInput {
	s.JobId = &v
	return s
}

// SetMaxResults sets the MaxResults field's value.
func (s *GetPersonTrackingInput) SetMaxResults(v int64) *GetPersonTrackingInput {
	s.MaxResults = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *GetPersonTrackingInput) SetNextToken(v string) *GetPersonTrackingInput {
	s.NextToken = &v
	return s
}

// SetSortBy sets the SortBy field's value.
func (s *GetPersonTrackingInput) SetSortBy(v string) *GetPersonTrackingInput {
	s.SortBy = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
type GetPersonTrackingOutput struct {
	_ struct{} `type:"structure"`

	// The current status of the person tracking job.
	JobStatus *string `type:"string" enum:"VideoJobStatus"`

	// If the response is truncated, Amazon Rekognition Video returns this token
	// that you can use in the subsequent request to retrieve the next set of persons.
	NextToken *string `type:"string"`

	// An array of the persons detected in the video and the time(s) their path
	// was tracked throughout the video. An array element will exist for each time
	// a person's path is tracked.
	Persons []*PersonDetection `type:"list"`

	// If the job fails, StatusMessage provides a descriptive error message.
	StatusMessage *string `type:"string"`

	// Information about a video that Amazon Rekognition Video analyzed. Videometadata
	// is returned in every page of paginated responses from an Amazon Rekognition
	// Video operation.
	VideoMetadata *VideoMetadata `type:"structure"`
}

// String returns the string representation
func (s GetPersonTrackingOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s GetPersonTrackingOutput) GoString() string {
	return s.String()
}

// SetJobStatus sets the JobStatus field's value.
func (s *GetPersonTrackingOutput) SetJobStatus(v string) *GetPersonTrackingOutput {
	s.JobStatus = &v
	return s
}

// SetNextToken sets the NextToken field's value.
func (s *GetPersonTrackingOutput) SetNextToken(v string) *GetPersonTrackingOutput {
	s.NextToken = &v
	return s
}

// SetPersons sets the Persons field's value.
func (s *GetPersonTrackingOutput) SetPersons(v []*PersonDetection) *GetPersonTrackingOutput {
	s.Persons = v
	return s
}

// SetStatusMessage sets the StatusMessage field's value.
func (s *GetPersonTrackingOutput) SetStatusMessage(v string) *GetPersonTrackingOutput {
	s.StatusMessage = &v
	return s
}

// SetVideoMetadata sets the VideoMetadata field's value.
func (s *GetPersonTrackingOutput) SetVideoMetadata(v *VideoMetadata) *GetPersonTrackingOutput {
	s.VideoMetadata = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Provides the input image either as bytes or an S3 object.
//
// You pass image bytes to an Amazon Rekognition API operation by using the
// Bytes property. For example, you would use the Bytes property to pass an
// image loaded from a local file system. Image bytes passed by using the Bytes
// property must be base64-encoded. Your code may not need to encode image bytes
// if you are using an AWS SDK to call Amazon Rekognition API operations.
//
// For more information, see Analyzing an Image Loaded from a Local File System
// in the Amazon Rekognition Developer Guide.
//
// You pass images stored in an S3 bucket to an Amazon Rekognition API operation
// by using the S3Object property. Images stored in an S3 bucket do not need
// to be base64-encoded.
//
// The region for the S3 bucket containing the S3 object must match the region
// you use for Amazon Rekognition operations.
//
// If you use the AWS CLI to call Amazon Rekognition operations, passing image
// bytes using the Bytes property is not supported. You must first upload the
// image to an Amazon S3 bucket and then call the operation using the S3Object
// property.
//
// For Amazon Rekognition to process an S3 object, the user must have permission
// to access the S3 object. For more information, see Resource Based Policies
// in the Amazon Rekognition Developer Guide.
type Image struct {
	_ struct{} `type:"structure"`

	// Blob of image bytes up to 5 MBs.
	//
	// Bytes is automatically base64 encoded/decoded by the SDK.
	Bytes []byte `min:"1" type:"blob"`

	// Identifies an S3 object as the image source.
	S3Object *S3Object `type:"structure"`
}

// String returns the string representation
func (s Image) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s Image) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Nested S3Object validation errors are surfaced under the "S3Object" context.
func (s *Image) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Image"}
	if s.Bytes != nil && len(s.Bytes) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Bytes", 1))
	}
	if s.S3Object != nil {
		if err := s.S3Object.Validate(); err != nil {
			invalidParams.AddNested("S3Object", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBytes sets the Bytes field's value.
func (s *Image) SetBytes(v []byte) *Image {
	s.Bytes = v
	return s
}

// SetS3Object sets the S3Object field's value.
func (s *Image) SetS3Object(v *S3Object) *Image {
	s.S3Object = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Identifies face image brightness and sharpness.
type ImageQuality struct {
	_ struct{} `type:"structure"`

	// Value representing brightness of the face. The service returns a value between
	// 0 and 100 (inclusive). A higher value indicates a brighter face image.
	Brightness *float64 `type:"float"`

	// Value representing sharpness of the face. The service returns a value between
	// 0 and 100 (inclusive). A higher value indicates a sharper face image.
	Sharpness *float64 `type:"float"`
}

// String returns the string representation
func (s ImageQuality) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s ImageQuality) GoString() string {
	return s.String()
}

// SetBrightness sets the Brightness field's value.
func (s *ImageQuality) SetBrightness(v float64) *ImageQuality {
	s.Brightness = &v
	return s
}

// SetSharpness sets the Sharpness field's value.
func (s *ImageQuality) SetSharpness(v float64) *ImageQuality {
	s.Sharpness = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
type IndexFacesInput struct {
	_ struct{} `type:"structure"`

	// The ID of an existing collection to which you want to add the faces that
	// are detected in the input images.
	//
	// CollectionId is a required field
	CollectionId *string `min:"1" type:"string" required:"true"`

	// An array of facial attributes that you want to be returned. This can be the
	// default list of attributes or all attributes. If you don't specify a value
	// for Attributes or if you specify ["DEFAULT"], the API returns the following
	// subset of facial attributes: BoundingBox, Confidence, Pose, Quality, and
	// Landmarks. If you provide ["ALL"], all facial attributes are returned, but
	// the operation takes longer to complete.
	//
	// If you provide both, ["ALL", "DEFAULT"], the service uses a logical AND operator
	// to determine which attributes to return (in this case, all attributes).
	DetectionAttributes []*string `type:"list"`

	// The ID you want to assign to all the faces detected in the image.
	ExternalImageId *string `min:"1" type:"string"`

	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
	// isn't supported.
	//
	// Image is a required field
	Image *Image `type:"structure" required:"true"`

	// The maximum number of faces to index. The value of MaxFaces must be greater
	// than or equal to 1. IndexFaces returns no more than 100 detected faces in
	// an image, even if you specify a larger value for MaxFaces.
	//
	// If IndexFaces detects more faces than the value of MaxFaces, the faces with
	// the lowest quality are filtered out first. If there are still more faces
	// than the value of MaxFaces, the faces with the smallest bounding boxes are
	// filtered out (up to the number that's needed to satisfy the value of MaxFaces).
	// Information about the unindexed faces is available in the UnindexedFaces
	// array.
	//
	// The faces that are returned by IndexFaces are sorted by the largest face
	// bounding box size to the smallest size, in descending order.
	//
	// MaxFaces can be used with a collection associated with any version of the
	// face model.
	MaxFaces *int64 `min:"1" type:"integer"`

	// A filter that specifies how much filtering is done to identify faces that
	// are detected with low quality. Filtered faces aren't indexed. If you specify
	// AUTO, filtering prioritizes the identification of faces that don't meet the
	// required quality bar chosen by Amazon Rekognition. The quality bar is based
	// on a variety of common use cases. Low-quality detections can occur for a
	// number of reasons. Some examples are an object that's misidentified as a
	// face, a face that's too blurry, or a face with a pose that's too extreme
	// to use. If you specify NONE, no filtering is performed. The default value
	// is AUTO.
	//
	// To use quality filtering, the collection you are using must be associated
	// with version 3 of the face model.
	QualityFilter *string `type:"string" enum:"QualityFilter"`
}

// String returns the string representation
func (s IndexFacesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s IndexFacesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Nested Image validation errors are surfaced under the "Image" context.
func (s *IndexFacesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "IndexFacesInput"}
	if s.CollectionId == nil {
		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
	}
	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
	}
	if s.ExternalImageId != nil && len(*s.ExternalImageId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ExternalImageId", 1))
	}
	if s.Image == nil {
		invalidParams.Add(request.NewErrParamRequired("Image"))
	}
	if s.MaxFaces != nil && *s.MaxFaces < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxFaces", 1))
	}
	if s.Image != nil {
		if err := s.Image.Validate(); err != nil {
			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCollectionId sets the CollectionId field's value.
func (s *IndexFacesInput) SetCollectionId(v string) *IndexFacesInput {
	s.CollectionId = &v
	return s
}

// SetDetectionAttributes sets the DetectionAttributes field's value.
func (s *IndexFacesInput) SetDetectionAttributes(v []*string) *IndexFacesInput {
	s.DetectionAttributes = v
	return s
}

// SetExternalImageId sets the ExternalImageId field's value.
func (s *IndexFacesInput) SetExternalImageId(v string) *IndexFacesInput {
	s.ExternalImageId = &v
	return s
}

// SetImage sets the Image field's value.
func (s *IndexFacesInput) SetImage(v *Image) *IndexFacesInput {
	s.Image = v
	return s
}

// SetMaxFaces sets the MaxFaces field's value.
func (s *IndexFacesInput) SetMaxFaces(v int64) *IndexFacesInput {
	s.MaxFaces = &v
	return s
}

// SetQualityFilter sets the QualityFilter field's value.
func (s *IndexFacesInput) SetQualityFilter(v string) *IndexFacesInput {
	s.QualityFilter = &v
	return s
}
|
|
|
|
|
|
2017-12-08 12:03:10 +00:00
|
|
|
|
type IndexFacesOutput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// The version number of the face detection model that's associated with the
|
|
|
|
|
// input collection (CollectionId).
|
2017-12-08 12:03:10 +00:00
|
|
|
|
FaceModelVersion *string `type:"string"`
|
|
|
|
|
|
|
|
|
|
// An array of faces detected and added to the collection. For more information,
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// see Searching Faces in a Collection in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
FaceRecords []*FaceRecord `type:"list"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// If your collection is associated with a face detection model that's later
|
|
|
|
|
// than version 3.0, the value of OrientationCorrection is always null and no
|
|
|
|
|
// orientation information is returned.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
//
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// If your collection is associated with a face detection model that's version
|
|
|
|
|
// 3.0 or earlier, the following applies:
|
|
|
|
|
//
|
|
|
|
|
// * If the input image is in .jpeg format, it might contain exchangeable
|
|
|
|
|
// image file format (Exif) metadata that includes the image's orientation.
|
|
|
|
|
// Amazon Rekognition uses this orientation information to perform image
|
|
|
|
|
// correction - the bounding box coordinates are translated to represent
|
|
|
|
|
// object locations after the orientation information in the Exif metadata
|
|
|
|
|
// is used to correct the image orientation. Images in .png format don't
|
|
|
|
|
// contain Exif metadata. The value of OrientationCorrection is null.
|
|
|
|
|
//
|
|
|
|
|
// * If the image doesn't contain orientation information in its Exif metadata,
|
|
|
|
|
// Amazon Rekognition returns an estimated orientation (ROTATE_0, ROTATE_90,
|
|
|
|
|
// ROTATE_180, ROTATE_270). Amazon Rekognition doesn’t perform image correction
|
|
|
|
|
// for images. The bounding box coordinates aren't translated and represent
|
|
|
|
|
// the object locations before the image is rotated.
|
|
|
|
|
//
|
|
|
|
|
// Bounding box information is returned in the FaceRecords array. You can get
|
|
|
|
|
// the version of the face detection model by calling DescribeCollection.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
OrientationCorrection *string `type:"string" enum:"OrientationCorrection"`
|
2019-01-21 14:27:20 +00:00
|
|
|
|
|
|
|
|
|
// An array of faces that were detected in the image but weren't indexed. They
|
|
|
|
|
// weren't indexed because the quality filter identified them as low quality,
|
|
|
|
|
// or the MaxFaces request parameter filtered them out. To use the quality filter,
|
|
|
|
|
// you specify the QualityFilter request parameter.
|
|
|
|
|
UnindexedFaces []*UnindexedFace `type:"list"`
|
2017-12-08 12:03:10 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s IndexFacesOutput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s IndexFacesOutput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetFaceModelVersion sets the FaceModelVersion field's value.
|
|
|
|
|
func (s *IndexFacesOutput) SetFaceModelVersion(v string) *IndexFacesOutput {
|
|
|
|
|
s.FaceModelVersion = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetFaceRecords sets the FaceRecords field's value.
|
|
|
|
|
func (s *IndexFacesOutput) SetFaceRecords(v []*FaceRecord) *IndexFacesOutput {
|
|
|
|
|
s.FaceRecords = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetOrientationCorrection sets the OrientationCorrection field's value.
|
|
|
|
|
func (s *IndexFacesOutput) SetOrientationCorrection(v string) *IndexFacesOutput {
|
|
|
|
|
s.OrientationCorrection = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// SetUnindexedFaces sets the UnindexedFaces field's value.
|
|
|
|
|
func (s *IndexFacesOutput) SetUnindexedFaces(v []*UnindexedFace) *IndexFacesOutput {
|
|
|
|
|
s.UnindexedFaces = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// An instance of a label returned by Amazon Rekognition Image (DetectLabels)
|
|
|
|
|
// or by Amazon Rekognition Video (GetLabelDetection).
|
|
|
|
|
type Instance struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The position of the label instance on the image.
|
|
|
|
|
BoundingBox *BoundingBox `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The confidence that Amazon Rekognition has in the accuracy of the bounding
|
|
|
|
|
// box.
|
|
|
|
|
Confidence *float64 `type:"float"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s Instance) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s Instance) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetBoundingBox sets the BoundingBox field's value.
|
|
|
|
|
func (s *Instance) SetBoundingBox(v *BoundingBox) *Instance {
|
|
|
|
|
s.BoundingBox = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetConfidence sets the Confidence field's value.
|
|
|
|
|
func (s *Instance) SetConfidence(v float64) *Instance {
|
|
|
|
|
s.Confidence = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// The Kinesis data stream Amazon Rekognition to which the analysis results
|
|
|
|
|
// of a Amazon Rekognition stream processor are streamed. For more information,
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// see CreateStreamProcessor in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
type KinesisDataStream struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// ARN of the output Amazon Kinesis Data Streams stream.
|
|
|
|
|
Arn *string `type:"string"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s KinesisDataStream) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s KinesisDataStream) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetArn sets the Arn field's value.
|
|
|
|
|
func (s *KinesisDataStream) SetArn(v string) *KinesisDataStream {
|
|
|
|
|
s.Arn = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Kinesis video stream stream that provides the source streaming video for
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// a Amazon Rekognition Video stream processor. For more information, see CreateStreamProcessor
|
|
|
|
|
// in the Amazon Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
type KinesisVideoStream struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// ARN of the Kinesis video stream stream that streams the source video.
|
|
|
|
|
Arn *string `type:"string"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s KinesisVideoStream) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s KinesisVideoStream) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetArn sets the Arn field's value.
|
|
|
|
|
func (s *KinesisVideoStream) SetArn(v string) *KinesisVideoStream {
|
|
|
|
|
s.Arn = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Structure containing details about the detected label, including the name,
|
|
|
|
|
// detected instances, parent labels, and level of confidence.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
type Label struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Level of confidence.
|
|
|
|
|
Confidence *float64 `type:"float"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// If Label represents an object, Instances contains the bounding boxes for
|
|
|
|
|
// each instance of the detected object. Bounding boxes are returned for common
|
|
|
|
|
// object labels such as people, cars, furniture, apparel or pets.
|
|
|
|
|
Instances []*Instance `type:"list"`
|
|
|
|
|
|
|
|
|
|
// The name (label) of the object or scene.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
Name *string `type:"string"`
|
2019-01-21 14:27:20 +00:00
|
|
|
|
|
|
|
|
|
// The parent labels for a label. The response includes all ancestor labels.
|
|
|
|
|
Parents []*Parent `type:"list"`
|
2017-12-08 12:03:10 +00:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s Label) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s Label) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetConfidence sets the Confidence field's value.
|
|
|
|
|
func (s *Label) SetConfidence(v float64) *Label {
|
|
|
|
|
s.Confidence = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// SetInstances sets the Instances field's value.
|
|
|
|
|
func (s *Label) SetInstances(v []*Instance) *Label {
|
|
|
|
|
s.Instances = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// SetName sets the Name field's value.
|
|
|
|
|
func (s *Label) SetName(v string) *Label {
|
|
|
|
|
s.Name = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// SetParents sets the Parents field's value.
|
|
|
|
|
func (s *Label) SetParents(v []*Parent) *Label {
|
|
|
|
|
s.Parents = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// Information about a label detected in a video analysis request and the time
|
|
|
|
|
// the label was detected in the video.
|
|
|
|
|
type LabelDetection struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Details about the detected label.
|
|
|
|
|
Label *Label `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Time, in milliseconds from the start of the video, that the label was detected.
|
|
|
|
|
Timestamp *int64 `type:"long"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s LabelDetection) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s LabelDetection) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetLabel sets the Label field's value.
|
|
|
|
|
func (s *LabelDetection) SetLabel(v *Label) *LabelDetection {
|
|
|
|
|
s.Label = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetTimestamp sets the Timestamp field's value.
|
|
|
|
|
func (s *LabelDetection) SetTimestamp(v int64) *LabelDetection {
|
|
|
|
|
s.Timestamp = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Indicates the location of the landmark on the face.
|
|
|
|
|
type Landmark struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Type of landmark.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
Type *string `type:"string" enum:"LandmarkType"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// The x-coordinate from the top left of the landmark expressed as the ratio
|
|
|
|
|
// of the width of the image. For example, if the image is 700 x 200 and the
|
|
|
|
|
// x-coordinate of the landmark is at 350 pixels, this value is 0.5.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
X *float64 `type:"float"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// The y-coordinate from the top left of the landmark expressed as the ratio
|
|
|
|
|
// of the height of the image. For example, if the image is 700 x 200 and the
|
|
|
|
|
// y-coordinate of the landmark is at 100 pixels, this value is 0.5.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
Y *float64 `type:"float"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s Landmark) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s Landmark) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetType sets the Type field's value.
|
|
|
|
|
func (s *Landmark) SetType(v string) *Landmark {
|
|
|
|
|
s.Type = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetX sets the X field's value.
|
|
|
|
|
func (s *Landmark) SetX(v float64) *Landmark {
|
|
|
|
|
s.X = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetY sets the Y field's value.
|
|
|
|
|
func (s *Landmark) SetY(v float64) *Landmark {
|
|
|
|
|
s.Y = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type ListCollectionsInput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Maximum number of collection IDs to return.
|
|
|
|
|
MaxResults *int64 `type:"integer"`
|
|
|
|
|
|
|
|
|
|
// Pagination token from the previous response.
|
|
|
|
|
NextToken *string `type:"string"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s ListCollectionsInput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s ListCollectionsInput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetMaxResults sets the MaxResults field's value.
|
|
|
|
|
func (s *ListCollectionsInput) SetMaxResults(v int64) *ListCollectionsInput {
|
|
|
|
|
s.MaxResults = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetNextToken sets the NextToken field's value.
|
|
|
|
|
func (s *ListCollectionsInput) SetNextToken(v string) *ListCollectionsInput {
|
|
|
|
|
s.NextToken = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type ListCollectionsOutput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// An array of collection IDs.
|
|
|
|
|
CollectionIds []*string `type:"list"`
|
|
|
|
|
|
|
|
|
|
// Version numbers of the face detection models associated with the collections
|
|
|
|
|
// in the array CollectionIds. For example, the value of FaceModelVersions[2]
|
|
|
|
|
// is the version number for the face detection model used by the collection
|
|
|
|
|
// in CollectionId[2].
|
|
|
|
|
FaceModelVersions []*string `type:"list"`
|
|
|
|
|
|
|
|
|
|
// If the result is truncated, the response provides a NextToken that you can
|
|
|
|
|
// use in the subsequent request to fetch the next set of collection IDs.
|
|
|
|
|
NextToken *string `type:"string"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s ListCollectionsOutput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s ListCollectionsOutput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetCollectionIds sets the CollectionIds field's value.
|
|
|
|
|
func (s *ListCollectionsOutput) SetCollectionIds(v []*string) *ListCollectionsOutput {
|
|
|
|
|
s.CollectionIds = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetFaceModelVersions sets the FaceModelVersions field's value.
|
|
|
|
|
func (s *ListCollectionsOutput) SetFaceModelVersions(v []*string) *ListCollectionsOutput {
|
|
|
|
|
s.FaceModelVersions = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetNextToken sets the NextToken field's value.
|
|
|
|
|
func (s *ListCollectionsOutput) SetNextToken(v string) *ListCollectionsOutput {
|
|
|
|
|
s.NextToken = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type ListFacesInput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// ID of the collection from which to list the faces.
|
|
|
|
|
//
|
|
|
|
|
// CollectionId is a required field
|
|
|
|
|
CollectionId *string `min:"1" type:"string" required:"true"`
|
|
|
|
|
|
|
|
|
|
// Maximum number of faces to return.
|
|
|
|
|
MaxResults *int64 `type:"integer"`
|
|
|
|
|
|
|
|
|
|
// If the previous response was incomplete (because there is more data to retrieve),
|
|
|
|
|
// Amazon Rekognition returns a pagination token in the response. You can use
|
|
|
|
|
// this pagination token to retrieve the next set of faces.
|
|
|
|
|
NextToken *string `type:"string"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s ListFacesInput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s ListFacesInput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Validate inspects the fields of the type to determine if they are valid.
|
|
|
|
|
func (s *ListFacesInput) Validate() error {
|
|
|
|
|
invalidParams := request.ErrInvalidParams{Context: "ListFacesInput"}
|
|
|
|
|
if s.CollectionId == nil {
|
|
|
|
|
invalidParams.Add(request.NewErrParamRequired("CollectionId"))
|
|
|
|
|
}
|
|
|
|
|
if s.CollectionId != nil && len(*s.CollectionId) < 1 {
|
|
|
|
|
invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if invalidParams.Len() > 0 {
|
|
|
|
|
return invalidParams
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetCollectionId sets the CollectionId field's value.
|
|
|
|
|
func (s *ListFacesInput) SetCollectionId(v string) *ListFacesInput {
|
|
|
|
|
s.CollectionId = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetMaxResults sets the MaxResults field's value.
|
|
|
|
|
func (s *ListFacesInput) SetMaxResults(v int64) *ListFacesInput {
|
|
|
|
|
s.MaxResults = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetNextToken sets the NextToken field's value.
|
|
|
|
|
func (s *ListFacesInput) SetNextToken(v string) *ListFacesInput {
|
|
|
|
|
s.NextToken = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type ListFacesOutput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Version number of the face detection model associated with the input collection
|
|
|
|
|
// (CollectionId).
|
|
|
|
|
FaceModelVersion *string `type:"string"`
|
|
|
|
|
|
|
|
|
|
// An array of Face objects.
|
|
|
|
|
Faces []*Face `type:"list"`
|
|
|
|
|
|
|
|
|
|
// If the response is truncated, Amazon Rekognition returns this token that
|
|
|
|
|
// you can use in the subsequent request to retrieve the next set of faces.
|
|
|
|
|
NextToken *string `type:"string"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s ListFacesOutput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s ListFacesOutput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetFaceModelVersion sets the FaceModelVersion field's value.
|
|
|
|
|
func (s *ListFacesOutput) SetFaceModelVersion(v string) *ListFacesOutput {
|
|
|
|
|
s.FaceModelVersion = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetFaces sets the Faces field's value.
|
|
|
|
|
func (s *ListFacesOutput) SetFaces(v []*Face) *ListFacesOutput {
|
|
|
|
|
s.Faces = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetNextToken sets the NextToken field's value.
|
|
|
|
|
func (s *ListFacesOutput) SetNextToken(v string) *ListFacesOutput {
|
|
|
|
|
s.NextToken = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type ListStreamProcessorsInput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// Maximum number of stream processors you want Amazon Rekognition Video to
|
|
|
|
|
// return in the response. The default is 1000.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
MaxResults *int64 `min:"1" type:"integer"`
|
|
|
|
|
|
|
|
|
|
// If the previous response was incomplete (because there are more stream processors
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// to retrieve), Amazon Rekognition Video returns a pagination token in the
|
|
|
|
|
// response. You can use this pagination token to retrieve the next set of stream
|
|
|
|
|
// processors.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
NextToken *string `type:"string"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s ListStreamProcessorsInput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s ListStreamProcessorsInput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Validate inspects the fields of the type to determine if they are valid.
|
|
|
|
|
func (s *ListStreamProcessorsInput) Validate() error {
|
|
|
|
|
invalidParams := request.ErrInvalidParams{Context: "ListStreamProcessorsInput"}
|
|
|
|
|
if s.MaxResults != nil && *s.MaxResults < 1 {
|
|
|
|
|
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 1))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if invalidParams.Len() > 0 {
|
|
|
|
|
return invalidParams
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetMaxResults sets the MaxResults field's value.
|
|
|
|
|
func (s *ListStreamProcessorsInput) SetMaxResults(v int64) *ListStreamProcessorsInput {
|
|
|
|
|
s.MaxResults = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetNextToken sets the NextToken field's value.
|
|
|
|
|
func (s *ListStreamProcessorsInput) SetNextToken(v string) *ListStreamProcessorsInput {
|
|
|
|
|
s.NextToken = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
type ListStreamProcessorsOutput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// If the response is truncated, Amazon Rekognition Video returns this token
|
|
|
|
|
// that you can use in the subsequent request to retrieve the next set of stream
|
|
|
|
|
// processors.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
NextToken *string `type:"string"`
|
|
|
|
|
|
|
|
|
|
// List of stream processors that you have created.
|
|
|
|
|
StreamProcessors []*StreamProcessor `type:"list"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s ListStreamProcessorsOutput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s ListStreamProcessorsOutput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetNextToken sets the NextToken field's value.
|
|
|
|
|
func (s *ListStreamProcessorsOutput) SetNextToken(v string) *ListStreamProcessorsOutput {
|
|
|
|
|
s.NextToken = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetStreamProcessors sets the StreamProcessors field's value.
|
|
|
|
|
func (s *ListStreamProcessorsOutput) SetStreamProcessors(v []*StreamProcessor) *ListStreamProcessorsOutput {
|
|
|
|
|
s.StreamProcessors = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Provides information about a single type of moderated content found in an
|
|
|
|
|
// image or video. Each type of moderated content has a label within a hierarchical
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// taxonomy. For more information, see Detecting Unsafe Content in the Amazon
|
|
|
|
|
// Rekognition Developer Guide.
|
2017-12-08 12:03:10 +00:00
|
|
|
|
type ModerationLabel struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Specifies the confidence that Amazon Rekognition has that the label has been
|
|
|
|
|
// correctly identified.
|
|
|
|
|
//
|
|
|
|
|
// If you don't specify the MinConfidence parameter in the call to DetectModerationLabels,
|
|
|
|
|
// the operation returns labels with a confidence value greater than or equal
|
|
|
|
|
// to 50 percent.
|
|
|
|
|
Confidence *float64 `type:"float"`
|
|
|
|
|
|
|
|
|
|
// The label name for the type of content detected in the image.
|
|
|
|
|
Name *string `type:"string"`
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// The name for the parent label. Labels at the top level of the hierarchy have
|
2017-12-08 12:03:10 +00:00
|
|
|
|
// the parent label "".
|
|
|
|
|
ParentName *string `type:"string"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s ModerationLabel) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s ModerationLabel) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetConfidence sets the Confidence field's value.
|
|
|
|
|
func (s *ModerationLabel) SetConfidence(v float64) *ModerationLabel {
|
|
|
|
|
s.Confidence = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetName sets the Name field's value.
|
|
|
|
|
func (s *ModerationLabel) SetName(v string) *ModerationLabel {
|
|
|
|
|
s.Name = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetParentName sets the ParentName field's value.
|
|
|
|
|
func (s *ModerationLabel) SetParentName(v string) *ModerationLabel {
|
|
|
|
|
s.ParentName = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Indicates whether or not the mouth on the face is open, and the confidence
|
|
|
|
|
// level in the determination.
|
|
|
|
|
type MouthOpen struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Level of confidence in the determination.
|
|
|
|
|
Confidence *float64 `type:"float"`
|
|
|
|
|
|
|
|
|
|
// Boolean value that indicates whether the mouth on the face is open or not.
|
|
|
|
|
Value *bool `type:"boolean"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s MouthOpen) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s MouthOpen) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetConfidence sets the Confidence field's value.
|
|
|
|
|
func (s *MouthOpen) SetConfidence(v float64) *MouthOpen {
|
|
|
|
|
s.Confidence = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetValue sets the Value field's value.
|
|
|
|
|
func (s *MouthOpen) SetValue(v bool) *MouthOpen {
|
|
|
|
|
s.Value = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Indicates whether or not the face has a mustache, and the confidence level
|
|
|
|
|
// in the determination.
|
|
|
|
|
type Mustache struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Level of confidence in the determination.
|
|
|
|
|
Confidence *float64 `type:"float"`
|
|
|
|
|
|
|
|
|
|
// Boolean value that indicates whether the face has mustache or not.
|
|
|
|
|
Value *bool `type:"boolean"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s Mustache) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s Mustache) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetConfidence sets the Confidence field's value.
|
|
|
|
|
func (s *Mustache) SetConfidence(v float64) *Mustache {
|
|
|
|
|
s.Confidence = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetValue sets the Value field's value.
|
|
|
|
|
func (s *Mustache) SetValue(v bool) *Mustache {
|
|
|
|
|
s.Value = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// The Amazon Simple Notification Service topic to which Amazon Rekognition
|
|
|
|
|
// publishes the completion status of a video analysis operation. For more information,
|
|
|
|
|
// see api-video.
|
|
|
|
|
type NotificationChannel struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The ARN of an IAM role that gives Amazon Rekognition publishing permissions
|
|
|
|
|
// to the Amazon SNS topic.
|
|
|
|
|
//
|
|
|
|
|
// RoleArn is a required field
|
|
|
|
|
RoleArn *string `type:"string" required:"true"`
|
|
|
|
|
|
|
|
|
|
// The Amazon SNS topic to which Amazon Rekognition to posts the completion
|
|
|
|
|
// status.
|
|
|
|
|
//
|
|
|
|
|
// SNSTopicArn is a required field
|
|
|
|
|
SNSTopicArn *string `type:"string" required:"true"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s NotificationChannel) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s NotificationChannel) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Validate inspects the fields of the type to determine if they are valid.
|
|
|
|
|
func (s *NotificationChannel) Validate() error {
|
|
|
|
|
invalidParams := request.ErrInvalidParams{Context: "NotificationChannel"}
|
|
|
|
|
if s.RoleArn == nil {
|
|
|
|
|
invalidParams.Add(request.NewErrParamRequired("RoleArn"))
|
|
|
|
|
}
|
|
|
|
|
if s.SNSTopicArn == nil {
|
|
|
|
|
invalidParams.Add(request.NewErrParamRequired("SNSTopicArn"))
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if invalidParams.Len() > 0 {
|
|
|
|
|
return invalidParams
|
|
|
|
|
}
|
|
|
|
|
return nil
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetRoleArn sets the RoleArn field's value.
|
|
|
|
|
func (s *NotificationChannel) SetRoleArn(v string) *NotificationChannel {
|
|
|
|
|
s.RoleArn = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetSNSTopicArn sets the SNSTopicArn field's value.
|
|
|
|
|
func (s *NotificationChannel) SetSNSTopicArn(v string) *NotificationChannel {
|
|
|
|
|
s.SNSTopicArn = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
2019-01-21 14:27:20 +00:00
|
|
|
|
// A parent label for a label. A label can have 0, 1, or more parents.
|
|
|
|
|
type Parent struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The name of the parent label.
|
|
|
|
|
Name *string `type:"string"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s Parent) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s Parent) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetName sets the Name field's value.
|
|
|
|
|
func (s *Parent) SetName(v string) *Parent {
|
|
|
|
|
s.Name = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
// Details about a person detected in a video analysis request.
type PersonDetail struct {
	_ struct{} `type:"structure"`

	// Bounding box around the detected person.
	BoundingBox *BoundingBox `type:"structure"`

	// Face details for the detected person.
	Face *FaceDetail `type:"structure"`

	// Identifier for the person detected within a video. Use to keep track
	// of the person throughout the video. The identifier is not stored by Amazon
	// Rekognition.
	Index *int64 `type:"long"`
}

// String returns the string representation.
func (s PersonDetail) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s PersonDetail) GoString() string {
	return s.String()
}

// SetBoundingBox sets the BoundingBox field's value.
func (s *PersonDetail) SetBoundingBox(v *BoundingBox) *PersonDetail {
	s.BoundingBox = v
	return s
}

// SetFace sets the Face field's value.
func (s *PersonDetail) SetFace(v *FaceDetail) *PersonDetail {
	s.Face = v
	return s
}

// SetIndex sets the Index field's value.
func (s *PersonDetail) SetIndex(v int64) *PersonDetail {
	s.Index = &v
	return s
}
|
|
|
|
|
|
// Details and path tracking information for a single time a person's path is
// tracked in a video. Amazon Rekognition operations that track people's paths
// return an array of PersonDetection objects with elements for each time a
// person's path is tracked in a video.
//
// For more information, see GetPersonTracking in the Amazon Rekognition Developer
// Guide.
type PersonDetection struct {
	_ struct{} `type:"structure"`

	// Details about a person whose path was tracked in a video.
	Person *PersonDetail `type:"structure"`

	// The time, in milliseconds from the start of the video, that the person's
	// path was tracked.
	Timestamp *int64 `type:"long"`
}

// String returns the string representation.
func (s PersonDetection) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s PersonDetection) GoString() string {
	return s.String()
}

// SetPerson sets the Person field's value.
func (s *PersonDetection) SetPerson(v *PersonDetail) *PersonDetection {
	s.Person = v
	return s
}

// SetTimestamp sets the Timestamp field's value.
func (s *PersonDetection) SetTimestamp(v int64) *PersonDetection {
	s.Timestamp = &v
	return s
}
|
|
|
|
|
|
// Information about a person whose face matches a face(s) in an Amazon Rekognition
// collection. Includes information about the faces in the Amazon Rekognition
// collection (FaceMatch), information about the person (PersonDetail), and
// the time stamp for when the person was detected in a video. An array of PersonMatch
// objects is returned by GetFaceSearch.
type PersonMatch struct {
	_ struct{} `type:"structure"`

	// Information about the faces in the input collection that match the face of
	// a person in the video.
	FaceMatches []*FaceMatch `type:"list"`

	// Information about the matched person.
	Person *PersonDetail `type:"structure"`

	// The time, in milliseconds from the beginning of the video, that the person
	// was matched in the video.
	Timestamp *int64 `type:"long"`
}

// String returns the string representation.
func (s PersonMatch) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s PersonMatch) GoString() string {
	return s.String()
}

// SetFaceMatches sets the FaceMatches field's value.
func (s *PersonMatch) SetFaceMatches(v []*FaceMatch) *PersonMatch {
	s.FaceMatches = v
	return s
}

// SetPerson sets the Person field's value.
func (s *PersonMatch) SetPerson(v *PersonDetail) *PersonMatch {
	s.Person = v
	return s
}

// SetTimestamp sets the Timestamp field's value.
func (s *PersonMatch) SetTimestamp(v int64) *PersonMatch {
	s.Timestamp = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// The X and Y coordinates of a point on an image. The X and Y values returned
// are ratios of the overall image size. For example, if the input image is
// 700x200 and the operation returns X=0.5 and Y=0.25, then the point is at
// the (350,50) pixel coordinate on the image.
//
// An array of Point objects, Polygon, is returned by DetectText. Polygon represents
// a fine-grained polygon around detected text. For more information, see Geometry
// in the Amazon Rekognition Developer Guide.
type Point struct {
	_ struct{} `type:"structure"`

	// The value of the X coordinate for a point on a Polygon.
	X *float64 `type:"float"`

	// The value of the Y coordinate for a point on a Polygon.
	Y *float64 `type:"float"`
}

// String returns the string representation.
func (s Point) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s Point) GoString() string {
	return s.String()
}

// SetX sets the X field's value.
func (s *Point) SetX(v float64) *Point {
	s.X = &v
	return s
}

// SetY sets the Y field's value.
func (s *Point) SetY(v float64) *Point {
	s.Y = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Indicates the pose of the face as determined by its pitch, roll, and yaw.
type Pose struct {
	_ struct{} `type:"structure"`

	// Value representing the face rotation on the pitch axis.
	Pitch *float64 `type:"float"`

	// Value representing the face rotation on the roll axis.
	Roll *float64 `type:"float"`

	// Value representing the face rotation on the yaw axis.
	Yaw *float64 `type:"float"`
}

// String returns the string representation.
func (s Pose) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s Pose) GoString() string {
	return s.String()
}

// SetPitch sets the Pitch field's value.
func (s *Pose) SetPitch(v float64) *Pose {
	s.Pitch = &v
	return s
}

// SetRoll sets the Roll field's value.
func (s *Pose) SetRoll(v float64) *Pose {
	s.Roll = &v
	return s
}

// SetYaw sets the Yaw field's value.
func (s *Pose) SetYaw(v float64) *Pose {
	s.Yaw = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Input for the RecognizeCelebrities operation.
type RecognizeCelebritiesInput struct {
	_ struct{} `type:"structure"`

	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
	// is not supported.
	//
	// Image is a required field
	Image *Image `type:"structure" required:"true"`
}

// String returns the string representation.
func (s RecognizeCelebritiesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s RecognizeCelebritiesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns nil when Image is present and itself valid, and an aggregated
// request.ErrInvalidParams otherwise.
func (s *RecognizeCelebritiesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "RecognizeCelebritiesInput"}
	if s.Image == nil {
		invalidParams.Add(request.NewErrParamRequired("Image"))
	}
	if s.Image != nil {
		if err := s.Image.Validate(); err != nil {
			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetImage sets the Image field's value.
func (s *RecognizeCelebritiesInput) SetImage(v *Image) *RecognizeCelebritiesInput {
	s.Image = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Output of the RecognizeCelebrities operation.
type RecognizeCelebritiesOutput struct {
	_ struct{} `type:"structure"`

	// Details about each celebrity found in the image. Amazon Rekognition can detect
	// a maximum of 15 celebrities in an image.
	CelebrityFaces []*Celebrity `type:"list"`

	// The orientation of the input image (counterclockwise direction). If your
	// application displays the image, you can use this value to correct the orientation.
	// The bounding box coordinates returned in CelebrityFaces and UnrecognizedFaces
	// represent face locations before the image orientation is corrected.
	//
	// If the input image is in .jpeg format, it might contain exchangeable image
	// (Exif) metadata that includes the image's orientation. If so, and the Exif
	// metadata for the input image populates the orientation field, the value of
	// OrientationCorrection is null. The CelebrityFaces and UnrecognizedFaces bounding
	// box coordinates represent face locations after Exif metadata is used to correct
	// the image orientation. Images in .png format don't contain Exif metadata.
	OrientationCorrection *string `type:"string" enum:"OrientationCorrection"`

	// Details about each unrecognized face in the image.
	UnrecognizedFaces []*ComparedFace `type:"list"`
}

// String returns the string representation.
func (s RecognizeCelebritiesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s RecognizeCelebritiesOutput) GoString() string {
	return s.String()
}

// SetCelebrityFaces sets the CelebrityFaces field's value.
func (s *RecognizeCelebritiesOutput) SetCelebrityFaces(v []*Celebrity) *RecognizeCelebritiesOutput {
	s.CelebrityFaces = v
	return s
}

// SetOrientationCorrection sets the OrientationCorrection field's value.
func (s *RecognizeCelebritiesOutput) SetOrientationCorrection(v string) *RecognizeCelebritiesOutput {
	s.OrientationCorrection = &v
	return s
}

// SetUnrecognizedFaces sets the UnrecognizedFaces field's value.
func (s *RecognizeCelebritiesOutput) SetUnrecognizedFaces(v []*ComparedFace) *RecognizeCelebritiesOutput {
	s.UnrecognizedFaces = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Provides the S3 bucket name and object name.
//
// The region for the S3 bucket containing the S3 object must match the region
// you use for Amazon Rekognition operations.
//
// For Amazon Rekognition to process an S3 object, the user must have permission
// to access the S3 object. For more information, see Resource-Based Policies
// in the Amazon Rekognition Developer Guide.
type S3Object struct {
	_ struct{} `type:"structure"`

	// Name of the S3 bucket.
	Bucket *string `min:"3" type:"string"`

	// S3 object key name.
	Name *string `min:"1" type:"string"`

	// If the bucket is versioning enabled, you can specify the object version.
	Version *string `min:"1" type:"string"`
}

// String returns the string representation.
func (s S3Object) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s S3Object) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It enforces the minimum-length constraints declared in the field tags and
// returns an aggregated request.ErrInvalidParams on any violation.
func (s *S3Object) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "S3Object"}
	if s.Bucket != nil && len(*s.Bucket) < 3 {
		invalidParams.Add(request.NewErrParamMinLen("Bucket", 3))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}
	if s.Version != nil && len(*s.Version) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Version", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetBucket sets the Bucket field's value.
func (s *S3Object) SetBucket(v string) *S3Object {
	s.Bucket = &v
	return s
}

// SetName sets the Name field's value.
func (s *S3Object) SetName(v string) *S3Object {
	s.Name = &v
	return s
}

// SetVersion sets the Version field's value.
func (s *S3Object) SetVersion(v string) *S3Object {
	s.Version = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Input for the SearchFacesByImage operation.
type SearchFacesByImageInput struct {
	_ struct{} `type:"structure"`

	// ID of the collection to search.
	//
	// CollectionId is a required field
	CollectionId *string `min:"1" type:"string" required:"true"`

	// (Optional) Specifies the minimum confidence in the face match to return.
	// For example, don't return any matches where confidence in matches is less
	// than 70%.
	FaceMatchThreshold *float64 `type:"float"`

	// The input image as base64-encoded bytes or an S3 object. If you use the AWS
	// CLI to call Amazon Rekognition operations, passing base64-encoded image bytes
	// is not supported.
	//
	// Image is a required field
	Image *Image `type:"structure" required:"true"`

	// Maximum number of faces to return. The operation returns the maximum number
	// of faces with the highest confidence in the match.
	MaxFaces *int64 `min:"1" type:"integer"`
}

// String returns the string representation.
func (s SearchFacesByImageInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s SearchFacesByImageInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Required fields, minimum lengths, and minimum values from the field tags
// are all checked; violations are aggregated into request.ErrInvalidParams.
func (s *SearchFacesByImageInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "SearchFacesByImageInput"}
	if s.CollectionId == nil {
		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
	}
	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
	}
	if s.Image == nil {
		invalidParams.Add(request.NewErrParamRequired("Image"))
	}
	if s.MaxFaces != nil && *s.MaxFaces < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxFaces", 1))
	}
	if s.Image != nil {
		if err := s.Image.Validate(); err != nil {
			invalidParams.AddNested("Image", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCollectionId sets the CollectionId field's value.
func (s *SearchFacesByImageInput) SetCollectionId(v string) *SearchFacesByImageInput {
	s.CollectionId = &v
	return s
}

// SetFaceMatchThreshold sets the FaceMatchThreshold field's value.
func (s *SearchFacesByImageInput) SetFaceMatchThreshold(v float64) *SearchFacesByImageInput {
	s.FaceMatchThreshold = &v
	return s
}

// SetImage sets the Image field's value.
func (s *SearchFacesByImageInput) SetImage(v *Image) *SearchFacesByImageInput {
	s.Image = v
	return s
}

// SetMaxFaces sets the MaxFaces field's value.
func (s *SearchFacesByImageInput) SetMaxFaces(v int64) *SearchFacesByImageInput {
	s.MaxFaces = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Output of the SearchFacesByImage operation.
type SearchFacesByImageOutput struct {
	_ struct{} `type:"structure"`

	// An array of faces that match the input face, along with the confidence in
	// the match.
	FaceMatches []*FaceMatch `type:"list"`

	// Version number of the face detection model associated with the input collection
	// (CollectionId).
	FaceModelVersion *string `type:"string"`

	// The bounding box around the face in the input image that Amazon Rekognition
	// used for the search.
	SearchedFaceBoundingBox *BoundingBox `type:"structure"`

	// The level of confidence that the searchedFaceBoundingBox contains a face.
	SearchedFaceConfidence *float64 `type:"float"`
}

// String returns the string representation.
func (s SearchFacesByImageOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s SearchFacesByImageOutput) GoString() string {
	return s.String()
}

// SetFaceMatches sets the FaceMatches field's value.
func (s *SearchFacesByImageOutput) SetFaceMatches(v []*FaceMatch) *SearchFacesByImageOutput {
	s.FaceMatches = v
	return s
}

// SetFaceModelVersion sets the FaceModelVersion field's value.
func (s *SearchFacesByImageOutput) SetFaceModelVersion(v string) *SearchFacesByImageOutput {
	s.FaceModelVersion = &v
	return s
}

// SetSearchedFaceBoundingBox sets the SearchedFaceBoundingBox field's value.
func (s *SearchFacesByImageOutput) SetSearchedFaceBoundingBox(v *BoundingBox) *SearchFacesByImageOutput {
	s.SearchedFaceBoundingBox = v
	return s
}

// SetSearchedFaceConfidence sets the SearchedFaceConfidence field's value.
func (s *SearchFacesByImageOutput) SetSearchedFaceConfidence(v float64) *SearchFacesByImageOutput {
	s.SearchedFaceConfidence = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Input for the SearchFaces operation.
type SearchFacesInput struct {
	_ struct{} `type:"structure"`

	// ID of the collection the face belongs to.
	//
	// CollectionId is a required field
	CollectionId *string `min:"1" type:"string" required:"true"`

	// ID of a face to find matches for in the collection.
	//
	// FaceId is a required field
	FaceId *string `type:"string" required:"true"`

	// Optional value specifying the minimum confidence in the face match to return.
	// For example, don't return any matches where confidence in matches is less
	// than 70%.
	FaceMatchThreshold *float64 `type:"float"`

	// Maximum number of faces to return. The operation returns the maximum number
	// of faces with the highest confidence in the match.
	MaxFaces *int64 `min:"1" type:"integer"`
}

// String returns the string representation.
func (s SearchFacesInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s SearchFacesInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Required fields, minimum lengths, and minimum values from the field tags
// are all checked; violations are aggregated into request.ErrInvalidParams.
func (s *SearchFacesInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "SearchFacesInput"}
	if s.CollectionId == nil {
		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
	}
	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
	}
	if s.FaceId == nil {
		invalidParams.Add(request.NewErrParamRequired("FaceId"))
	}
	if s.MaxFaces != nil && *s.MaxFaces < 1 {
		invalidParams.Add(request.NewErrParamMinValue("MaxFaces", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetCollectionId sets the CollectionId field's value.
func (s *SearchFacesInput) SetCollectionId(v string) *SearchFacesInput {
	s.CollectionId = &v
	return s
}

// SetFaceId sets the FaceId field's value.
func (s *SearchFacesInput) SetFaceId(v string) *SearchFacesInput {
	s.FaceId = &v
	return s
}

// SetFaceMatchThreshold sets the FaceMatchThreshold field's value.
func (s *SearchFacesInput) SetFaceMatchThreshold(v float64) *SearchFacesInput {
	s.FaceMatchThreshold = &v
	return s
}

// SetMaxFaces sets the MaxFaces field's value.
func (s *SearchFacesInput) SetMaxFaces(v int64) *SearchFacesInput {
	s.MaxFaces = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Output of the SearchFaces operation.
type SearchFacesOutput struct {
	_ struct{} `type:"structure"`

	// An array of faces that matched the input face, along with the confidence
	// in the match.
	FaceMatches []*FaceMatch `type:"list"`

	// Version number of the face detection model associated with the input collection
	// (CollectionId).
	FaceModelVersion *string `type:"string"`

	// ID of the face that was searched for matches in a collection.
	SearchedFaceId *string `type:"string"`
}

// String returns the string representation.
func (s SearchFacesOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s SearchFacesOutput) GoString() string {
	return s.String()
}

// SetFaceMatches sets the FaceMatches field's value.
func (s *SearchFacesOutput) SetFaceMatches(v []*FaceMatch) *SearchFacesOutput {
	s.FaceMatches = v
	return s
}

// SetFaceModelVersion sets the FaceModelVersion field's value.
func (s *SearchFacesOutput) SetFaceModelVersion(v string) *SearchFacesOutput {
	s.FaceModelVersion = &v
	return s
}

// SetSearchedFaceId sets the SearchedFaceId field's value.
func (s *SearchFacesOutput) SetSearchedFaceId(v string) *SearchFacesOutput {
	s.SearchedFaceId = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Indicates whether or not the face is smiling, and the confidence level in
|
|
|
|
|
// the determination.
|
|
|
|
|
type Smile struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// Level of confidence in the determination.
|
|
|
|
|
Confidence *float64 `type:"float"`
|
|
|
|
|
|
|
|
|
|
// Boolean value that indicates whether the face is smiling or not.
|
|
|
|
|
Value *bool `type:"boolean"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s Smile) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s Smile) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetConfidence sets the Confidence field's value.
|
|
|
|
|
func (s *Smile) SetConfidence(v float64) *Smile {
|
|
|
|
|
s.Confidence = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetValue sets the Value field's value.
|
|
|
|
|
func (s *Smile) SetValue(v bool) *Smile {
|
|
|
|
|
s.Value = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Input for the StartCelebrityRecognition operation.
type StartCelebrityRecognitionInput struct {
	_ struct{} `type:"structure"`

	// Idempotent token used to identify the start request. If you use the same
	// token with multiple StartCelebrityRecognition requests, the same JobId is
	// returned. Use ClientRequestToken to prevent the same job from being accidentally
	// started more than once.
	ClientRequestToken *string `min:"1" type:"string"`

	// Unique identifier you specify to identify the job in the completion status
	// published to the Amazon Simple Notification Service topic.
	JobTag *string `min:"1" type:"string"`

	// The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish
	// the completion status of the celebrity recognition analysis to.
	NotificationChannel *NotificationChannel `type:"structure"`

	// The video in which you want to recognize celebrities. The video must be stored
	// in an Amazon S3 bucket.
	//
	// Video is a required field
	Video *Video `type:"structure" required:"true"`
}

// String returns the string representation.
func (s StartCelebrityRecognitionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s StartCelebrityRecognitionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Minimum lengths, the required Video field, and the nested NotificationChannel
// and Video values are all checked; violations are aggregated into
// request.ErrInvalidParams.
func (s *StartCelebrityRecognitionInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "StartCelebrityRecognitionInput"}
	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
	}
	if s.JobTag != nil && len(*s.JobTag) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
	}
	if s.Video == nil {
		invalidParams.Add(request.NewErrParamRequired("Video"))
	}
	if s.NotificationChannel != nil {
		if err := s.NotificationChannel.Validate(); err != nil {
			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
		}
	}
	if s.Video != nil {
		if err := s.Video.Validate(); err != nil {
			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetClientRequestToken sets the ClientRequestToken field's value.
func (s *StartCelebrityRecognitionInput) SetClientRequestToken(v string) *StartCelebrityRecognitionInput {
	s.ClientRequestToken = &v
	return s
}

// SetJobTag sets the JobTag field's value.
func (s *StartCelebrityRecognitionInput) SetJobTag(v string) *StartCelebrityRecognitionInput {
	s.JobTag = &v
	return s
}

// SetNotificationChannel sets the NotificationChannel field's value.
func (s *StartCelebrityRecognitionInput) SetNotificationChannel(v *NotificationChannel) *StartCelebrityRecognitionInput {
	s.NotificationChannel = v
	return s
}

// SetVideo sets the Video field's value.
func (s *StartCelebrityRecognitionInput) SetVideo(v *Video) *StartCelebrityRecognitionInput {
	s.Video = v
	return s
}
|
|
|
|
|
|
|
|
|
|
type StartCelebrityRecognitionOutput struct {
|
|
|
|
|
_ struct{} `type:"structure"`
|
|
|
|
|
|
|
|
|
|
// The identifier for the celebrity recognition analysis job. Use JobId to identify
|
|
|
|
|
// the job in a subsequent call to GetCelebrityRecognition.
|
|
|
|
|
JobId *string `min:"1" type:"string"`
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// String returns the string representation
|
|
|
|
|
func (s StartCelebrityRecognitionOutput) String() string {
|
|
|
|
|
return awsutil.Prettify(s)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// GoString returns the string representation
|
|
|
|
|
func (s StartCelebrityRecognitionOutput) GoString() string {
|
|
|
|
|
return s.String()
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetJobId sets the JobId field's value.
|
|
|
|
|
func (s *StartCelebrityRecognitionOutput) SetJobId(v string) *StartCelebrityRecognitionOutput {
|
|
|
|
|
s.JobId = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Input for the StartContentModeration operation.
type StartContentModerationInput struct {
	_ struct{} `type:"structure"`

	// Idempotent token used to identify the start request. If you use the same
	// token with multiple StartContentModeration requests, the same JobId is returned.
	// Use ClientRequestToken to prevent the same job from being accidentally started
	// more than once.
	ClientRequestToken *string `min:"1" type:"string"`

	// Unique identifier you specify to identify the job in the completion status
	// published to the Amazon Simple Notification Service topic.
	JobTag *string `min:"1" type:"string"`

	// Specifies the minimum confidence that Amazon Rekognition must have in order
	// to return a moderated content label. Confidence represents how certain Amazon
	// Rekognition is that the moderated content is correctly identified. 0 is the
	// lowest confidence. 100 is the highest confidence. Amazon Rekognition doesn't
	// return any moderated content labels with a confidence level lower than this
	// specified value. If you don't specify MinConfidence, GetContentModeration
	// returns labels with confidence values greater than or equal to 50 percent.
	MinConfidence *float64 `type:"float"`

	// The Amazon SNS topic ARN that you want Amazon Rekognition Video to publish
	// the completion status of the content moderation analysis to.
	NotificationChannel *NotificationChannel `type:"structure"`

	// The video in which you want to moderate content. The video must be stored
	// in an Amazon S3 bucket.
	//
	// Video is a required field
	Video *Video `type:"structure" required:"true"`
}

// String returns the string representation.
func (s StartContentModerationInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s StartContentModerationInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// Minimum lengths, the required Video field, and the nested NotificationChannel
// and Video values are all checked; violations are aggregated into
// request.ErrInvalidParams.
func (s *StartContentModerationInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "StartContentModerationInput"}
	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
	}
	if s.JobTag != nil && len(*s.JobTag) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
	}
	if s.Video == nil {
		invalidParams.Add(request.NewErrParamRequired("Video"))
	}
	if s.NotificationChannel != nil {
		if err := s.NotificationChannel.Validate(); err != nil {
			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
		}
	}
	if s.Video != nil {
		if err := s.Video.Validate(); err != nil {
			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}
|
|
|
|
|
|
|
|
|
|
// SetClientRequestToken sets the ClientRequestToken field's value.
|
|
|
|
|
func (s *StartContentModerationInput) SetClientRequestToken(v string) *StartContentModerationInput {
|
|
|
|
|
s.ClientRequestToken = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetJobTag sets the JobTag field's value.
|
|
|
|
|
func (s *StartContentModerationInput) SetJobTag(v string) *StartContentModerationInput {
|
|
|
|
|
s.JobTag = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetMinConfidence sets the MinConfidence field's value.
|
|
|
|
|
func (s *StartContentModerationInput) SetMinConfidence(v float64) *StartContentModerationInput {
|
|
|
|
|
s.MinConfidence = &v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetNotificationChannel sets the NotificationChannel field's value.
|
|
|
|
|
func (s *StartContentModerationInput) SetNotificationChannel(v *NotificationChannel) *StartContentModerationInput {
|
|
|
|
|
s.NotificationChannel = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// SetVideo sets the Video field's value.
|
|
|
|
|
func (s *StartContentModerationInput) SetVideo(v *Video) *StartContentModerationInput {
|
|
|
|
|
s.Video = v
|
|
|
|
|
return s
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// StartContentModerationOutput is the response to a StartContentModeration
// request.
type StartContentModerationOutput struct {
	_ struct{} `type:"structure"`

	// The identifier for the content moderation analysis job. Use JobId to identify
	// the job in a subsequent call to GetContentModeration.
	JobId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s StartContentModerationOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartContentModerationOutput) GoString() string {
	return s.String()
}

// SetJobId sets the JobId field's value.
func (s *StartContentModerationOutput) SetJobId(v string) *StartContentModerationOutput {
	s.JobId = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// StartFaceDetectionInput contains the parameters for a StartFaceDetection
// request.
type StartFaceDetectionInput struct {
	_ struct{} `type:"structure"`

	// Idempotent token used to identify the start request. If you use the same
	// token with multiple StartFaceDetection requests, the same JobId is returned.
	// Use ClientRequestToken to prevent the same job from being accidentally started
	// more than once.
	ClientRequestToken *string `min:"1" type:"string"`

	// The face attributes you want returned.
	//
	// DEFAULT - The following subset of facial attributes are returned: BoundingBox,
	// Confidence, Pose, Quality and Landmarks.
	//
	// ALL - All facial attributes are returned.
	FaceAttributes *string `type:"string" enum:"FaceAttributes"`

	// Unique identifier you specify to identify the job in the completion status
	// published to the Amazon Simple Notification Service topic.
	JobTag *string `min:"1" type:"string"`

	// The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video
	// to publish the completion status of the face detection operation.
	NotificationChannel *NotificationChannel `type:"structure"`

	// The video in which you want to detect faces. The video must be stored in
	// an Amazon S3 bucket.
	//
	// Video is a required field
	Video *Video `type:"structure" required:"true"`
}

// String returns the string representation
func (s StartFaceDetectionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartFaceDetectionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StartFaceDetectionInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "StartFaceDetectionInput"}
	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
	}
	if s.JobTag != nil && len(*s.JobTag) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
	}
	if s.Video == nil {
		invalidParams.Add(request.NewErrParamRequired("Video"))
	}
	if s.NotificationChannel != nil {
		if err := s.NotificationChannel.Validate(); err != nil {
			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
		}
	}
	if s.Video != nil {
		if err := s.Video.Validate(); err != nil {
			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetClientRequestToken sets the ClientRequestToken field's value.
func (s *StartFaceDetectionInput) SetClientRequestToken(v string) *StartFaceDetectionInput {
	s.ClientRequestToken = &v
	return s
}

// SetFaceAttributes sets the FaceAttributes field's value.
func (s *StartFaceDetectionInput) SetFaceAttributes(v string) *StartFaceDetectionInput {
	s.FaceAttributes = &v
	return s
}

// SetJobTag sets the JobTag field's value.
func (s *StartFaceDetectionInput) SetJobTag(v string) *StartFaceDetectionInput {
	s.JobTag = &v
	return s
}

// SetNotificationChannel sets the NotificationChannel field's value.
func (s *StartFaceDetectionInput) SetNotificationChannel(v *NotificationChannel) *StartFaceDetectionInput {
	s.NotificationChannel = v
	return s
}

// SetVideo sets the Video field's value.
func (s *StartFaceDetectionInput) SetVideo(v *Video) *StartFaceDetectionInput {
	s.Video = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// StartFaceDetectionOutput is the response to a StartFaceDetection request.
type StartFaceDetectionOutput struct {
	_ struct{} `type:"structure"`

	// The identifier for the face detection job. Use JobId to identify the job
	// in a subsequent call to GetFaceDetection.
	JobId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s StartFaceDetectionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartFaceDetectionOutput) GoString() string {
	return s.String()
}

// SetJobId sets the JobId field's value.
func (s *StartFaceDetectionOutput) SetJobId(v string) *StartFaceDetectionOutput {
	s.JobId = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// StartFaceSearchInput contains the parameters for a StartFaceSearch request.
type StartFaceSearchInput struct {
	_ struct{} `type:"structure"`

	// Idempotent token used to identify the start request. If you use the same
	// token with multiple StartFaceSearch requests, the same JobId is returned.
	// Use ClientRequestToken to prevent the same job from being accidentally started
	// more than once.
	ClientRequestToken *string `min:"1" type:"string"`

	// ID of the collection that contains the faces you want to search for.
	//
	// CollectionId is a required field
	CollectionId *string `min:"1" type:"string" required:"true"`

	// The minimum confidence in the person match to return. For example, don't
	// return any matches where confidence in matches is less than 70%.
	FaceMatchThreshold *float64 `type:"float"`

	// Unique identifier you specify to identify the job in the completion status
	// published to the Amazon Simple Notification Service topic.
	JobTag *string `min:"1" type:"string"`

	// The ARN of the Amazon SNS topic to which you want Amazon Rekognition Video
	// to publish the completion status of the search.
	NotificationChannel *NotificationChannel `type:"structure"`

	// The video you want to search. The video must be stored in an Amazon S3 bucket.
	//
	// Video is a required field
	Video *Video `type:"structure" required:"true"`
}

// String returns the string representation
func (s StartFaceSearchInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartFaceSearchInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StartFaceSearchInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "StartFaceSearchInput"}
	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
	}
	if s.CollectionId == nil {
		invalidParams.Add(request.NewErrParamRequired("CollectionId"))
	}
	if s.CollectionId != nil && len(*s.CollectionId) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1))
	}
	if s.JobTag != nil && len(*s.JobTag) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
	}
	if s.Video == nil {
		invalidParams.Add(request.NewErrParamRequired("Video"))
	}
	if s.NotificationChannel != nil {
		if err := s.NotificationChannel.Validate(); err != nil {
			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
		}
	}
	if s.Video != nil {
		if err := s.Video.Validate(); err != nil {
			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetClientRequestToken sets the ClientRequestToken field's value.
func (s *StartFaceSearchInput) SetClientRequestToken(v string) *StartFaceSearchInput {
	s.ClientRequestToken = &v
	return s
}

// SetCollectionId sets the CollectionId field's value.
func (s *StartFaceSearchInput) SetCollectionId(v string) *StartFaceSearchInput {
	s.CollectionId = &v
	return s
}

// SetFaceMatchThreshold sets the FaceMatchThreshold field's value.
func (s *StartFaceSearchInput) SetFaceMatchThreshold(v float64) *StartFaceSearchInput {
	s.FaceMatchThreshold = &v
	return s
}

// SetJobTag sets the JobTag field's value.
func (s *StartFaceSearchInput) SetJobTag(v string) *StartFaceSearchInput {
	s.JobTag = &v
	return s
}

// SetNotificationChannel sets the NotificationChannel field's value.
func (s *StartFaceSearchInput) SetNotificationChannel(v *NotificationChannel) *StartFaceSearchInput {
	s.NotificationChannel = v
	return s
}

// SetVideo sets the Video field's value.
func (s *StartFaceSearchInput) SetVideo(v *Video) *StartFaceSearchInput {
	s.Video = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// StartFaceSearchOutput is the response to a StartFaceSearch request.
type StartFaceSearchOutput struct {
	_ struct{} `type:"structure"`

	// The identifier for the search job. Use JobId to identify the job in a subsequent
	// call to GetFaceSearch.
	JobId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s StartFaceSearchOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartFaceSearchOutput) GoString() string {
	return s.String()
}

// SetJobId sets the JobId field's value.
func (s *StartFaceSearchOutput) SetJobId(v string) *StartFaceSearchOutput {
	s.JobId = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// StartLabelDetectionInput contains the parameters for a StartLabelDetection
// request.
type StartLabelDetectionInput struct {
	_ struct{} `type:"structure"`

	// Idempotent token used to identify the start request. If you use the same
	// token with multiple StartLabelDetection requests, the same JobId is returned.
	// Use ClientRequestToken to prevent the same job from being accidentally started
	// more than once.
	ClientRequestToken *string `min:"1" type:"string"`

	// Unique identifier you specify to identify the job in the completion status
	// published to the Amazon Simple Notification Service topic.
	JobTag *string `min:"1" type:"string"`

	// Specifies the minimum confidence that Amazon Rekognition Video must have
	// in order to return a detected label. Confidence represents how certain Amazon
	// Rekognition is that a label is correctly identified. 0 is the lowest confidence.
	// 100 is the highest confidence. Amazon Rekognition Video doesn't return any
	// labels with a confidence level lower than this specified value.
	//
	// If you don't specify MinConfidence, the operation returns labels with confidence
	// values greater than or equal to 50 percent.
	MinConfidence *float64 `type:"float"`

	// The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the
	// completion status of the label detection operation to.
	NotificationChannel *NotificationChannel `type:"structure"`

	// The video in which you want to detect labels. The video must be stored in
	// an Amazon S3 bucket.
	//
	// Video is a required field
	Video *Video `type:"structure" required:"true"`
}

// String returns the string representation
func (s StartLabelDetectionInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartLabelDetectionInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StartLabelDetectionInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "StartLabelDetectionInput"}
	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
	}
	if s.JobTag != nil && len(*s.JobTag) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
	}
	if s.Video == nil {
		invalidParams.Add(request.NewErrParamRequired("Video"))
	}
	if s.NotificationChannel != nil {
		if err := s.NotificationChannel.Validate(); err != nil {
			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
		}
	}
	if s.Video != nil {
		if err := s.Video.Validate(); err != nil {
			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetClientRequestToken sets the ClientRequestToken field's value.
func (s *StartLabelDetectionInput) SetClientRequestToken(v string) *StartLabelDetectionInput {
	s.ClientRequestToken = &v
	return s
}

// SetJobTag sets the JobTag field's value.
func (s *StartLabelDetectionInput) SetJobTag(v string) *StartLabelDetectionInput {
	s.JobTag = &v
	return s
}

// SetMinConfidence sets the MinConfidence field's value.
func (s *StartLabelDetectionInput) SetMinConfidence(v float64) *StartLabelDetectionInput {
	s.MinConfidence = &v
	return s
}

// SetNotificationChannel sets the NotificationChannel field's value.
func (s *StartLabelDetectionInput) SetNotificationChannel(v *NotificationChannel) *StartLabelDetectionInput {
	s.NotificationChannel = v
	return s
}

// SetVideo sets the Video field's value.
func (s *StartLabelDetectionInput) SetVideo(v *Video) *StartLabelDetectionInput {
	s.Video = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// StartLabelDetectionOutput is the response to a StartLabelDetection request.
type StartLabelDetectionOutput struct {
	_ struct{} `type:"structure"`

	// The identifier for the label detection job. Use JobId to identify the job
	// in a subsequent call to GetLabelDetection.
	JobId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s StartLabelDetectionOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartLabelDetectionOutput) GoString() string {
	return s.String()
}

// SetJobId sets the JobId field's value.
func (s *StartLabelDetectionOutput) SetJobId(v string) *StartLabelDetectionOutput {
	s.JobId = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// StartPersonTrackingInput contains the parameters for a StartPersonTracking
// request.
type StartPersonTrackingInput struct {
	_ struct{} `type:"structure"`

	// Idempotent token used to identify the start request. If you use the same
	// token with multiple StartPersonTracking requests, the same JobId is returned.
	// Use ClientRequestToken to prevent the same job from being accidentally started
	// more than once.
	ClientRequestToken *string `min:"1" type:"string"`

	// Unique identifier you specify to identify the job in the completion status
	// published to the Amazon Simple Notification Service topic.
	JobTag *string `min:"1" type:"string"`

	// The Amazon SNS topic ARN you want Amazon Rekognition Video to publish the
	// completion status of the people detection operation to.
	NotificationChannel *NotificationChannel `type:"structure"`

	// The video in which you want to detect people. The video must be stored in
	// an Amazon S3 bucket.
	//
	// Video is a required field
	Video *Video `type:"structure" required:"true"`
}

// String returns the string representation
func (s StartPersonTrackingInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartPersonTrackingInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StartPersonTrackingInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "StartPersonTrackingInput"}
	if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1))
	}
	if s.JobTag != nil && len(*s.JobTag) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("JobTag", 1))
	}
	if s.Video == nil {
		invalidParams.Add(request.NewErrParamRequired("Video"))
	}
	if s.NotificationChannel != nil {
		if err := s.NotificationChannel.Validate(); err != nil {
			invalidParams.AddNested("NotificationChannel", err.(request.ErrInvalidParams))
		}
	}
	if s.Video != nil {
		if err := s.Video.Validate(); err != nil {
			invalidParams.AddNested("Video", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetClientRequestToken sets the ClientRequestToken field's value.
func (s *StartPersonTrackingInput) SetClientRequestToken(v string) *StartPersonTrackingInput {
	s.ClientRequestToken = &v
	return s
}

// SetJobTag sets the JobTag field's value.
func (s *StartPersonTrackingInput) SetJobTag(v string) *StartPersonTrackingInput {
	s.JobTag = &v
	return s
}

// SetNotificationChannel sets the NotificationChannel field's value.
func (s *StartPersonTrackingInput) SetNotificationChannel(v *NotificationChannel) *StartPersonTrackingInput {
	s.NotificationChannel = v
	return s
}

// SetVideo sets the Video field's value.
func (s *StartPersonTrackingInput) SetVideo(v *Video) *StartPersonTrackingInput {
	s.Video = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// StartPersonTrackingOutput is the response to a StartPersonTracking request.
type StartPersonTrackingOutput struct {
	_ struct{} `type:"structure"`

	// The identifier for the person detection job. Use JobId to identify the job
	// in a subsequent call to GetPersonTracking.
	JobId *string `min:"1" type:"string"`
}

// String returns the string representation
func (s StartPersonTrackingOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartPersonTrackingOutput) GoString() string {
	return s.String()
}

// SetJobId sets the JobId field's value.
func (s *StartPersonTrackingOutput) SetJobId(v string) *StartPersonTrackingOutput {
	s.JobId = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// StartStreamProcessorInput contains the parameters for a StartStreamProcessor
// request.
type StartStreamProcessorInput struct {
	_ struct{} `type:"structure"`

	// The name of the stream processor to start processing.
	//
	// Name is a required field
	Name *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s StartStreamProcessorInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartStreamProcessorInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StartStreamProcessorInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "StartStreamProcessorInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetName sets the Name field's value.
func (s *StartStreamProcessorInput) SetName(v string) *StartStreamProcessorInput {
	s.Name = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// StartStreamProcessorOutput is the (empty) response to a StartStreamProcessor
// request.
type StartStreamProcessorOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s StartStreamProcessorOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StartStreamProcessorOutput) GoString() string {
	return s.String()
}
|
|
|
|
|
|
|
|
|
|
// StopStreamProcessorInput contains the parameters for a StopStreamProcessor
// request.
type StopStreamProcessorInput struct {
	_ struct{} `type:"structure"`

	// The name of a stream processor created by CreateStreamProcessor.
	//
	// Name is a required field
	Name *string `min:"1" type:"string" required:"true"`
}

// String returns the string representation
func (s StopStreamProcessorInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StopStreamProcessorInput) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *StopStreamProcessorInput) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "StopStreamProcessorInput"}
	if s.Name == nil {
		invalidParams.Add(request.NewErrParamRequired("Name"))
	}
	if s.Name != nil && len(*s.Name) < 1 {
		invalidParams.Add(request.NewErrParamMinLen("Name", 1))
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetName sets the Name field's value.
func (s *StopStreamProcessorInput) SetName(v string) *StopStreamProcessorInput {
	s.Name = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// StopStreamProcessorOutput is the (empty) response to a StopStreamProcessor
// request.
type StopStreamProcessorOutput struct {
	_ struct{} `type:"structure"`
}

// String returns the string representation
func (s StopStreamProcessorOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StopStreamProcessorOutput) GoString() string {
	return s.String()
}
|
|
|
|
|
|
|
|
|
|
// An object that recognizes faces in a streaming video. An Amazon Rekognition
// stream processor is created by a call to CreateStreamProcessor. The request
// parameters for CreateStreamProcessor describe the Kinesis video stream source
// for the streaming video, face recognition parameters, and where to stream
// the analysis results.
type StreamProcessor struct {
	_ struct{} `type:"structure"`

	// Name of the Amazon Rekognition stream processor.
	Name *string `min:"1" type:"string"`

	// Current status of the Amazon Rekognition stream processor.
	Status *string `type:"string" enum:"StreamProcessorStatus"`
}

// String returns the string representation
func (s StreamProcessor) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StreamProcessor) GoString() string {
	return s.String()
}

// SetName sets the Name field's value.
func (s *StreamProcessor) SetName(v string) *StreamProcessor {
	s.Name = &v
	return s
}

// SetStatus sets the Status field's value.
func (s *StreamProcessor) SetStatus(v string) *StreamProcessor {
	s.Status = &v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Information about the source streaming video.
type StreamProcessorInput struct {
	_ struct{} `type:"structure"`

	// The Kinesis video stream input stream for the source streaming video.
	KinesisVideoStream *KinesisVideoStream `type:"structure"`
}

// String returns the string representation
func (s StreamProcessorInput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StreamProcessorInput) GoString() string {
	return s.String()
}

// SetKinesisVideoStream sets the KinesisVideoStream field's value.
func (s *StreamProcessorInput) SetKinesisVideoStream(v *KinesisVideoStream) *StreamProcessorInput {
	s.KinesisVideoStream = v
	return s
}
|
|
|
|
|
|
// Information about the Amazon Kinesis Data Streams stream to which a Amazon
// Rekognition Video stream processor streams the results of a video analysis.
// For more information, see CreateStreamProcessor in the Amazon Rekognition
// Developer Guide.
type StreamProcessorOutput struct {
	_ struct{} `type:"structure"`

	// The Amazon Kinesis Data Streams stream to which the Amazon Rekognition stream
	// processor streams the analysis results.
	KinesisDataStream *KinesisDataStream `type:"structure"`
}

// String returns the string representation
func (s StreamProcessorOutput) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s StreamProcessorOutput) GoString() string {
	return s.String()
}

// SetKinesisDataStream sets the KinesisDataStream field's value.
func (s *StreamProcessorOutput) SetKinesisDataStream(v *KinesisDataStream) *StreamProcessorOutput {
	s.KinesisDataStream = v
	return s
}
|
|
|
|
|
|
|
|
|
|
// Input parameters used to recognize faces in a streaming video analyzed by
// an Amazon Rekognition stream processor.
type StreamProcessorSettings struct {
	// Unexported sentinel whose struct tag records this API shape's wire
	// type for the SDK marshalers.
	_ struct{} `type:"structure"`

	// Face search settings to use on a streaming video.
	FaceSearch *FaceSearchSettings `type:"structure"`
}

// String returns the string representation, pretty-printed via
// awsutil.Prettify.
func (s StreamProcessorSettings) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s StreamProcessorSettings) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns nil when the parameters are valid; otherwise it returns the
// accumulated request.ErrInvalidParams.
func (s *StreamProcessorSettings) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "StreamProcessorSettings"}
	// FaceSearch is optional; only validate it when present, nesting any
	// errors it reports under its field name.
	if s.FaceSearch != nil {
		if err := s.FaceSearch.Validate(); err != nil {
			invalidParams.AddNested("FaceSearch", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetFaceSearch sets the FaceSearch field's value. It returns the receiver
// so calls can be chained.
func (s *StreamProcessorSettings) SetFaceSearch(v *FaceSearchSettings) *StreamProcessorSettings {
	s.FaceSearch = v
	return s
}
// Indicates whether or not the face is wearing sunglasses, and the confidence
// level in the determination.
type Sunglasses struct {
	// Unexported sentinel whose struct tag records this API shape's wire
	// type for the SDK marshalers.
	_ struct{} `type:"structure"`

	// Level of confidence in the determination.
	Confidence *float64 `type:"float"`

	// Boolean value that indicates whether the face is wearing sunglasses or not.
	Value *bool `type:"boolean"`
}

// String returns the string representation, pretty-printed via
// awsutil.Prettify.
func (s Sunglasses) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s Sunglasses) GoString() string {
	return s.String()
}

// SetConfidence sets the Confidence field's value. It returns the receiver
// so calls can be chained.
func (s *Sunglasses) SetConfidence(v float64) *Sunglasses {
	s.Confidence = &v
	return s
}

// SetValue sets the Value field's value. It returns the receiver so calls
// can be chained.
func (s *Sunglasses) SetValue(v bool) *Sunglasses {
	s.Value = &v
	return s
}
// Information about a word or line of text detected by DetectText.
//
// The DetectedText field contains the text that Amazon Rekognition detected
// in the image.
//
// Every word and line has an identifier (Id). Each word belongs to a line and
// has a parent identifier (ParentId) that identifies the line of text in which
// the word appears. The word Id is also an index for the word within a line
// of words.
//
// For more information, see Detecting Text in the Amazon Rekognition Developer
// Guide.
type TextDetection struct {
	// Unexported sentinel whose struct tag records this API shape's wire
	// type for the SDK marshalers.
	_ struct{} `type:"structure"`

	// The confidence that Amazon Rekognition has in the accuracy of the detected
	// text and the accuracy of the geometry points around the detected text.
	Confidence *float64 `type:"float"`

	// The word or line of text recognized by Amazon Rekognition.
	DetectedText *string `type:"string"`

	// The location of the detected text on the image. Includes an axis aligned
	// coarse bounding box surrounding the text and a finer grain polygon for more
	// accurate spatial information.
	Geometry *Geometry `type:"structure"`

	// The identifier for the detected text. The identifier is only unique for a
	// single call to DetectText.
	Id *int64 `type:"integer"`

	// The Parent identifier for the detected text identified by the value of ID.
	// If the type of detected text is LINE, the value of ParentId is Null.
	ParentId *int64 `type:"integer"`

	// The type of text that was detected. Valid values are listed in the
	// TextTypes enum.
	Type *string `type:"string" enum:"TextTypes"`
}

// String returns the string representation, pretty-printed via
// awsutil.Prettify.
func (s TextDetection) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s TextDetection) GoString() string {
	return s.String()
}

// SetConfidence sets the Confidence field's value. It returns the receiver
// so calls can be chained.
func (s *TextDetection) SetConfidence(v float64) *TextDetection {
	s.Confidence = &v
	return s
}

// SetDetectedText sets the DetectedText field's value. It returns the
// receiver so calls can be chained.
func (s *TextDetection) SetDetectedText(v string) *TextDetection {
	s.DetectedText = &v
	return s
}

// SetGeometry sets the Geometry field's value. It returns the receiver so
// calls can be chained.
func (s *TextDetection) SetGeometry(v *Geometry) *TextDetection {
	s.Geometry = v
	return s
}

// SetId sets the Id field's value. It returns the receiver so calls can be
// chained.
func (s *TextDetection) SetId(v int64) *TextDetection {
	s.Id = &v
	return s
}

// SetParentId sets the ParentId field's value. It returns the receiver so
// calls can be chained.
func (s *TextDetection) SetParentId(v int64) *TextDetection {
	s.ParentId = &v
	return s
}

// SetType sets the Type field's value. It returns the receiver so calls can
// be chained.
func (s *TextDetection) SetType(v string) *TextDetection {
	s.Type = &v
	return s
}
// A face that IndexFaces detected, but didn't index. Use the Reasons response
// attribute to determine why a face wasn't indexed.
type UnindexedFace struct {
	// Unexported sentinel whose struct tag records this API shape's wire
	// type for the SDK marshalers.
	_ struct{} `type:"structure"`

	// The structure that contains attributes of a face that IndexFaces detected,
	// but didn't index.
	FaceDetail *FaceDetail `type:"structure"`

	// An array of reasons that specify why a face wasn't indexed.
	//
	//    * EXTREME_POSE - The face is at a pose that can't be detected. For example,
	//    the head is turned too far away from the camera.
	//
	//    * EXCEEDS_MAX_FACES - The number of faces detected is already higher than
	//    that specified by the MaxFaces input parameter for IndexFaces.
	//
	//    * LOW_BRIGHTNESS - The image is too dark.
	//
	//    * LOW_SHARPNESS - The image is too blurry.
	//
	//    * LOW_CONFIDENCE - The face was detected with a low confidence.
	//
	//    * SMALL_BOUNDING_BOX - The bounding box around the face is too small.
	Reasons []*string `type:"list"`
}

// String returns the string representation, pretty-printed via
// awsutil.Prettify.
func (s UnindexedFace) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s UnindexedFace) GoString() string {
	return s.String()
}

// SetFaceDetail sets the FaceDetail field's value. It returns the receiver
// so calls can be chained.
func (s *UnindexedFace) SetFaceDetail(v *FaceDetail) *UnindexedFace {
	s.FaceDetail = v
	return s
}

// SetReasons sets the Reasons field's value. It returns the receiver so calls
// can be chained.
func (s *UnindexedFace) SetReasons(v []*string) *UnindexedFace {
	s.Reasons = v
	return s
}
// Video file stored in an Amazon S3 bucket. Amazon Rekognition video start
// operations such as StartLabelDetection use Video to specify a video for analysis.
// The supported file formats are .mp4, .mov and .avi.
type Video struct {
	// Unexported sentinel whose struct tag records this API shape's wire
	// type for the SDK marshalers.
	_ struct{} `type:"structure"`

	// The Amazon S3 bucket name and file name for the video.
	S3Object *S3Object `type:"structure"`
}

// String returns the string representation, pretty-printed via
// awsutil.Prettify.
func (s Video) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s Video) GoString() string {
	return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
// It returns nil when the parameters are valid; otherwise it returns the
// accumulated request.ErrInvalidParams.
func (s *Video) Validate() error {
	invalidParams := request.ErrInvalidParams{Context: "Video"}
	// S3Object is optional; only validate it when present, nesting any
	// errors it reports under its field name.
	if s.S3Object != nil {
		if err := s.S3Object.Validate(); err != nil {
			invalidParams.AddNested("S3Object", err.(request.ErrInvalidParams))
		}
	}

	if invalidParams.Len() > 0 {
		return invalidParams
	}
	return nil
}

// SetS3Object sets the S3Object field's value. It returns the receiver so
// calls can be chained.
func (s *Video) SetS3Object(v *S3Object) *Video {
	s.S3Object = v
	return s
}
// Information about a video that Amazon Rekognition analyzed. Videometadata
// is returned in every page of paginated responses from an Amazon Rekognition
// video operation.
type VideoMetadata struct {
	// Unexported sentinel whose struct tag records this API shape's wire
	// type for the SDK marshalers.
	_ struct{} `type:"structure"`

	// Type of compression used in the analyzed video.
	Codec *string `type:"string"`

	// Length of the video in milliseconds.
	DurationMillis *int64 `type:"long"`

	// Format of the analyzed video. Possible values are MP4, MOV and AVI.
	Format *string `type:"string"`

	// Vertical pixel dimension of the video.
	FrameHeight *int64 `type:"long"`

	// Number of frames per second in the video.
	FrameRate *float64 `type:"float"`

	// Horizontal pixel dimension of the video.
	FrameWidth *int64 `type:"long"`
}

// String returns the string representation, pretty-printed via
// awsutil.Prettify.
func (s VideoMetadata) String() string {
	return awsutil.Prettify(s)
}

// GoString returns the string representation.
func (s VideoMetadata) GoString() string {
	return s.String()
}

// SetCodec sets the Codec field's value. It returns the receiver so calls
// can be chained.
func (s *VideoMetadata) SetCodec(v string) *VideoMetadata {
	s.Codec = &v
	return s
}

// SetDurationMillis sets the DurationMillis field's value. It returns the
// receiver so calls can be chained.
func (s *VideoMetadata) SetDurationMillis(v int64) *VideoMetadata {
	s.DurationMillis = &v
	return s
}

// SetFormat sets the Format field's value. It returns the receiver so calls
// can be chained.
func (s *VideoMetadata) SetFormat(v string) *VideoMetadata {
	s.Format = &v
	return s
}

// SetFrameHeight sets the FrameHeight field's value. It returns the receiver
// so calls can be chained.
func (s *VideoMetadata) SetFrameHeight(v int64) *VideoMetadata {
	s.FrameHeight = &v
	return s
}

// SetFrameRate sets the FrameRate field's value. It returns the receiver so
// calls can be chained.
func (s *VideoMetadata) SetFrameRate(v float64) *VideoMetadata {
	s.FrameRate = &v
	return s
}

// SetFrameWidth sets the FrameWidth field's value. It returns the receiver
// so calls can be chained.
func (s *VideoMetadata) SetFrameWidth(v int64) *VideoMetadata {
	s.FrameWidth = &v
	return s
}
// Enum values accepted for the Attribute parameter.
const (
	// AttributeDefault is a Attribute enum value
	AttributeDefault = "DEFAULT"

	// AttributeAll is a Attribute enum value
	AttributeAll = "ALL"
)

// Enum values accepted for the CelebrityRecognitionSortBy parameter.
const (
	// CelebrityRecognitionSortById is a CelebrityRecognitionSortBy enum value
	CelebrityRecognitionSortById = "ID"

	// CelebrityRecognitionSortByTimestamp is a CelebrityRecognitionSortBy enum value
	CelebrityRecognitionSortByTimestamp = "TIMESTAMP"
)

// Enum values accepted for the ContentModerationSortBy parameter.
const (
	// ContentModerationSortByName is a ContentModerationSortBy enum value
	ContentModerationSortByName = "NAME"

	// ContentModerationSortByTimestamp is a ContentModerationSortBy enum value
	ContentModerationSortByTimestamp = "TIMESTAMP"
)

// Enum values returned in the EmotionName field.
const (
	// EmotionNameHappy is a EmotionName enum value
	EmotionNameHappy = "HAPPY"

	// EmotionNameSad is a EmotionName enum value
	EmotionNameSad = "SAD"

	// EmotionNameAngry is a EmotionName enum value
	EmotionNameAngry = "ANGRY"

	// EmotionNameConfused is a EmotionName enum value
	EmotionNameConfused = "CONFUSED"

	// EmotionNameDisgusted is a EmotionName enum value
	EmotionNameDisgusted = "DISGUSTED"

	// EmotionNameSurprised is a EmotionName enum value
	EmotionNameSurprised = "SURPRISED"

	// EmotionNameCalm is a EmotionName enum value
	EmotionNameCalm = "CALM"

	// EmotionNameUnknown is a EmotionName enum value
	EmotionNameUnknown = "UNKNOWN"
)

// Enum values accepted for the FaceAttributes parameter.
const (
	// FaceAttributesDefault is a FaceAttributes enum value
	FaceAttributesDefault = "DEFAULT"

	// FaceAttributesAll is a FaceAttributes enum value
	FaceAttributesAll = "ALL"
)

// Enum values accepted for the FaceSearchSortBy parameter.
const (
	// FaceSearchSortByIndex is a FaceSearchSortBy enum value
	FaceSearchSortByIndex = "INDEX"

	// FaceSearchSortByTimestamp is a FaceSearchSortBy enum value
	FaceSearchSortByTimestamp = "TIMESTAMP"
)

// Enum values returned in the GenderType field.
const (
	// GenderTypeMale is a GenderType enum value
	GenderTypeMale = "Male"

	// GenderTypeFemale is a GenderType enum value
	GenderTypeFemale = "Female"
)

// Enum values accepted for the LabelDetectionSortBy parameter.
const (
	// LabelDetectionSortByName is a LabelDetectionSortBy enum value
	LabelDetectionSortByName = "NAME"

	// LabelDetectionSortByTimestamp is a LabelDetectionSortBy enum value
	LabelDetectionSortByTimestamp = "TIMESTAMP"
)

// Enum values returned in the LandmarkType field.
const (
	// LandmarkTypeEyeLeft is a LandmarkType enum value
	LandmarkTypeEyeLeft = "eyeLeft"

	// LandmarkTypeEyeRight is a LandmarkType enum value
	LandmarkTypeEyeRight = "eyeRight"

	// LandmarkTypeNose is a LandmarkType enum value
	LandmarkTypeNose = "nose"

	// LandmarkTypeMouthLeft is a LandmarkType enum value
	LandmarkTypeMouthLeft = "mouthLeft"

	// LandmarkTypeMouthRight is a LandmarkType enum value
	LandmarkTypeMouthRight = "mouthRight"

	// LandmarkTypeLeftEyeBrowLeft is a LandmarkType enum value
	LandmarkTypeLeftEyeBrowLeft = "leftEyeBrowLeft"

	// LandmarkTypeLeftEyeBrowRight is a LandmarkType enum value
	LandmarkTypeLeftEyeBrowRight = "leftEyeBrowRight"

	// LandmarkTypeLeftEyeBrowUp is a LandmarkType enum value
	LandmarkTypeLeftEyeBrowUp = "leftEyeBrowUp"

	// LandmarkTypeRightEyeBrowLeft is a LandmarkType enum value
	LandmarkTypeRightEyeBrowLeft = "rightEyeBrowLeft"

	// LandmarkTypeRightEyeBrowRight is a LandmarkType enum value
	LandmarkTypeRightEyeBrowRight = "rightEyeBrowRight"

	// LandmarkTypeRightEyeBrowUp is a LandmarkType enum value
	LandmarkTypeRightEyeBrowUp = "rightEyeBrowUp"

	// LandmarkTypeLeftEyeLeft is a LandmarkType enum value
	LandmarkTypeLeftEyeLeft = "leftEyeLeft"

	// LandmarkTypeLeftEyeRight is a LandmarkType enum value
	LandmarkTypeLeftEyeRight = "leftEyeRight"

	// LandmarkTypeLeftEyeUp is a LandmarkType enum value
	LandmarkTypeLeftEyeUp = "leftEyeUp"

	// LandmarkTypeLeftEyeDown is a LandmarkType enum value
	LandmarkTypeLeftEyeDown = "leftEyeDown"

	// LandmarkTypeRightEyeLeft is a LandmarkType enum value
	LandmarkTypeRightEyeLeft = "rightEyeLeft"

	// LandmarkTypeRightEyeRight is a LandmarkType enum value
	LandmarkTypeRightEyeRight = "rightEyeRight"

	// LandmarkTypeRightEyeUp is a LandmarkType enum value
	LandmarkTypeRightEyeUp = "rightEyeUp"

	// LandmarkTypeRightEyeDown is a LandmarkType enum value
	LandmarkTypeRightEyeDown = "rightEyeDown"

	// LandmarkTypeNoseLeft is a LandmarkType enum value
	LandmarkTypeNoseLeft = "noseLeft"

	// LandmarkTypeNoseRight is a LandmarkType enum value
	LandmarkTypeNoseRight = "noseRight"

	// LandmarkTypeMouthUp is a LandmarkType enum value
	LandmarkTypeMouthUp = "mouthUp"

	// LandmarkTypeMouthDown is a LandmarkType enum value
	LandmarkTypeMouthDown = "mouthDown"

	// LandmarkTypeLeftPupil is a LandmarkType enum value
	LandmarkTypeLeftPupil = "leftPupil"

	// LandmarkTypeRightPupil is a LandmarkType enum value
	LandmarkTypeRightPupil = "rightPupil"

	// LandmarkTypeUpperJawlineLeft is a LandmarkType enum value
	LandmarkTypeUpperJawlineLeft = "upperJawlineLeft"

	// LandmarkTypeMidJawlineLeft is a LandmarkType enum value
	LandmarkTypeMidJawlineLeft = "midJawlineLeft"

	// LandmarkTypeChinBottom is a LandmarkType enum value
	LandmarkTypeChinBottom = "chinBottom"

	// LandmarkTypeMidJawlineRight is a LandmarkType enum value
	LandmarkTypeMidJawlineRight = "midJawlineRight"

	// LandmarkTypeUpperJawlineRight is a LandmarkType enum value
	LandmarkTypeUpperJawlineRight = "upperJawlineRight"
)

// Enum values returned in the OrientationCorrection field.
const (
	// OrientationCorrectionRotate0 is a OrientationCorrection enum value
	OrientationCorrectionRotate0 = "ROTATE_0"

	// OrientationCorrectionRotate90 is a OrientationCorrection enum value
	OrientationCorrectionRotate90 = "ROTATE_90"

	// OrientationCorrectionRotate180 is a OrientationCorrection enum value
	OrientationCorrectionRotate180 = "ROTATE_180"

	// OrientationCorrectionRotate270 is a OrientationCorrection enum value
	OrientationCorrectionRotate270 = "ROTATE_270"
)

// Enum values accepted for the PersonTrackingSortBy parameter.
const (
	// PersonTrackingSortByIndex is a PersonTrackingSortBy enum value
	PersonTrackingSortByIndex = "INDEX"

	// PersonTrackingSortByTimestamp is a PersonTrackingSortBy enum value
	PersonTrackingSortByTimestamp = "TIMESTAMP"
)

// Enum values accepted for the QualityFilter parameter.
const (
	// QualityFilterNone is a QualityFilter enum value
	QualityFilterNone = "NONE"

	// QualityFilterAuto is a QualityFilter enum value
	QualityFilterAuto = "AUTO"
)

// Enum values returned in the Reasons list of an UnindexedFace.
const (
	// ReasonExceedsMaxFaces is a Reason enum value
	ReasonExceedsMaxFaces = "EXCEEDS_MAX_FACES"

	// ReasonExtremePose is a Reason enum value
	ReasonExtremePose = "EXTREME_POSE"

	// ReasonLowBrightness is a Reason enum value
	ReasonLowBrightness = "LOW_BRIGHTNESS"

	// ReasonLowSharpness is a Reason enum value
	ReasonLowSharpness = "LOW_SHARPNESS"

	// ReasonLowConfidence is a Reason enum value
	ReasonLowConfidence = "LOW_CONFIDENCE"

	// ReasonSmallBoundingBox is a Reason enum value
	ReasonSmallBoundingBox = "SMALL_BOUNDING_BOX"
)

// Enum values returned in the StreamProcessorStatus field.
const (
	// StreamProcessorStatusStopped is a StreamProcessorStatus enum value
	StreamProcessorStatusStopped = "STOPPED"

	// StreamProcessorStatusStarting is a StreamProcessorStatus enum value
	StreamProcessorStatusStarting = "STARTING"

	// StreamProcessorStatusRunning is a StreamProcessorStatus enum value
	StreamProcessorStatusRunning = "RUNNING"

	// StreamProcessorStatusFailed is a StreamProcessorStatus enum value
	StreamProcessorStatusFailed = "FAILED"

	// StreamProcessorStatusStopping is a StreamProcessorStatus enum value
	StreamProcessorStatusStopping = "STOPPING"
)

// Enum values returned in the TextDetection Type field.
const (
	// TextTypesLine is a TextTypes enum value
	TextTypesLine = "LINE"

	// TextTypesWord is a TextTypes enum value
	TextTypesWord = "WORD"
)

// Enum values returned in the JobStatus of video operations.
const (
	// VideoJobStatusInProgress is a VideoJobStatus enum value
	VideoJobStatusInProgress = "IN_PROGRESS"

	// VideoJobStatusSucceeded is a VideoJobStatus enum value
	VideoJobStatusSucceeded = "SUCCEEDED"

	// VideoJobStatusFailed is a VideoJobStatus enum value
	VideoJobStatusFailed = "FAILED"
)