// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. package rekognition import ( "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/awsutil" "github.com/aws/aws-sdk-go/aws/request" ) const opCompareFaces = "CompareFaces" // CompareFacesRequest generates a "aws/request.Request" representing the // client's request for the CompareFaces operation. The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See CompareFaces for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the CompareFaces method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the CompareFacesRequest method. // req, resp := client.CompareFacesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) CompareFacesRequest(input *CompareFacesInput) (req *request.Request, output *CompareFacesOutput) { op := &request.Operation{ Name: opCompareFaces, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CompareFacesInput{} } output = &CompareFacesOutput{} req = c.newRequest(op, input, output) return } // CompareFaces API operation for Amazon Rekognition. // // Compares a face in the source input image with each face detected in the // target input image. // // If the source image contains multiple faces, the service detects the largest // face and compares it with each face detected in the target image. // // In response, the operation returns an array of face matches ordered by similarity // score in descending order. For each face match, the response provides a bounding // box of the face, facial landmarks, pose details (pitch, roll, and yaw), quality // (brightness and sharpness), and confidence value (indicating the level of // confidence that the bounding box contains a face). The response also provides // a similarity score, which indicates how closely the faces match. // // By default, only faces with a similarity score of greater than or equal to // 80% are returned in the response. You can change this value by specifying // the SimilarityThreshold parameter. // // CompareFaces also returns an array of faces that don't match the source image. // For each face, it returns a bounding box, confidence value, landmarks, pose // details, and quality. The response also returns information about the face // in the source image, including the bounding box of the face and confidence // value. // // If the image doesn't contain Exif metadata, CompareFaces returns orientation // information for the source and target images. Use these values to display // the images with the correct image orientation. // // This is a stateless API operation. That is, data returned by this operation // doesn't persist. // // For an example, see get-started-exercise-compare-faces. // // This operation requires permissions to perform the rekognition:CompareFaces // action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error.
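//
// A minimal, illustrative usage sketch (not generated by the SDK): it assumes an
// existing *Rekognition client named svc, the "fmt" and awserr packages, and an
// S3 bucket and object keys of your own ("my-bucket", "source.jpg", "target.jpg"
// below are placeholders):
//
//    out, err := svc.CompareFaces(&CompareFacesInput{
//        SourceImage: &Image{
//            S3Object: &S3Object{Bucket: aws.String("my-bucket"), Name: aws.String("source.jpg")},
//        },
//        TargetImage: &Image{
//            S3Object: &S3Object{Bucket: aws.String("my-bucket"), Name: aws.String("target.jpg")},
//        },
//        SimilarityThreshold: aws.Float64(90),
//    })
//    if err != nil {
//        if aerr, ok := err.(awserr.Error); ok {
//            fmt.Println(aerr.Code(), aerr.Message()) // e.g. "InvalidS3ObjectException"
//        }
//        return
//    }
//    for _, match := range out.FaceMatches {
//        fmt.Println(aws.Float64Value(match.Similarity))
//    }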
// // See the AWS API reference guide for Amazon Rekognition's // API operation CompareFaces for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException" // Amazon Rekognition is unable to access the S3 object specified in the request. // // * ErrCodeImageTooLargeException "ImageTooLargeException" // The input image size exceeds the allowed limit. For more information, see // limits. // // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. Try your call again. // // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeInvalidImageFormatException "InvalidImageFormatException" // The provided image format is not supported. // func (c *Rekognition) CompareFaces(input *CompareFacesInput) (*CompareFacesOutput, error) { req, out := c.CompareFacesRequest(input) return out, req.Send() } // CompareFacesWithContext is the same as CompareFaces with the addition of // the ability to pass a context and additional request options. // // See CompareFaces for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) CompareFacesWithContext(ctx aws.Context, input *CompareFacesInput, opts ...request.Option) (*CompareFacesOutput, error) { req, out := c.CompareFacesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opCreateCollection = "CreateCollection" // CreateCollectionRequest generates a "aws/request.Request" representing the // client's request for the CreateCollection operation. The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See CreateCollection for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the CreateCollection method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the CreateCollectionRequest method. 
// req, resp := client.CreateCollectionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) CreateCollectionRequest(input *CreateCollectionInput) (req *request.Request, output *CreateCollectionOutput) { op := &request.Operation{ Name: opCreateCollection, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &CreateCollectionInput{} } output = &CreateCollectionOutput{} req = c.newRequest(op, input, output) return } // CreateCollection API operation for Amazon Rekognition. // // Creates a collection in an AWS Region. You can add faces to the collection // using the operation. // // For example, you might create collections, one for each of your application // users. A user can then index faces using the IndexFaces operation and persist // results in a specific collection. Then, a user can search the collection // for faces in the user-specific container. // // Collection names are case-sensitive. // // For an example, see example1. // // This operation requires permissions to perform the rekognition:CreateCollection // action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Rekognition's // API operation CreateCollection for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. Try your call again. // // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeResourceAlreadyExistsException "ResourceAlreadyExistsException" // A collection with the specified ID already exists. // func (c *Rekognition) CreateCollection(input *CreateCollectionInput) (*CreateCollectionOutput, error) { req, out := c.CreateCollectionRequest(input) return out, req.Send() } // CreateCollectionWithContext is the same as CreateCollection with the addition of // the ability to pass a context and additional request options. // // See CreateCollection for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) CreateCollectionWithContext(ctx aws.Context, input *CreateCollectionInput, opts ...request.Option) (*CreateCollectionOutput, error) { req, out := c.CreateCollectionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDeleteCollection = "DeleteCollection" // DeleteCollectionRequest generates a "aws/request.Request" representing the // client's request for the DeleteCollection operation. 
The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See DeleteCollection for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the DeleteCollection method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the DeleteCollectionRequest method. // req, resp := client.DeleteCollectionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) DeleteCollectionRequest(input *DeleteCollectionInput) (req *request.Request, output *DeleteCollectionOutput) { op := &request.Operation{ Name: opDeleteCollection, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteCollectionInput{} } output = &DeleteCollectionOutput{} req = c.newRequest(op, input, output) return } // DeleteCollection API operation for Amazon Rekognition. // // Deletes the specified collection. Note that this operation removes all faces // in the collection. For an example, see example1. // // This operation requires permissions to perform the rekognition:DeleteCollection // action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Rekognition's // API operation DeleteCollection for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. Try your call again. // // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // Collection specified in the request is not found. // func (c *Rekognition) DeleteCollection(input *DeleteCollectionInput) (*DeleteCollectionOutput, error) { req, out := c.DeleteCollectionRequest(input) return out, req.Send() } // DeleteCollectionWithContext is the same as DeleteCollection with the addition of // the ability to pass a context and additional request options. // // See DeleteCollection for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
func (c *Rekognition) DeleteCollectionWithContext(ctx aws.Context, input *DeleteCollectionInput, opts ...request.Option) (*DeleteCollectionOutput, error) { req, out := c.DeleteCollectionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDeleteFaces = "DeleteFaces" // DeleteFacesRequest generates a "aws/request.Request" representing the // client's request for the DeleteFaces operation. The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See DeleteFaces for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the DeleteFaces method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the DeleteFacesRequest method. // req, resp := client.DeleteFacesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) DeleteFacesRequest(input *DeleteFacesInput) (req *request.Request, output *DeleteFacesOutput) { op := &request.Operation{ Name: opDeleteFaces, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DeleteFacesInput{} } output = &DeleteFacesOutput{} req = c.newRequest(op, input, output) return } // DeleteFaces API operation for Amazon Rekognition. // // Deletes faces from a collection. You specify a collection ID and an array // of face IDs to remove from the collection. // // This operation requires permissions to perform the rekognition:DeleteFaces // action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Rekognition's // API operation DeleteFaces for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. Try your call again. // // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // Collection specified in the request is not found. // func (c *Rekognition) DeleteFaces(input *DeleteFacesInput) (*DeleteFacesOutput, error) { req, out := c.DeleteFacesRequest(input) return out, req.Send() } // DeleteFacesWithContext is the same as DeleteFaces with the addition of // the ability to pass a context and additional request options. // // See DeleteFaces for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) DeleteFacesWithContext(ctx aws.Context, input *DeleteFacesInput, opts ...request.Option) (*DeleteFacesOutput, error) { req, out := c.DeleteFacesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDetectFaces = "DetectFaces" // DetectFacesRequest generates a "aws/request.Request" representing the // client's request for the DetectFaces operation. The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See DetectFaces for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the DetectFaces method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the DetectFacesRequest method. // req, resp := client.DetectFacesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) DetectFacesRequest(input *DetectFacesInput) (req *request.Request, output *DetectFacesOutput) { op := &request.Operation{ Name: opDetectFaces, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DetectFacesInput{} } output = &DetectFacesOutput{} req = c.newRequest(op, input, output) return } // DetectFaces API operation for Amazon Rekognition. // // Detects faces within an image (JPEG or PNG) that is provided as input. // // For each face detected, the operation returns face details including a bounding // box of the face, a confidence value (that the bounding box contains a face), // and a fixed set of attributes such as facial landmarks (for example, coordinates // of eye and mouth), gender, presence of beard, sunglasses, etc. // // The face-detection algorithm is most effective on frontal faces. For non-frontal // or obscured faces, the algorithm may not detect the faces or might detect // faces with lower confidence. // // This is a stateless API operation. That is, the operation does not persist // any data. // // For an example, see get-started-exercise-detect-faces. // // This operation requires permissions to perform the rekognition:DetectFaces // action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Rekognition's // API operation DetectFaces for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException" // Amazon Rekognition is unable to access the S3 object specified in the request. // // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeImageTooLargeException "ImageTooLargeException" // The input image size exceeds the allowed limit. For more information, see // limits. 
// // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. Try your call again. // // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeInvalidImageFormatException "InvalidImageFormatException" // The provided image format is not supported. // func (c *Rekognition) DetectFaces(input *DetectFacesInput) (*DetectFacesOutput, error) { req, out := c.DetectFacesRequest(input) return out, req.Send() } // DetectFacesWithContext is the same as DetectFaces with the addition of // the ability to pass a context and additional request options. // // See DetectFaces for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) DetectFacesWithContext(ctx aws.Context, input *DetectFacesInput, opts ...request.Option) (*DetectFacesOutput, error) { req, out := c.DetectFacesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDetectLabels = "DetectLabels" // DetectLabelsRequest generates a "aws/request.Request" representing the // client's request for the DetectLabels operation. The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See DetectLabels for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the DetectLabels method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the DetectLabelsRequest method. // req, resp := client.DetectLabelsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) DetectLabelsRequest(input *DetectLabelsInput) (req *request.Request, output *DetectLabelsOutput) { op := &request.Operation{ Name: opDetectLabels, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DetectLabelsInput{} } output = &DetectLabelsOutput{} req = c.newRequest(op, input, output) return } // DetectLabels API operation for Amazon Rekognition. // // Detects instances of real-world labels within an image (JPEG or PNG) provided // as input. This includes objects like flower, tree, and table; events like // wedding, graduation, and birthday party; and concepts like landscape, evening, // and nature. For an example, see get-started-exercise-detect-labels. // // For each object, scene, and concept the API returns one or more labels. Each // label provides the object name, and the level of confidence that the image // contains the object. 
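//
// A minimal, illustrative call sketch (assumes an existing *Rekognition client
// named svc; the bucket and object name are placeholders). The prose example that
// follows describes the kind of labels such a call returns:
//
//    out, err := svc.DetectLabels(&DetectLabelsInput{
//        Image: &Image{
//            S3Object: &S3Object{Bucket: aws.String("my-bucket"), Name: aws.String("photo.jpg")},
//        },
//        MaxLabels:     aws.Int64(10),
//        MinConfidence: aws.Float64(75),
//    })
//    if err == nil {
//        for _, label := range out.Labels {
//            fmt.Println(aws.StringValue(label.Name), aws.Float64Value(label.Confidence))
//        }
//    }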
For example, suppose the input image has a lighthouse, // the sea, and a rock. The response will include all three labels, one for // each object. // // {Name: lighthouse, Confidence: 98.4629} // // {Name: rock,Confidence: 79.2097} // // {Name: sea,Confidence: 75.061} // // In the preceding example, the operation returns one label for each of the // three objects. The operation can also return multiple labels for the same // object in the image. For example, if the input image shows a flower (for // example, a tulip), the operation might return the following three labels. // // {Name: flower,Confidence: 99.0562} // // {Name: plant,Confidence: 99.0562} // // {Name: tulip,Confidence: 99.0562} // // In this example, the detection algorithm more precisely identifies the flower // as a tulip. // // You can provide the input image as an S3 object or as base64-encoded bytes. // In response, the API returns an array of labels. In addition, the response // also includes the orientation correction. Optionally, you can specify MinConfidence // to control the confidence threshold for the labels returned. The default // is 50%. You can also add the MaxLabels parameter to limit the number of labels // returned. // // If the object detected is a person, the operation doesn't provide the same // facial details that the DetectFaces operation provides. // // This is a stateless API operation. That is, the operation does not persist // any data. // // This operation requires permissions to perform the rekognition:DetectLabels // action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Rekognition's // API operation DetectLabels for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException" // Amazon Rekognition is unable to access the S3 object specified in the request. // // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeImageTooLargeException "ImageTooLargeException" // The input image size exceeds the allowed limit. For more information, see // limits. // // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. Try your call again. // // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeInvalidImageFormatException "InvalidImageFormatException" // The provided image format is not supported. // func (c *Rekognition) DetectLabels(input *DetectLabelsInput) (*DetectLabelsOutput, error) { req, out := c.DetectLabelsRequest(input) return out, req.Send() } // DetectLabelsWithContext is the same as DetectLabels with the addition of // the ability to pass a context and additional request options. // // See DetectLabels for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) DetectLabelsWithContext(ctx aws.Context, input *DetectLabelsInput, opts ...request.Option) (*DetectLabelsOutput, error) { req, out := c.DetectLabelsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opDetectModerationLabels = "DetectModerationLabels" // DetectModerationLabelsRequest generates a "aws/request.Request" representing the // client's request for the DetectModerationLabels operation. The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See DetectModerationLabels for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the DetectModerationLabels method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the DetectModerationLabelsRequest method. // req, resp := client.DetectModerationLabelsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) DetectModerationLabelsRequest(input *DetectModerationLabelsInput) (req *request.Request, output *DetectModerationLabelsOutput) { op := &request.Operation{ Name: opDetectModerationLabels, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &DetectModerationLabelsInput{} } output = &DetectModerationLabelsOutput{} req = c.newRequest(op, input, output) return } // DetectModerationLabels API operation for Amazon Rekognition. // // Detects explicit or suggestive adult content in a specified JPEG or PNG format // image. Use DetectModerationLabels to moderate images depending on your requirements. // For example, you might want to filter images that contain nudity, but not // images containing suggestive content. // // To filter images, use the labels returned by DetectModerationLabels to determine // which types of content are appropriate. For information about moderation // labels, see image-moderation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Rekognition's // API operation DetectModerationLabels for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException" // Amazon Rekognition is unable to access the S3 object specified in the request. // // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeImageTooLargeException "ImageTooLargeException" // The input image size exceeds the allowed limit. For more information, see // limits. // // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. Try your call again. 
// // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeInvalidImageFormatException "InvalidImageFormatException" // The provided image format is not supported. // func (c *Rekognition) DetectModerationLabels(input *DetectModerationLabelsInput) (*DetectModerationLabelsOutput, error) { req, out := c.DetectModerationLabelsRequest(input) return out, req.Send() } // DetectModerationLabelsWithContext is the same as DetectModerationLabels with the addition of // the ability to pass a context and additional request options. // // See DetectModerationLabels for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) DetectModerationLabelsWithContext(ctx aws.Context, input *DetectModerationLabelsInput, opts ...request.Option) (*DetectModerationLabelsOutput, error) { req, out := c.DetectModerationLabelsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opGetCelebrityInfo = "GetCelebrityInfo" // GetCelebrityInfoRequest generates a "aws/request.Request" representing the // client's request for the GetCelebrityInfo operation. The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See GetCelebrityInfo for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the GetCelebrityInfo method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the GetCelebrityInfoRequest method. // req, resp := client.GetCelebrityInfoRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) GetCelebrityInfoRequest(input *GetCelebrityInfoInput) (req *request.Request, output *GetCelebrityInfoOutput) { op := &request.Operation{ Name: opGetCelebrityInfo, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &GetCelebrityInfoInput{} } output = &GetCelebrityInfoOutput{} req = c.newRequest(op, input, output) return } // GetCelebrityInfo API operation for Amazon Rekognition. // // Gets the name and additional information about a celebrity based on his or // her Rekognition ID. The additional information is returned as an array of // URLs. If there is no additional information about the celebrity, this list // is empty. For more information, see celebrity-recognition. // // This operation requires permissions to perform the rekognition:GetCelebrityInfo // action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. 
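//
// A minimal, illustrative usage sketch (assumes an existing *Rekognition client
// named svc; the celebrity ID is a placeholder you would take from a prior
// RecognizeCelebrities response):
//
//    out, err := svc.GetCelebrityInfo(&GetCelebrityInfoInput{
//        Id: aws.String("1AB2cd3"),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.Name), aws.StringValueSlice(out.Urls))
//    }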
// // See the AWS API reference guide for Amazon Rekognition's // API operation GetCelebrityInfo for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. Try your call again. // // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // Collection specified in the request is not found. // func (c *Rekognition) GetCelebrityInfo(input *GetCelebrityInfoInput) (*GetCelebrityInfoOutput, error) { req, out := c.GetCelebrityInfoRequest(input) return out, req.Send() } // GetCelebrityInfoWithContext is the same as GetCelebrityInfo with the addition of // the ability to pass a context and additional request options. // // See GetCelebrityInfo for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) GetCelebrityInfoWithContext(ctx aws.Context, input *GetCelebrityInfoInput, opts ...request.Option) (*GetCelebrityInfoOutput, error) { req, out := c.GetCelebrityInfoRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opIndexFaces = "IndexFaces" // IndexFacesRequest generates a "aws/request.Request" representing the // client's request for the IndexFaces operation. The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See IndexFaces for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the IndexFaces method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the IndexFacesRequest method. // req, resp := client.IndexFacesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) IndexFacesRequest(input *IndexFacesInput) (req *request.Request, output *IndexFacesOutput) { op := &request.Operation{ Name: opIndexFaces, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &IndexFacesInput{} } output = &IndexFacesOutput{} req = c.newRequest(op, input, output) return } // IndexFaces API operation for Amazon Rekognition. // // Detects faces in the input image and adds them to the specified collection. // // Amazon Rekognition does not save the actual faces detected. 
Instead, the // underlying detection algorithm first detects the faces in the input image, // and for each face extracts facial features into a feature vector, and stores // it in the back-end database. Amazon Rekognition uses feature vectors when // performing face match and search operations using the SearchFaces and SearchFacesByImage // operations. // // If you provide the optional ExternalImageId for the input image, // Amazon Rekognition associates this ID with all faces that it detects. When // you call the operation, the response returns the external ID. You can use // this external image ID to create a client-side index to associate the faces // with each image. You can then use the index to find all faces in an image. // // In response, the operation returns an array of metadata for all detected // faces. This includes the bounding box of the detected face, confidence value // (indicating the bounding box contains a face), a face ID assigned by the // service for each face that is detected and stored, and an image ID assigned // by the service for the input image. If you request all facial attributes // (using the DetectionAttributes parameter), Amazon Rekognition returns detailed // facial attributes such as facial landmarks (for example, location of eye // and mouth) and other facial attributes such as gender. If you provide the same // image, specify the same collection, and use the same external ID in the IndexFaces // operation, Amazon Rekognition doesn't save duplicate face metadata. // // For an example, see example2. // // This operation requires permissions to perform the rekognition:IndexFaces // action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Rekognition's // API operation IndexFaces for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException" // Amazon Rekognition is unable to access the S3 object specified in the request. // // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeImageTooLargeException "ImageTooLargeException" // The input image size exceeds the allowed limit. For more information, see // limits. // // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. Try your call again. // // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // Collection specified in the request is not found. // // * ErrCodeInvalidImageFormatException "InvalidImageFormatException" // The provided image format is not supported.
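//
// A minimal, illustrative usage sketch (assumes an existing *Rekognition client
// named svc; the collection ID, external image ID, bucket, and object name are
// placeholders):
//
//    out, err := svc.IndexFaces(&IndexFacesInput{
//        CollectionId:    aws.String("my-collection"),
//        ExternalImageId: aws.String("photo-001"),
//        Image: &Image{
//            S3Object: &S3Object{Bucket: aws.String("my-bucket"), Name: aws.String("photo.jpg")},
//        },
//        DetectionAttributes: []*string{aws.String("ALL")},
//    })
//    if err == nil {
//        for _, record := range out.FaceRecords {
//            fmt.Println(aws.StringValue(record.Face.FaceId))
//        }
//    }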
// func (c *Rekognition) IndexFaces(input *IndexFacesInput) (*IndexFacesOutput, error) { req, out := c.IndexFacesRequest(input) return out, req.Send() } // IndexFacesWithContext is the same as IndexFaces with the addition of // the ability to pass a context and additional request options. // // See IndexFaces for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) IndexFacesWithContext(ctx aws.Context, input *IndexFacesInput, opts ...request.Option) (*IndexFacesOutput, error) { req, out := c.IndexFacesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opListCollections = "ListCollections" // ListCollectionsRequest generates a "aws/request.Request" representing the // client's request for the ListCollections operation. The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See ListCollections for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the ListCollections method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the ListCollectionsRequest method. // req, resp := client.ListCollectionsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) ListCollectionsRequest(input *ListCollectionsInput) (req *request.Request, output *ListCollectionsOutput) { op := &request.Operation{ Name: opListCollections, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, LimitToken: "MaxResults", TruncationToken: "", }, } if input == nil { input = &ListCollectionsInput{} } output = &ListCollectionsOutput{} req = c.newRequest(op, input, output) return } // ListCollections API operation for Amazon Rekognition. // // Returns list of collection IDs in your account. If the result is truncated, // the response also provides a NextToken that you can use in the subsequent // request to fetch the next set of collection IDs. // // For an example, see example1. // // This operation requires permissions to perform the rekognition:ListCollections // action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Rekognition's // API operation ListCollections for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. 
Try your call again. // // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException" // Pagination token in the request is not valid. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // Collection specified in the request is not found. // func (c *Rekognition) ListCollections(input *ListCollectionsInput) (*ListCollectionsOutput, error) { req, out := c.ListCollectionsRequest(input) return out, req.Send() } // ListCollectionsWithContext is the same as ListCollections with the addition of // the ability to pass a context and additional request options. // // See ListCollections for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) ListCollectionsWithContext(ctx aws.Context, input *ListCollectionsInput, opts ...request.Option) (*ListCollectionsOutput, error) { req, out := c.ListCollectionsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // ListCollectionsPages iterates over the pages of a ListCollections operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See ListCollections method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a ListCollections operation. // pageNum := 0 // err := client.ListCollectionsPages(params, // func(page *ListCollectionsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *Rekognition) ListCollectionsPages(input *ListCollectionsInput, fn func(*ListCollectionsOutput, bool) bool) error { return c.ListCollectionsPagesWithContext(aws.BackgroundContext(), input, fn) } // ListCollectionsPagesWithContext same as ListCollectionsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) ListCollectionsPagesWithContext(ctx aws.Context, input *ListCollectionsInput, fn func(*ListCollectionsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *ListCollectionsInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.ListCollectionsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } cont := true for p.Next() && cont { cont = fn(p.Page().(*ListCollectionsOutput), !p.HasNextPage()) } return p.Err() } const opListFaces = "ListFaces" // ListFacesRequest generates a "aws/request.Request" representing the // client's request for the ListFaces operation. 
The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See ListFaces for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the ListFaces method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the ListFacesRequest method. // req, resp := client.ListFacesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) ListFacesRequest(input *ListFacesInput) (req *request.Request, output *ListFacesOutput) { op := &request.Operation{ Name: opListFaces, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ InputTokens: []string{"NextToken"}, OutputTokens: []string{"NextToken"}, LimitToken: "MaxResults", TruncationToken: "", }, } if input == nil { input = &ListFacesInput{} } output = &ListFacesOutput{} req = c.newRequest(op, input, output) return } // ListFaces API operation for Amazon Rekognition. // // Returns metadata for faces in the specified collection. This metadata includes // information such as the bounding box coordinates, the confidence (that the // bounding box contains a face), and face ID. For an example, see example3. // // This operation requires permissions to perform the rekognition:ListFaces // action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Rekognition's // API operation ListFaces for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. Try your call again. // // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeInvalidPaginationTokenException "InvalidPaginationTokenException" // Pagination token in the request is not valid. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // Collection specified in the request is not found. // func (c *Rekognition) ListFaces(input *ListFacesInput) (*ListFacesOutput, error) { req, out := c.ListFacesRequest(input) return out, req.Send() } // ListFacesWithContext is the same as ListFaces with the addition of // the ability to pass a context and additional request options. // // See ListFaces for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) ListFacesWithContext(ctx aws.Context, input *ListFacesInput, opts ...request.Option) (*ListFacesOutput, error) { req, out := c.ListFacesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // ListFacesPages iterates over the pages of a ListFaces operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // // See ListFaces method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // // // Example iterating over at most 3 pages of a ListFaces operation. // pageNum := 0 // err := client.ListFacesPages(params, // func(page *ListFacesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // func (c *Rekognition) ListFacesPages(input *ListFacesInput, fn func(*ListFacesOutput, bool) bool) error { return c.ListFacesPagesWithContext(aws.BackgroundContext(), input, fn) } // ListFacesPagesWithContext same as ListFacesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) ListFacesPagesWithContext(ctx aws.Context, input *ListFacesInput, fn func(*ListFacesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { var inCpy *ListFacesInput if input != nil { tmp := *input inCpy = &tmp } req, _ := c.ListFacesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil }, } cont := true for p.Next() && cont { cont = fn(p.Page().(*ListFacesOutput), !p.HasNextPage()) } return p.Err() } const opRecognizeCelebrities = "RecognizeCelebrities" // RecognizeCelebritiesRequest generates a "aws/request.Request" representing the // client's request for the RecognizeCelebrities operation. The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See RecognizeCelebrities for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the RecognizeCelebrities method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the RecognizeCelebritiesRequest method. 
// req, resp := client.RecognizeCelebritiesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) RecognizeCelebritiesRequest(input *RecognizeCelebritiesInput) (req *request.Request, output *RecognizeCelebritiesOutput) { op := &request.Operation{ Name: opRecognizeCelebrities, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &RecognizeCelebritiesInput{} } output = &RecognizeCelebritiesOutput{} req = c.newRequest(op, input, output) return } // RecognizeCelebrities API operation for Amazon Rekognition. // // Returns an array of celebrities recognized in the input image. The image // is passed either as base64-encoded image bytes or as a reference to an image // in an Amazon S3 bucket. The image must be either a PNG or JPEG formatted // file. For more information, see celebrity-recognition. // // RecognizeCelebrities returns the 15 largest faces in the image. It lists // recognized celebrities in the CelebrityFaces list and unrecognized faces // in the UnrecognizedFaces list. The operation doesn't return celebrities whose // face sizes are smaller than the largest 15 faces in the image. // // For each celebrity recognized, the API returns a Celebrity object. The Celebrity // object contains the celebrity name, ID, URL links to additional information, // match confidence, and a ComparedFace object that you can use to locate the // celebrity's face on the image. // // Rekognition does not retain information about which images a celebrity has // been recognized in. Your application must store this information and use // the Celebrity ID property as a unique identifier for the celebrity. If you // don't store the celebrity name or additional information URLs returned by // RecognizeCelebrities, you will need the ID to identify the celebrity in a // call to the operation. // // For an example, see recognize-celebrities-tutorial. // // This operation requires permissions to perform the rekognition:RecognizeCelebrities // operation. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Rekognition's // API operation RecognizeCelebrities for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException" // Amazon Rekognition is unable to access the S3 object specified in the request. // // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeInvalidImageFormatException "InvalidImageFormatException" // The provided image format is not supported. // // * ErrCodeImageTooLargeException "ImageTooLargeException" // The input image size exceeds the allowed limit. For more information, see // limits. // // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. Try your call again. // // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. 
If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeInvalidImageFormatException "InvalidImageFormatException" // The provided image format is not supported. // func (c *Rekognition) RecognizeCelebrities(input *RecognizeCelebritiesInput) (*RecognizeCelebritiesOutput, error) { req, out := c.RecognizeCelebritiesRequest(input) return out, req.Send() } // RecognizeCelebritiesWithContext is the same as RecognizeCelebrities with the addition of // the ability to pass a context and additional request options. // // See RecognizeCelebrities for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) RecognizeCelebritiesWithContext(ctx aws.Context, input *RecognizeCelebritiesInput, opts ...request.Option) (*RecognizeCelebritiesOutput, error) { req, out := c.RecognizeCelebritiesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opSearchFaces = "SearchFaces" // SearchFacesRequest generates a "aws/request.Request" representing the // client's request for the SearchFaces operation. The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See SearchFaces for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the SearchFaces method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the SearchFacesRequest method. // req, resp := client.SearchFacesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) SearchFacesRequest(input *SearchFacesInput) (req *request.Request, output *SearchFacesOutput) { op := &request.Operation{ Name: opSearchFaces, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &SearchFacesInput{} } output = &SearchFacesOutput{} req = c.newRequest(op, input, output) return } // SearchFaces API operation for Amazon Rekognition. // // For a given input face ID, searches for matching faces in the collection // the face belongs to. You get a face ID when you add a face to the collection // using the IndexFaces operation. The operation compares the features of the // input face with faces in the specified collection. // // You can also search faces without indexing faces by using the SearchFacesByImage // operation. // // The operation response returns an array of faces that match, ordered by similarity // score with the highest similarity first. More specifically, it is an array // of metadata for each face match that is found. Along with the metadata, the // response also includes a confidence value for each face match, indicating // the confidence that the specific face matches the input face. // // For an example, see example3. // // This operation requires permissions to perform the rekognition:SearchFaces // action. // // Returns awserr.Error for service API and SDK errors. 
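// A hedged example of inspecting such an error (the handling shown is
// illustrative; ErrCodeResourceNotFoundException is one of the codes listed
// below, and the client and params values are assumed to exist):
//
//    out, err := client.SearchFaces(params)
//    if err != nil {
//        if aerr, ok := err.(awserr.Error); ok {
//            switch aerr.Code() {
//            case rekognition.ErrCodeResourceNotFoundException:
//                // The collection named in the request was not found.
//            default:
//                fmt.Println(aerr.Code(), aerr.Message())
//            }
//        }
//        return
//    }
//    fmt.Println(out)
//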
Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Rekognition's // API operation SearchFaces for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. Try your call again. // // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // Collection specified in the request is not found. // func (c *Rekognition) SearchFaces(input *SearchFacesInput) (*SearchFacesOutput, error) { req, out := c.SearchFacesRequest(input) return out, req.Send() } // SearchFacesWithContext is the same as SearchFaces with the addition of // the ability to pass a context and additional request options. // // See SearchFaces for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) SearchFacesWithContext(ctx aws.Context, input *SearchFacesInput, opts ...request.Option) (*SearchFacesOutput, error) { req, out := c.SearchFacesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } const opSearchFacesByImage = "SearchFacesByImage" // SearchFacesByImageRequest generates a "aws/request.Request" representing the // client's request for the SearchFacesByImage operation. The "output" return // value can be used to capture response data after the request's "Send" method // is called. // // See SearchFacesByImage for usage and error information. // // Creating a request object using this method should be used when you want to inject // custom logic into the request's lifecycle using a custom handler, or if you want to // access properties on the request object before or after sending the request. If // you just want the service response, call the SearchFacesByImage method directly // instead. // // Note: You must call the "Send" method on the returned request object in order // to execute the request. // // // Example sending a request using the SearchFacesByImageRequest method. // req, resp := client.SearchFacesByImageRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } func (c *Rekognition) SearchFacesByImageRequest(input *SearchFacesByImageInput) (req *request.Request, output *SearchFacesByImageOutput) { op := &request.Operation{ Name: opSearchFacesByImage, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { input = &SearchFacesByImageInput{} } output = &SearchFacesByImageOutput{} req = c.newRequest(op, input, output) return } // SearchFacesByImage API operation for Amazon Rekognition. 
// // For a given input image, first detects the largest face in the image, and // then searches the specified collection for matching faces. The operation // compares the features of the input face with faces in the specified collection. // // To search for all faces in an input image, you might first call the operation, // and then use the face IDs returned in subsequent calls to the operation. // // You can also call the DetectFaces operation and use the bounding boxes in // the response to make face crops, which then you can pass in to the SearchFacesByImage // operation. // // The response returns an array of faces that match, ordered by similarity // score with the highest similarity first. More specifically, it is an array // of metadata for each face match found. Along with the metadata, the response // also includes a similarity indicating how similar the face is to the input // face. In the response, the operation also returns the bounding box (and a // confidence level that the bounding box contains a face) of the face that // Amazon Rekognition used for the input image. // // For an example, see example3. // // This operation requires permissions to perform the rekognition:SearchFacesByImage // action. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon Rekognition's // API operation SearchFacesByImage for usage and error information. // // Returned Error Codes: // * ErrCodeInvalidS3ObjectException "InvalidS3ObjectException" // Amazon Rekognition is unable to access the S3 object specified in the request. // // * ErrCodeInvalidParameterException "InvalidParameterException" // Input parameter violated a constraint. Validate your parameter before calling // the API operation again. // // * ErrCodeImageTooLargeException "ImageTooLargeException" // The input image size exceeds the allowed limit. For more information, see // limits. // // * ErrCodeAccessDeniedException "AccessDeniedException" // You are not authorized to perform the action. // // * ErrCodeInternalServerError "InternalServerError" // Amazon Rekognition experienced a service issue. Try your call again. // // * ErrCodeThrottlingException "ThrottlingException" // Amazon Rekognition is temporarily unable to process the request. Try your // call again. // // * ErrCodeProvisionedThroughputExceededException "ProvisionedThroughputExceededException" // The number of requests exceeded your throughput limit. If you want to increase // this limit, contact Amazon Rekognition. // // * ErrCodeResourceNotFoundException "ResourceNotFoundException" // Collection specified in the request is not found. // // * ErrCodeInvalidImageFormatException "InvalidImageFormatException" // The provided image format is not supported. // func (c *Rekognition) SearchFacesByImage(input *SearchFacesByImageInput) (*SearchFacesByImageOutput, error) { req, out := c.SearchFacesByImageRequest(input) return out, req.Send() } // SearchFacesByImageWithContext is the same as SearchFacesByImage with the addition of // the ability to pass a context and additional request options. // // See SearchFacesByImage for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. 
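// For example, a deadline can be applied to the call (a hedged sketch; the
// timeout value is illustrative and the standard context and time packages
// are assumed to be imported):
//
//    ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second)
//    defer cancel()
//    resp, err := client.SearchFacesByImageWithContext(ctx, params)
//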
See https://golang.org/pkg/context/ // for more information on using Contexts. func (c *Rekognition) SearchFacesByImageWithContext(ctx aws.Context, input *SearchFacesByImageInput, opts ...request.Option) (*SearchFacesByImageOutput, error) { req, out := c.SearchFacesByImageRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } // Structure containing the estimated age range, in years, for a face. // // Rekognition estimates an age-range for faces detected in the input image. // Estimated age ranges can overlap; a face of a 5 year old may have an estimated // range of 4-6 whilst the face of a 6 year old may have an estimated range // of 4-8. type AgeRange struct { _ struct{} `type:"structure"` // The highest estimated age. High *int64 `type:"integer"` // The lowest estimated age. Low *int64 `type:"integer"` } // String returns the string representation func (s AgeRange) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s AgeRange) GoString() string { return s.String() } // SetHigh sets the High field's value. func (s *AgeRange) SetHigh(v int64) *AgeRange { s.High = &v return s } // SetLow sets the Low field's value. func (s *AgeRange) SetLow(v int64) *AgeRange { s.Low = &v return s } // Indicates whether or not the face has a beard, and the confidence level in // the determination. type Beard struct { _ struct{} `type:"structure"` // Level of confidence in the determination. Confidence *float64 `type:"float"` // Boolean value that indicates whether the face has beard or not. Value *bool `type:"boolean"` } // String returns the string representation func (s Beard) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Beard) GoString() string { return s.String() } // SetConfidence sets the Confidence field's value. func (s *Beard) SetConfidence(v float64) *Beard { s.Confidence = &v return s } // SetValue sets the Value field's value. func (s *Beard) SetValue(v bool) *Beard { s.Value = &v return s } // Identifies the bounding box around the object or face. The left (x-coordinate) // and top (y-coordinate) are coordinates representing the top and left sides // of the bounding box. Note that the upper-left corner of the image is the // origin (0,0). // // The top and left values returned are ratios of the overall image size. For // example, if the input image is 700x200 pixels, and the top-left coordinate // of the bounding box is 350x50 pixels, the API returns a left value of 0.5 // (350/700) and a top value of 0.25 (50/200). // // The width and height values represent the dimensions of the bounding box // as a ratio of the overall image dimension. For example, if the input image // is 700x200 pixels, and the bounding box width is 70 pixels, the width returned // is 0.1. // // The bounding box coordinates can have negative values. For example, if Amazon // Rekognition is able to detect a face that is at the image edge and is only // partially visible, the service can return coordinates that are outside the // image bounds and, depending on the image edge, you might get negative values // or values greater than 1 for the left or top values. type BoundingBox struct { _ struct{} `type:"structure"` // Height of the bounding box as a ratio of the overall image height. Height *float64 `type:"float"` // Left coordinate of the bounding box as a ratio of overall image width. Left *float64 `type:"float"` // Top coordinate of the bounding box as a ratio of overall image height. 
Top *float64 `type:"float"` // Width of the bounding box as a ratio of the overall image width. Width *float64 `type:"float"` } // String returns the string representation func (s BoundingBox) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s BoundingBox) GoString() string { return s.String() } // SetHeight sets the Height field's value. func (s *BoundingBox) SetHeight(v float64) *BoundingBox { s.Height = &v return s } // SetLeft sets the Left field's value. func (s *BoundingBox) SetLeft(v float64) *BoundingBox { s.Left = &v return s } // SetTop sets the Top field's value. func (s *BoundingBox) SetTop(v float64) *BoundingBox { s.Top = &v return s } // SetWidth sets the Width field's value. func (s *BoundingBox) SetWidth(v float64) *BoundingBox { s.Width = &v return s } // Provides information about a celebrity recognized by the operation. type Celebrity struct { _ struct{} `type:"structure"` // Provides information about the celebrity's face, such as its location on // the image. Face *ComparedFace `type:"structure"` // A unique identifier for the celebrity. Id *string `type:"string"` // The confidence, in percentage, that Rekognition has that the recognized face // is the celebrity. MatchConfidence *float64 `type:"float"` // The name of the celebrity. Name *string `type:"string"` // An array of URLs pointing to additional information about the celebrity. // If there is no additional information about the celebrity, this list is empty. Urls []*string `type:"list"` } // String returns the string representation func (s Celebrity) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Celebrity) GoString() string { return s.String() } // SetFace sets the Face field's value. func (s *Celebrity) SetFace(v *ComparedFace) *Celebrity { s.Face = v return s } // SetId sets the Id field's value. func (s *Celebrity) SetId(v string) *Celebrity { s.Id = &v return s } // SetMatchConfidence sets the MatchConfidence field's value. func (s *Celebrity) SetMatchConfidence(v float64) *Celebrity { s.MatchConfidence = &v return s } // SetName sets the Name field's value. func (s *Celebrity) SetName(v string) *Celebrity { s.Name = &v return s } // SetUrls sets the Urls field's value. func (s *Celebrity) SetUrls(v []*string) *Celebrity { s.Urls = v return s } type CompareFacesInput struct { _ struct{} `type:"structure"` // The minimum level of confidence in the face matches that a match must meet // to be included in the FaceMatches array. SimilarityThreshold *float64 `type:"float"` // The source image, either as bytes or as an S3 object. // // SourceImage is a required field SourceImage *Image `type:"structure" required:"true"` // The target image, either as bytes or as an S3 object. // // TargetImage is a required field TargetImage *Image `type:"structure" required:"true"` } // String returns the string representation func (s CompareFacesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CompareFacesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
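//
// For example, a minimal input that satisfies the required fields might be
// built as follows (a hedged sketch; the bucket and object names are
// illustrative, and the S3Object type with its Bucket and Name fields is
// assumed from elsewhere in this package):
//
//    src := &rekognition.Image{S3Object: &rekognition.S3Object{
//        Bucket: aws.String("my-bucket"),
//        Name:   aws.String("source.jpg"),
//    }}
//    tgt := &rekognition.Image{S3Object: &rekognition.S3Object{
//        Bucket: aws.String("my-bucket"),
//        Name:   aws.String("target.jpg"),
//    }}
//    input := &rekognition.CompareFacesInput{
//        SourceImage:         src,
//        TargetImage:         tgt,
//        SimilarityThreshold: aws.Float64(80),
//    }
//    if err := input.Validate(); err != nil {
//        // handle invalid parameters before calling CompareFaces
//    }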
func (s *CompareFacesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CompareFacesInput"} if s.SourceImage == nil { invalidParams.Add(request.NewErrParamRequired("SourceImage")) } if s.TargetImage == nil { invalidParams.Add(request.NewErrParamRequired("TargetImage")) } if s.SourceImage != nil { if err := s.SourceImage.Validate(); err != nil { invalidParams.AddNested("SourceImage", err.(request.ErrInvalidParams)) } } if s.TargetImage != nil { if err := s.TargetImage.Validate(); err != nil { invalidParams.AddNested("TargetImage", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetSimilarityThreshold sets the SimilarityThreshold field's value. func (s *CompareFacesInput) SetSimilarityThreshold(v float64) *CompareFacesInput { s.SimilarityThreshold = &v return s } // SetSourceImage sets the SourceImage field's value. func (s *CompareFacesInput) SetSourceImage(v *Image) *CompareFacesInput { s.SourceImage = v return s } // SetTargetImage sets the TargetImage field's value. func (s *CompareFacesInput) SetTargetImage(v *Image) *CompareFacesInput { s.TargetImage = v return s } // Provides information about a face in a target image that matches the source // image face analysed by CompareFaces. The Face property contains the bounding // box of the face in the target image. The Similarity property is the confidence // that the source image face matches the face in the bounding box. type CompareFacesMatch struct { _ struct{} `type:"structure"` // Provides face metadata (bounding box and confidence that the bounding box // actually contains a face). Face *ComparedFace `type:"structure"` // Level of confidence that the faces match. Similarity *float64 `type:"float"` } // String returns the string representation func (s CompareFacesMatch) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CompareFacesMatch) GoString() string { return s.String() } // SetFace sets the Face field's value. func (s *CompareFacesMatch) SetFace(v *ComparedFace) *CompareFacesMatch { s.Face = v return s } // SetSimilarity sets the Similarity field's value. func (s *CompareFacesMatch) SetSimilarity(v float64) *CompareFacesMatch { s.Similarity = &v return s } type CompareFacesOutput struct { _ struct{} `type:"structure"` // An array of faces in the target image that match the source image face. Each // CompareFacesMatch object provides the bounding box, the confidence level // that the bounding box contains a face, and the similarity score for the face // in the bounding box and the face in the source image. FaceMatches []*CompareFacesMatch `type:"list"` // The face in the source image that was used for comparison. SourceImageFace *ComparedSourceImageFace `type:"structure"` // The orientation of the source image (counterclockwise direction). If your // application displays the source image, you can use this value to correct // image orientation. The bounding box coordinates returned in SourceImageFace // represent the location of the face before the image orientation is corrected. // // If the source image is in .jpeg format, it might contain exchangeable image // (Exif) metadata that includes the image's orientation. If the Exif metadata // for the source image populates the orientation field, the value of OrientationCorrection // is null and the SourceImageFace bounding box coordinates represent the location // of the face after Exif metadata is used to correct the orientation. 
Images // in .png format don't contain Exif metadata. SourceImageOrientationCorrection *string `type:"string" enum:"OrientationCorrection"` // The orientation of the target image (in counterclockwise direction). If your // application displays the target image, you can use this value to correct // the orientation of the image. The bounding box coordinates returned in FaceMatches // and UnmatchedFaces represent face locations before the image orientation // is corrected. // // If the target image is in .jpg format, it might contain Exif metadata that // includes the orientation of the image. If the Exif metadata for the target // image populates the orientation field, the value of OrientationCorrection // is null and the bounding box coordinates in FaceMatches and UnmatchedFaces // represent the location of the face after Exif metadata is used to correct // the orientation. Images in .png format don't contain Exif metadata. TargetImageOrientationCorrection *string `type:"string" enum:"OrientationCorrection"` // An array of faces in the target image that did not match the source image // face. UnmatchedFaces []*ComparedFace `type:"list"` } // String returns the string representation func (s CompareFacesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CompareFacesOutput) GoString() string { return s.String() } // SetFaceMatches sets the FaceMatches field's value. func (s *CompareFacesOutput) SetFaceMatches(v []*CompareFacesMatch) *CompareFacesOutput { s.FaceMatches = v return s } // SetSourceImageFace sets the SourceImageFace field's value. func (s *CompareFacesOutput) SetSourceImageFace(v *ComparedSourceImageFace) *CompareFacesOutput { s.SourceImageFace = v return s } // SetSourceImageOrientationCorrection sets the SourceImageOrientationCorrection field's value. func (s *CompareFacesOutput) SetSourceImageOrientationCorrection(v string) *CompareFacesOutput { s.SourceImageOrientationCorrection = &v return s } // SetTargetImageOrientationCorrection sets the TargetImageOrientationCorrection field's value. func (s *CompareFacesOutput) SetTargetImageOrientationCorrection(v string) *CompareFacesOutput { s.TargetImageOrientationCorrection = &v return s } // SetUnmatchedFaces sets the UnmatchedFaces field's value. func (s *CompareFacesOutput) SetUnmatchedFaces(v []*ComparedFace) *CompareFacesOutput { s.UnmatchedFaces = v return s } // Provides face metadata for target image faces that are analysed by CompareFaces // and RecognizeCelebrities. type ComparedFace struct { _ struct{} `type:"structure"` // Bounding box of the face. BoundingBox *BoundingBox `type:"structure"` // Level of confidence that what the bounding box contains is a face. Confidence *float64 `type:"float"` // An array of facial landmarks. Landmarks []*Landmark `type:"list"` // Indicates the pose of the face as determined by its pitch, roll, and yaw. Pose *Pose `type:"structure"` // Identifies face image brightness and sharpness. Quality *ImageQuality `type:"structure"` } // String returns the string representation func (s ComparedFace) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ComparedFace) GoString() string { return s.String() } // SetBoundingBox sets the BoundingBox field's value. func (s *ComparedFace) SetBoundingBox(v *BoundingBox) *ComparedFace { s.BoundingBox = v return s } // SetConfidence sets the Confidence field's value. 
func (s *ComparedFace) SetConfidence(v float64) *ComparedFace { s.Confidence = &v return s } // SetLandmarks sets the Landmarks field's value. func (s *ComparedFace) SetLandmarks(v []*Landmark) *ComparedFace { s.Landmarks = v return s } // SetPose sets the Pose field's value. func (s *ComparedFace) SetPose(v *Pose) *ComparedFace { s.Pose = v return s } // SetQuality sets the Quality field's value. func (s *ComparedFace) SetQuality(v *ImageQuality) *ComparedFace { s.Quality = v return s } // Type that describes the face Amazon Rekognition chose to compare with the // faces in the target. This contains a bounding box for the selected face and // confidence level that the bounding box contains a face. Note that Amazon // Rekognition selects the largest face in the source image for this comparison. type ComparedSourceImageFace struct { _ struct{} `type:"structure"` // Bounding box of the face. BoundingBox *BoundingBox `type:"structure"` // Confidence level that the selected bounding box contains a face. Confidence *float64 `type:"float"` } // String returns the string representation func (s ComparedSourceImageFace) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ComparedSourceImageFace) GoString() string { return s.String() } // SetBoundingBox sets the BoundingBox field's value. func (s *ComparedSourceImageFace) SetBoundingBox(v *BoundingBox) *ComparedSourceImageFace { s.BoundingBox = v return s } // SetConfidence sets the Confidence field's value. func (s *ComparedSourceImageFace) SetConfidence(v float64) *ComparedSourceImageFace { s.Confidence = &v return s } type CreateCollectionInput struct { _ struct{} `type:"structure"` // ID for the collection that you are creating. // // CollectionId is a required field CollectionId *string `min:"1" type:"string" required:"true"` } // String returns the string representation func (s CreateCollectionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateCollectionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *CreateCollectionInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "CreateCollectionInput"} if s.CollectionId == nil { invalidParams.Add(request.NewErrParamRequired("CollectionId")) } if s.CollectionId != nil && len(*s.CollectionId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetCollectionId sets the CollectionId field's value. func (s *CreateCollectionInput) SetCollectionId(v string) *CreateCollectionInput { s.CollectionId = &v return s } type CreateCollectionOutput struct { _ struct{} `type:"structure"` // Amazon Resource Name (ARN) of the collection. You can use this to manage // permissions on your resources. CollectionArn *string `type:"string"` // HTTP status code indicating the result of the operation. StatusCode *int64 `type:"integer"` } // String returns the string representation func (s CreateCollectionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s CreateCollectionOutput) GoString() string { return s.String() } // SetCollectionArn sets the CollectionArn field's value. func (s *CreateCollectionOutput) SetCollectionArn(v string) *CreateCollectionOutput { s.CollectionArn = &v return s } // SetStatusCode sets the StatusCode field's value. 
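//
// For example, a collection might be created and the result inspected as
// follows (a hedged sketch; the collection ID is illustrative and the client
// is assumed to have been constructed elsewhere):
//
//    out, err := client.CreateCollection(&rekognition.CreateCollectionInput{
//        CollectionId: aws.String("my-collection"),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(out.CollectionArn), aws.Int64Value(out.StatusCode))
//    }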
func (s *CreateCollectionOutput) SetStatusCode(v int64) *CreateCollectionOutput { s.StatusCode = &v return s } type DeleteCollectionInput struct { _ struct{} `type:"structure"` // ID of the collection to delete. // // CollectionId is a required field CollectionId *string `min:"1" type:"string" required:"true"` } // String returns the string representation func (s DeleteCollectionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteCollectionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DeleteCollectionInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteCollectionInput"} if s.CollectionId == nil { invalidParams.Add(request.NewErrParamRequired("CollectionId")) } if s.CollectionId != nil && len(*s.CollectionId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetCollectionId sets the CollectionId field's value. func (s *DeleteCollectionInput) SetCollectionId(v string) *DeleteCollectionInput { s.CollectionId = &v return s } type DeleteCollectionOutput struct { _ struct{} `type:"structure"` // HTTP status code that indicates the result of the operation. StatusCode *int64 `type:"integer"` } // String returns the string representation func (s DeleteCollectionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteCollectionOutput) GoString() string { return s.String() } // SetStatusCode sets the StatusCode field's value. func (s *DeleteCollectionOutput) SetStatusCode(v int64) *DeleteCollectionOutput { s.StatusCode = &v return s } type DeleteFacesInput struct { _ struct{} `type:"structure"` // Collection from which to remove the specific faces. // // CollectionId is a required field CollectionId *string `min:"1" type:"string" required:"true"` // An array of face IDs to delete. // // FaceIds is a required field FaceIds []*string `min:"1" type:"list" required:"true"` } // String returns the string representation func (s DeleteFacesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteFacesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DeleteFacesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DeleteFacesInput"} if s.CollectionId == nil { invalidParams.Add(request.NewErrParamRequired("CollectionId")) } if s.CollectionId != nil && len(*s.CollectionId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1)) } if s.FaceIds == nil { invalidParams.Add(request.NewErrParamRequired("FaceIds")) } if s.FaceIds != nil && len(s.FaceIds) < 1 { invalidParams.Add(request.NewErrParamMinLen("FaceIds", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetCollectionId sets the CollectionId field's value. func (s *DeleteFacesInput) SetCollectionId(v string) *DeleteFacesInput { s.CollectionId = &v return s } // SetFaceIds sets the FaceIds field's value. func (s *DeleteFacesInput) SetFaceIds(v []*string) *DeleteFacesInput { s.FaceIds = v return s } type DeleteFacesOutput struct { _ struct{} `type:"structure"` // An array of strings (face IDs) of the faces that were deleted. 
DeletedFaces []*string `min:"1" type:"list"` } // String returns the string representation func (s DeleteFacesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DeleteFacesOutput) GoString() string { return s.String() } // SetDeletedFaces sets the DeletedFaces field's value. func (s *DeleteFacesOutput) SetDeletedFaces(v []*string) *DeleteFacesOutput { s.DeletedFaces = v return s } type DetectFacesInput struct { _ struct{} `type:"structure"` // An array of facial attributes you want to be returned. This can be the default // list of attributes or all attributes. If you don't specify a value for Attributes // or if you specify ["DEFAULT"], the API returns the following subset of facial // attributes: BoundingBox, Confidence, Pose, Quality and Landmarks. If you // provide ["ALL"], all facial attributes are returned but the operation will // take longer to complete. // // If you provide both, ["ALL", "DEFAULT"], the service uses a logical AND operator // to determine which attributes to return (in this case, all attributes). Attributes []*string `type:"list"` // The image in which you want to detect faces. You can specify a blob or an // S3 object. // // Image is a required field Image *Image `type:"structure" required:"true"` } // String returns the string representation func (s DetectFacesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DetectFacesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DetectFacesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DetectFacesInput"} if s.Image == nil { invalidParams.Add(request.NewErrParamRequired("Image")) } if s.Image != nil { if err := s.Image.Validate(); err != nil { invalidParams.AddNested("Image", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetAttributes sets the Attributes field's value. func (s *DetectFacesInput) SetAttributes(v []*string) *DetectFacesInput { s.Attributes = v return s } // SetImage sets the Image field's value. func (s *DetectFacesInput) SetImage(v *Image) *DetectFacesInput { s.Image = v return s } type DetectFacesOutput struct { _ struct{} `type:"structure"` // Details of each face found in the image. FaceDetails []*FaceDetail `type:"list"` // The orientation of the input image (counter-clockwise direction). If your // application displays the image, you can use this value to correct image orientation. // The bounding box coordinates returned in FaceDetails represent face locations // before the image orientation is corrected. // // If the input image is in .jpeg format, it might contain exchangeable image // (Exif) metadata that includes the image's orientation. If so, and the Exif // metadata for the input image populates the orientation field, the value of // OrientationCorrection is null and the FaceDetails bounding box coordinates // represent face locations after Exif metadata is used to correct the image // orientation. Images in .png format don't contain Exif metadata. OrientationCorrection *string `type:"string" enum:"OrientationCorrection"` } // String returns the string representation func (s DetectFacesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DetectFacesOutput) GoString() string { return s.String() } // SetFaceDetails sets the FaceDetails field's value. 
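//
// For example, face details might be requested and read back as follows (a
// hedged sketch; the image variable is assumed to be a populated
// *rekognition.Image value):
//
//    out, err := client.DetectFaces(&rekognition.DetectFacesInput{
//        Image:      image,
//        Attributes: aws.StringSlice([]string{"ALL"}),
//    })
//    if err == nil {
//        for _, fd := range out.FaceDetails {
//            fmt.Println(aws.Float64Value(fd.Confidence), fd.BoundingBox)
//        }
//    }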
func (s *DetectFacesOutput) SetFaceDetails(v []*FaceDetail) *DetectFacesOutput { s.FaceDetails = v return s } // SetOrientationCorrection sets the OrientationCorrection field's value. func (s *DetectFacesOutput) SetOrientationCorrection(v string) *DetectFacesOutput { s.OrientationCorrection = &v return s } type DetectLabelsInput struct { _ struct{} `type:"structure"` // The input image. You can provide a blob of image bytes or an S3 object. // // Image is a required field Image *Image `type:"structure" required:"true"` // Maximum number of labels you want the service to return in the response. // The service returns the specified number of highest confidence labels. MaxLabels *int64 `type:"integer"` // Specifies the minimum confidence level for the labels to return. Amazon Rekognition // doesn't return any labels with confidence lower than this specified value. // // If MinConfidence is not specified, the operation returns labels with a confidence // values greater than or equal to 50 percent. MinConfidence *float64 `type:"float"` } // String returns the string representation func (s DetectLabelsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DetectLabelsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DetectLabelsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DetectLabelsInput"} if s.Image == nil { invalidParams.Add(request.NewErrParamRequired("Image")) } if s.Image != nil { if err := s.Image.Validate(); err != nil { invalidParams.AddNested("Image", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetImage sets the Image field's value. func (s *DetectLabelsInput) SetImage(v *Image) *DetectLabelsInput { s.Image = v return s } // SetMaxLabels sets the MaxLabels field's value. func (s *DetectLabelsInput) SetMaxLabels(v int64) *DetectLabelsInput { s.MaxLabels = &v return s } // SetMinConfidence sets the MinConfidence field's value. func (s *DetectLabelsInput) SetMinConfidence(v float64) *DetectLabelsInput { s.MinConfidence = &v return s } type DetectLabelsOutput struct { _ struct{} `type:"structure"` // An array of labels for the real-world objects detected. Labels []*Label `type:"list"` // The orientation of the input image (counter-clockwise direction). If your // application displays the image, you can use this value to correct the orientation. // If Amazon Rekognition detects that the input image was rotated (for example, // by 90 degrees), it first corrects the orientation before detecting the labels. // // If the input image Exif metadata populates the orientation field, Amazon // Rekognition does not perform orientation correction and the value of OrientationCorrection // will be null. OrientationCorrection *string `type:"string" enum:"OrientationCorrection"` } // String returns the string representation func (s DetectLabelsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DetectLabelsOutput) GoString() string { return s.String() } // SetLabels sets the Labels field's value. func (s *DetectLabelsOutput) SetLabels(v []*Label) *DetectLabelsOutput { s.Labels = v return s } // SetOrientationCorrection sets the OrientationCorrection field's value. 
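//
// For example, the number of labels can be capped and low-confidence labels
// filtered out as follows (a hedged sketch; the image variable and threshold
// values are illustrative):
//
//    out, err := client.DetectLabels(&rekognition.DetectLabelsInput{
//        Image:         image,
//        MaxLabels:     aws.Int64(10),
//        MinConfidence: aws.Float64(75),
//    })
//    if err == nil {
//        for _, label := range out.Labels {
//            fmt.Println(aws.StringValue(label.Name), aws.Float64Value(label.Confidence))
//        }
//    }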
func (s *DetectLabelsOutput) SetOrientationCorrection(v string) *DetectLabelsOutput { s.OrientationCorrection = &v return s } type DetectModerationLabelsInput struct { _ struct{} `type:"structure"` // The input image as bytes or an S3 object. // // Image is a required field Image *Image `type:"structure" required:"true"` // Specifies the minimum confidence level for the labels to return. Amazon Rekognition // doesn't return any labels with a confidence level lower than this specified // value. // // If you don't specify MinConfidence, the operation returns labels with confidence // values greater than or equal to 50 percent. MinConfidence *float64 `type:"float"` } // String returns the string representation func (s DetectModerationLabelsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DetectModerationLabelsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *DetectModerationLabelsInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "DetectModerationLabelsInput"} if s.Image == nil { invalidParams.Add(request.NewErrParamRequired("Image")) } if s.Image != nil { if err := s.Image.Validate(); err != nil { invalidParams.AddNested("Image", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetImage sets the Image field's value. func (s *DetectModerationLabelsInput) SetImage(v *Image) *DetectModerationLabelsInput { s.Image = v return s } // SetMinConfidence sets the MinConfidence field's value. func (s *DetectModerationLabelsInput) SetMinConfidence(v float64) *DetectModerationLabelsInput { s.MinConfidence = &v return s } type DetectModerationLabelsOutput struct { _ struct{} `type:"structure"` // An array of labels for explicit or suggestive adult content found in the // image. The list includes the top-level label and each child label detected // in the image. This is useful for filtering specific categories of content. ModerationLabels []*ModerationLabel `type:"list"` } // String returns the string representation func (s DetectModerationLabelsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s DetectModerationLabelsOutput) GoString() string { return s.String() } // SetModerationLabels sets the ModerationLabels field's value. func (s *DetectModerationLabelsOutput) SetModerationLabels(v []*ModerationLabel) *DetectModerationLabelsOutput { s.ModerationLabels = v return s } // The emotions detected on the face, and the confidence level in the determination. // For example, HAPPY, SAD, and ANGRY. type Emotion struct { _ struct{} `type:"structure"` // Level of confidence in the determination. Confidence *float64 `type:"float"` // Type of emotion detected. Type *string `type:"string" enum:"EmotionName"` } // String returns the string representation func (s Emotion) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Emotion) GoString() string { return s.String() } // SetConfidence sets the Confidence field's value. func (s *Emotion) SetConfidence(v float64) *Emotion { s.Confidence = &v return s } // SetType sets the Type field's value. func (s *Emotion) SetType(v string) *Emotion { s.Type = &v return s } // Indicates whether or not the eyes on the face are open, and the confidence // level in the determination. 
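//
// Because Value and Confidence are pointer fields, they can be read with the
// aws value helpers, for example (a hedged sketch; detail is assumed to be a
// *FaceDetail returned by DetectFaces):
//
//    if eyes := detail.EyesOpen; eyes != nil {
//        fmt.Println(aws.BoolValue(eyes.Value), aws.Float64Value(eyes.Confidence))
//    }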
type EyeOpen struct { _ struct{} `type:"structure"` // Level of confidence in the determination. Confidence *float64 `type:"float"` // Boolean value that indicates whether the eyes on the face are open. Value *bool `type:"boolean"` } // String returns the string representation func (s EyeOpen) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s EyeOpen) GoString() string { return s.String() } // SetConfidence sets the Confidence field's value. func (s *EyeOpen) SetConfidence(v float64) *EyeOpen { s.Confidence = &v return s } // SetValue sets the Value field's value. func (s *EyeOpen) SetValue(v bool) *EyeOpen { s.Value = &v return s } // Indicates whether or not the face is wearing eye glasses, and the confidence // level in the determination. type Eyeglasses struct { _ struct{} `type:"structure"` // Level of confidence in the determination. Confidence *float64 `type:"float"` // Boolean value that indicates whether the face is wearing eye glasses or not. Value *bool `type:"boolean"` } // String returns the string representation func (s Eyeglasses) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Eyeglasses) GoString() string { return s.String() } // SetConfidence sets the Confidence field's value. func (s *Eyeglasses) SetConfidence(v float64) *Eyeglasses { s.Confidence = &v return s } // SetValue sets the Value field's value. func (s *Eyeglasses) SetValue(v bool) *Eyeglasses { s.Value = &v return s } // Describes the face properties such as the bounding box, face ID, image ID // of the input image, and external image ID that you assigned. type Face struct { _ struct{} `type:"structure"` // Bounding box of the face. BoundingBox *BoundingBox `type:"structure"` // Confidence level that the bounding box contains a face (and not a different // object such as a tree). Confidence *float64 `type:"float"` // Identifier that you assign to all the faces in the input image. ExternalImageId *string `min:"1" type:"string"` // Unique identifier that Amazon Rekognition assigns to the face. FaceId *string `type:"string"` // Unique identifier that Amazon Rekognition assigns to the input image. ImageId *string `type:"string"` } // String returns the string representation func (s Face) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Face) GoString() string { return s.String() } // SetBoundingBox sets the BoundingBox field's value. func (s *Face) SetBoundingBox(v *BoundingBox) *Face { s.BoundingBox = v return s } // SetConfidence sets the Confidence field's value. func (s *Face) SetConfidence(v float64) *Face { s.Confidence = &v return s } // SetExternalImageId sets the ExternalImageId field's value. func (s *Face) SetExternalImageId(v string) *Face { s.ExternalImageId = &v return s } // SetFaceId sets the FaceId field's value. func (s *Face) SetFaceId(v string) *Face { s.FaceId = &v return s } // SetImageId sets the ImageId field's value. func (s *Face) SetImageId(v string) *Face { s.ImageId = &v return s } // Structure containing attributes of the face that the algorithm detected. type FaceDetail struct { _ struct{} `type:"structure"` // The estimated age range, in years, for the face. Low represents the lowest // estimated age and High represents the highest estimated age. AgeRange *AgeRange `type:"structure"` // Indicates whether or not the face has a beard, and the confidence level in // the determination. 
Beard *Beard `type:"structure"` // Bounding box of the face. BoundingBox *BoundingBox `type:"structure"` // Confidence level that the bounding box contains a face (and not a different // object such as a tree). Confidence *float64 `type:"float"` // The emotions detected on the face, and the confidence level in the determination. // For example, HAPPY, SAD, and ANGRY. Emotions []*Emotion `type:"list"` // Indicates whether or not the face is wearing eye glasses, and the confidence // level in the determination. Eyeglasses *Eyeglasses `type:"structure"` // Indicates whether or not the eyes on the face are open, and the confidence // level in the determination. EyesOpen *EyeOpen `type:"structure"` // Gender of the face and the confidence level in the determination. Gender *Gender `type:"structure"` // Indicates the location of landmarks on the face. Landmarks []*Landmark `type:"list"` // Indicates whether or not the mouth on the face is open, and the confidence // level in the determination. MouthOpen *MouthOpen `type:"structure"` // Indicates whether or not the face has a mustache, and the confidence level // in the determination. Mustache *Mustache `type:"structure"` // Indicates the pose of the face as determined by its pitch, roll, and yaw. Pose *Pose `type:"structure"` // Identifies image brightness and sharpness. Quality *ImageQuality `type:"structure"` // Indicates whether or not the face is smiling, and the confidence level in // the determination. Smile *Smile `type:"structure"` // Indicates whether or not the face is wearing sunglasses, and the confidence // level in the determination. Sunglasses *Sunglasses `type:"structure"` } // String returns the string representation func (s FaceDetail) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s FaceDetail) GoString() string { return s.String() } // SetAgeRange sets the AgeRange field's value. func (s *FaceDetail) SetAgeRange(v *AgeRange) *FaceDetail { s.AgeRange = v return s } // SetBeard sets the Beard field's value. func (s *FaceDetail) SetBeard(v *Beard) *FaceDetail { s.Beard = v return s } // SetBoundingBox sets the BoundingBox field's value. func (s *FaceDetail) SetBoundingBox(v *BoundingBox) *FaceDetail { s.BoundingBox = v return s } // SetConfidence sets the Confidence field's value. func (s *FaceDetail) SetConfidence(v float64) *FaceDetail { s.Confidence = &v return s } // SetEmotions sets the Emotions field's value. func (s *FaceDetail) SetEmotions(v []*Emotion) *FaceDetail { s.Emotions = v return s } // SetEyeglasses sets the Eyeglasses field's value. func (s *FaceDetail) SetEyeglasses(v *Eyeglasses) *FaceDetail { s.Eyeglasses = v return s } // SetEyesOpen sets the EyesOpen field's value. func (s *FaceDetail) SetEyesOpen(v *EyeOpen) *FaceDetail { s.EyesOpen = v return s } // SetGender sets the Gender field's value. func (s *FaceDetail) SetGender(v *Gender) *FaceDetail { s.Gender = v return s } // SetLandmarks sets the Landmarks field's value. func (s *FaceDetail) SetLandmarks(v []*Landmark) *FaceDetail { s.Landmarks = v return s } // SetMouthOpen sets the MouthOpen field's value. func (s *FaceDetail) SetMouthOpen(v *MouthOpen) *FaceDetail { s.MouthOpen = v return s } // SetMustache sets the Mustache field's value. func (s *FaceDetail) SetMustache(v *Mustache) *FaceDetail { s.Mustache = v return s } // SetPose sets the Pose field's value. func (s *FaceDetail) SetPose(v *Pose) *FaceDetail { s.Pose = v return s } // SetQuality sets the Quality field's value. 
func (s *FaceDetail) SetQuality(v *ImageQuality) *FaceDetail { s.Quality = v return s } // SetSmile sets the Smile field's value. func (s *FaceDetail) SetSmile(v *Smile) *FaceDetail { s.Smile = v return s } // SetSunglasses sets the Sunglasses field's value. func (s *FaceDetail) SetSunglasses(v *Sunglasses) *FaceDetail { s.Sunglasses = v return s } // Provides face metadata. In addition, it also provides the confidence in the // match of this face with the input face. type FaceMatch struct { _ struct{} `type:"structure"` // Describes the face properties such as the bounding box, face ID, image ID // of the source image, and external image ID that you assigned. Face *Face `type:"structure"` // Confidence in the match of this face with the input face. Similarity *float64 `type:"float"` } // String returns the string representation func (s FaceMatch) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s FaceMatch) GoString() string { return s.String() } // SetFace sets the Face field's value. func (s *FaceMatch) SetFace(v *Face) *FaceMatch { s.Face = v return s } // SetSimilarity sets the Similarity field's value. func (s *FaceMatch) SetSimilarity(v float64) *FaceMatch { s.Similarity = &v return s } // Object containing both the face metadata (stored in the back-end database) // and facial attributes that are detected but aren't stored in the database. type FaceRecord struct { _ struct{} `type:"structure"` // Describes the face properties such as the bounding box, face ID, image ID // of the input image, and external image ID that you assigned. Face *Face `type:"structure"` // Structure containing attributes of the face that the algorithm detected. FaceDetail *FaceDetail `type:"structure"` } // String returns the string representation func (s FaceRecord) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s FaceRecord) GoString() string { return s.String() } // SetFace sets the Face field's value. func (s *FaceRecord) SetFace(v *Face) *FaceRecord { s.Face = v return s } // SetFaceDetail sets the FaceDetail field's value. func (s *FaceRecord) SetFaceDetail(v *FaceDetail) *FaceRecord { s.FaceDetail = v return s } // Gender of the face and the confidence level in the determination. type Gender struct { _ struct{} `type:"structure"` // Level of confidence in the determination. Confidence *float64 `type:"float"` // Gender of the face. Value *string `type:"string" enum:"GenderType"` } // String returns the string representation func (s Gender) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Gender) GoString() string { return s.String() } // SetConfidence sets the Confidence field's value. func (s *Gender) SetConfidence(v float64) *Gender { s.Confidence = &v return s } // SetValue sets the Value field's value. func (s *Gender) SetValue(v string) *Gender { s.Value = &v return s } type GetCelebrityInfoInput struct { _ struct{} `type:"structure"` // The ID for the celebrity. You get the celebrity ID from a call to the operation, // which recognizes celebrities in an image. // // Id is a required field Id *string `type:"string" required:"true"` } // String returns the string representation func (s GetCelebrityInfoInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s GetCelebrityInfoInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
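//
// For example, the Id returned for a recognized celebrity can be looked up
// later as follows (a hedged sketch; the ID value is illustrative):
//
//    info, err := client.GetCelebrityInfo(&rekognition.GetCelebrityInfoInput{
//        Id: aws.String("1AbCdE2f"),
//    })
//    if err == nil {
//        fmt.Println(aws.StringValue(info.Name), info.Urls)
//    }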
func (s *GetCelebrityInfoInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "GetCelebrityInfoInput"} if s.Id == nil { invalidParams.Add(request.NewErrParamRequired("Id")) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetId sets the Id field's value. func (s *GetCelebrityInfoInput) SetId(v string) *GetCelebrityInfoInput { s.Id = &v return s } type GetCelebrityInfoOutput struct { _ struct{} `type:"structure"` // The name of the celebrity. Name *string `type:"string"` // An array of URLs pointing to additional celebrity information. Urls []*string `type:"list"` } // String returns the string representation func (s GetCelebrityInfoOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s GetCelebrityInfoOutput) GoString() string { return s.String() } // SetName sets the Name field's value. func (s *GetCelebrityInfoOutput) SetName(v string) *GetCelebrityInfoOutput { s.Name = &v return s } // SetUrls sets the Urls field's value. func (s *GetCelebrityInfoOutput) SetUrls(v []*string) *GetCelebrityInfoOutput { s.Urls = v return s } // Provides the input image either as bytes or an S3 object. // // You pass image bytes to a Rekognition API operation by using the Bytes property. // For example, you would use the Bytes property to pass an image loaded from // a local file system. Image bytes passed by using the Bytes property must // be base64-encoded. Your code may not need to encode image bytes if you are // using an AWS SDK to call Rekognition API operations. For more information, // see example4. // // You pass images stored in an S3 bucket to a Rekognition API operation by // using the S3Object property. Images stored in an S3 bucket do not need to // be base64-encoded. // // The region for the S3 bucket containing the S3 object must match the region // you use for Amazon Rekognition operations. // // If you use the Amazon CLI to call Amazon Rekognition operations, passing // image bytes using the Bytes property is not supported. You must first upload // the image to an Amazon S3 bucket and then call the operation using the S3Object // property. // // For Amazon Rekognition to process an S3 object, the user must have permission // to access the S3 object. For more information, see manage-access-resource-policies. type Image struct { _ struct{} `type:"structure"` // Blob of image bytes up to 5 MBs. // // Bytes is automatically base64 encoded/decoded by the SDK. Bytes []byte `min:"1" type:"blob"` // Identifies an S3 object as the image source. S3Object *S3Object `type:"structure"` } // String returns the string representation func (s Image) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Image) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *Image) Validate() error { invalidParams := request.ErrInvalidParams{Context: "Image"} if s.Bytes != nil && len(s.Bytes) < 1 { invalidParams.Add(request.NewErrParamMinLen("Bytes", 1)) } if s.S3Object != nil { if err := s.S3Object.Validate(); err != nil { invalidParams.AddNested("S3Object", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetBytes sets the Bytes field's value. func (s *Image) SetBytes(v []byte) *Image { s.Bytes = v return s } // SetS3Object sets the S3Object field's value. 
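//
// For example, an image can be supplied either from local bytes or from an
// S3 object (a hedged sketch; the file path, bucket, and key are
// illustrative, and ioutil.ReadFile from the standard library is assumed for
// the byte-slice variant):
//
//    buf, _ := ioutil.ReadFile("photo.jpg")
//    fromBytes := &rekognition.Image{Bytes: buf}
//
//    fromS3 := &rekognition.Image{S3Object: &rekognition.S3Object{
//        Bucket: aws.String("my-bucket"),
//        Name:   aws.String("photo.jpg"),
//    }}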
func (s *Image) SetS3Object(v *S3Object) *Image { s.S3Object = v return s } // Identifies face image brightness and sharpness. type ImageQuality struct { _ struct{} `type:"structure"` // Value representing brightness of the face. The service returns a value between // 0 and 100 (inclusive). A higher value indicates a brighter face image. Brightness *float64 `type:"float"` // Value representing sharpness of the face. The service returns a value between // 0 and 100 (inclusive). A higher value indicates a sharper face image. Sharpness *float64 `type:"float"` } // String returns the string representation func (s ImageQuality) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ImageQuality) GoString() string { return s.String() } // SetBrightness sets the Brightness field's value. func (s *ImageQuality) SetBrightness(v float64) *ImageQuality { s.Brightness = &v return s } // SetSharpness sets the Sharpness field's value. func (s *ImageQuality) SetSharpness(v float64) *ImageQuality { s.Sharpness = &v return s } type IndexFacesInput struct { _ struct{} `type:"structure"` // The ID of an existing collection to which you want to add the faces that // are detected in the input images. // // CollectionId is a required field CollectionId *string `min:"1" type:"string" required:"true"` // An array of facial attributes that you want to be returned. This can be the // default list of attributes or all attributes. If you don't specify a value // for Attributes or if you specify ["DEFAULT"], the API returns the following // subset of facial attributes: BoundingBox, Confidence, Pose, Quality and Landmarks. // If you provide ["ALL"], all facial attributes are returned but the operation // will take longer to complete. // // If you provide both, ["ALL", "DEFAULT"], the service uses a logical AND operator // to determine which attributes to return (in this case, all attributes). DetectionAttributes []*string `type:"list"` // ID you want to assign to all the faces detected in the image. ExternalImageId *string `min:"1" type:"string"` // The input image as bytes or an S3 object. // // Image is a required field Image *Image `type:"structure" required:"true"` } // String returns the string representation func (s IndexFacesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s IndexFacesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *IndexFacesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "IndexFacesInput"} if s.CollectionId == nil { invalidParams.Add(request.NewErrParamRequired("CollectionId")) } if s.CollectionId != nil && len(*s.CollectionId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1)) } if s.ExternalImageId != nil && len(*s.ExternalImageId) < 1 { invalidParams.Add(request.NewErrParamMinLen("ExternalImageId", 1)) } if s.Image == nil { invalidParams.Add(request.NewErrParamRequired("Image")) } if s.Image != nil { if err := s.Image.Validate(); err != nil { invalidParams.AddNested("Image", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetCollectionId sets the CollectionId field's value. func (s *IndexFacesInput) SetCollectionId(v string) *IndexFacesInput { s.CollectionId = &v return s } // SetDetectionAttributes sets the DetectionAttributes field's value. 
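//
// For example, faces from an image might be added to a collection as follows
// (a hedged sketch; the collection ID, external image ID, and image variable
// are illustrative):
//
//    out, err := client.IndexFaces(&rekognition.IndexFacesInput{
//        CollectionId:    aws.String("my-collection"),
//        ExternalImageId: aws.String("photo-001"),
//        Image:           image,
//    })
//    if err == nil {
//        for _, rec := range out.FaceRecords {
//            fmt.Println(aws.StringValue(rec.Face.FaceId))
//        }
//    }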
func (s *IndexFacesInput) SetDetectionAttributes(v []*string) *IndexFacesInput { s.DetectionAttributes = v return s } // SetExternalImageId sets the ExternalImageId field's value. func (s *IndexFacesInput) SetExternalImageId(v string) *IndexFacesInput { s.ExternalImageId = &v return s } // SetImage sets the Image field's value. func (s *IndexFacesInput) SetImage(v *Image) *IndexFacesInput { s.Image = v return s } type IndexFacesOutput struct { _ struct{} `type:"structure"` // An array of faces detected and added to the collection. For more information, // see howitworks-index-faces. FaceRecords []*FaceRecord `type:"list"` // The orientation of the input image (counterclockwise direction). If your // application displays the image, you can use this value to correct image orientation. // The bounding box coordinates returned in FaceRecords represent face locations // before the image orientation is corrected. // // If the input image is in jpeg format, it might contain exchangeable image // (Exif) metadata. If so, and the Exif metadata populates the orientation field, // the value of OrientationCorrection is null and the bounding box coordinates // in FaceRecords represent face locations after Exif metadata is used to correct // the image orientation. Images in .png format don't contain Exif metadata. OrientationCorrection *string `type:"string" enum:"OrientationCorrection"` } // String returns the string representation func (s IndexFacesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s IndexFacesOutput) GoString() string { return s.String() } // SetFaceRecords sets the FaceRecords field's value. func (s *IndexFacesOutput) SetFaceRecords(v []*FaceRecord) *IndexFacesOutput { s.FaceRecords = v return s } // SetOrientationCorrection sets the OrientationCorrection field's value. func (s *IndexFacesOutput) SetOrientationCorrection(v string) *IndexFacesOutput { s.OrientationCorrection = &v return s } // Structure containing details about the detected label, including the name // and level of confidence. type Label struct { _ struct{} `type:"structure"` // Level of confidence. Confidence *float64 `type:"float"` // The name (label) of the object. Name *string `type:"string"` } // String returns the string representation func (s Label) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Label) GoString() string { return s.String() } // SetConfidence sets the Confidence field's value. func (s *Label) SetConfidence(v float64) *Label { s.Confidence = &v return s } // SetName sets the Name field's value. func (s *Label) SetName(v string) *Label { s.Name = &v return s } // Indicates the location of the landmark on the face. type Landmark struct { _ struct{} `type:"structure"` // Type of the landmark. Type *string `type:"string" enum:"LandmarkType"` // x-coordinate from the top left of the landmark expressed as the ratio of // the width of the image. For example, if the image is 700x200 and the x-coordinate // of the landmark is at 350 pixels, this value is 0.5. X *float64 `type:"float"` // y-coordinate from the top left of the landmark expressed as the ratio of // the height of the image. For example, if the image is 700x200 and the y-coordinate // of the landmark is at 100 pixels, this value is 0.5.
Y *float64 `type:"float"` } // String returns the string representation func (s Landmark) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Landmark) GoString() string { return s.String() } // SetType sets the Type field's value. func (s *Landmark) SetType(v string) *Landmark { s.Type = &v return s } // SetX sets the X field's value. func (s *Landmark) SetX(v float64) *Landmark { s.X = &v return s } // SetY sets the Y field's value. func (s *Landmark) SetY(v float64) *Landmark { s.Y = &v return s } type ListCollectionsInput struct { _ struct{} `type:"structure"` // Maximum number of collection IDs to return. MaxResults *int64 `type:"integer"` // Pagination token from the previous response. NextToken *string `type:"string"` } // String returns the string representation func (s ListCollectionsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListCollectionsInput) GoString() string { return s.String() } // SetMaxResults sets the MaxResults field's value. func (s *ListCollectionsInput) SetMaxResults(v int64) *ListCollectionsInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. func (s *ListCollectionsInput) SetNextToken(v string) *ListCollectionsInput { s.NextToken = &v return s } type ListCollectionsOutput struct { _ struct{} `type:"structure"` // An array of collection IDs. CollectionIds []*string `type:"list"` // If the result is truncated, the response provides a NextToken that you can // use in the subsequent request to fetch the next set of collection IDs. NextToken *string `type:"string"` } // String returns the string representation func (s ListCollectionsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListCollectionsOutput) GoString() string { return s.String() } // SetCollectionIds sets the CollectionIds field's value. func (s *ListCollectionsOutput) SetCollectionIds(v []*string) *ListCollectionsOutput { s.CollectionIds = v return s } // SetNextToken sets the NextToken field's value. func (s *ListCollectionsOutput) SetNextToken(v string) *ListCollectionsOutput { s.NextToken = &v return s } type ListFacesInput struct { _ struct{} `type:"structure"` // ID of the collection from which to list the faces. // // CollectionId is a required field CollectionId *string `min:"1" type:"string" required:"true"` // Maximum number of faces to return. MaxResults *int64 `type:"integer"` // If the previous response was incomplete (because there is more data to retrieve), // Amazon Rekognition returns a pagination token in the response. You can use // this pagination token to retrieve the next set of faces. NextToken *string `type:"string"` } // String returns the string representation func (s ListFacesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListFacesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *ListFacesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "ListFacesInput"} if s.CollectionId == nil { invalidParams.Add(request.NewErrParamRequired("CollectionId")) } if s.CollectionId != nil && len(*s.CollectionId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetCollectionId sets the CollectionId field's value. 
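// A hand-written pagination sketch (not generated code) for ListCollections;
// ListFaces paginates the same way with its own NextToken. It assumes a
// configured *Rekognition client named svc:
//
//    input := &ListCollectionsInput{MaxResults: aws.Int64(10)}
//    for {
//        out, err := svc.ListCollections(input)
//        if err != nil {
//            break // handle the error
//        }
//        for _, id := range out.CollectionIds {
//            _ = aws.StringValue(id) // use the collection ID
//        }
//        if out.NextToken == nil {
//            break // no more pages
//        }
//        input.NextToken = out.NextToken
//    }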
func (s *ListFacesInput) SetCollectionId(v string) *ListFacesInput { s.CollectionId = &v return s } // SetMaxResults sets the MaxResults field's value. func (s *ListFacesInput) SetMaxResults(v int64) *ListFacesInput { s.MaxResults = &v return s } // SetNextToken sets the NextToken field's value. func (s *ListFacesInput) SetNextToken(v string) *ListFacesInput { s.NextToken = &v return s } type ListFacesOutput struct { _ struct{} `type:"structure"` // An array of Face objects. Faces []*Face `type:"list"` // If the response is truncated, Amazon Rekognition returns this token that // you can use in the subsequent request to retrieve the next set of faces. NextToken *string `type:"string"` } // String returns the string representation func (s ListFacesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ListFacesOutput) GoString() string { return s.String() } // SetFaces sets the Faces field's value. func (s *ListFacesOutput) SetFaces(v []*Face) *ListFacesOutput { s.Faces = v return s } // SetNextToken sets the NextToken field's value. func (s *ListFacesOutput) SetNextToken(v string) *ListFacesOutput { s.NextToken = &v return s } // Provides information about a single type of moderated content found in an // image. Each type of moderated content has a label within a hierarchical taxonomy. // For more information, see image-moderation. type ModerationLabel struct { _ struct{} `type:"structure"` // Specifies the confidence that Amazon Rekognition has that the label has been // correctly identified. // // If you don't specify the MinConfidence parameter in the call to DetectModerationLabels, // the operation returns labels with a confidence value greater than or equal // to 50 percent. Confidence *float64 `type:"float"` // The label name for the type of content detected in the image. Name *string `type:"string"` // The name for the parent label. Labels at the top-level of the hierarchy have // the parent label "". ParentName *string `type:"string"` } // String returns the string representation func (s ModerationLabel) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s ModerationLabel) GoString() string { return s.String() } // SetConfidence sets the Confidence field's value. func (s *ModerationLabel) SetConfidence(v float64) *ModerationLabel { s.Confidence = &v return s } // SetName sets the Name field's value. func (s *ModerationLabel) SetName(v string) *ModerationLabel { s.Name = &v return s } // SetParentName sets the ParentName field's value. func (s *ModerationLabel) SetParentName(v string) *ModerationLabel { s.ParentName = &v return s } // Indicates whether or not the mouth on the face is open, and the confidence // level in the determination. type MouthOpen struct { _ struct{} `type:"structure"` // Level of confidence in the determination. Confidence *float64 `type:"float"` // Boolean value that indicates whether the mouth on the face is open or not. Value *bool `type:"boolean"` } // String returns the string representation func (s MouthOpen) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s MouthOpen) GoString() string { return s.String() } // SetConfidence sets the Confidence field's value. func (s *MouthOpen) SetConfidence(v float64) *MouthOpen { s.Confidence = &v return s } // SetValue sets the Value field's value. 
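// A hand-written sketch (not generated code) of reading the ModerationLabel
// values returned by DetectModerationLabels, assuming a configured
// *Rekognition client named svc and a placeholder S3 object. MinConfidence
// raises the 50 percent default described above:
//
//    out, err := svc.DetectModerationLabels(&DetectModerationLabelsInput{
//        MinConfidence: aws.Float64(75),
//        Image: &Image{
//            S3Object: &S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("photo.jpg"),
//            },
//        },
//    })
//    if err != nil {
//        // handle the error
//    }
//    for _, label := range out.ModerationLabels {
//        // ParentName is "" for top-level labels in the taxonomy.
//        _, _ = aws.StringValue(label.Name), aws.StringValue(label.ParentName)
//    }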
func (s *MouthOpen) SetValue(v bool) *MouthOpen { s.Value = &v return s } // Indicates whether or not the face has a mustache, and the confidence level // in the determination. type Mustache struct { _ struct{} `type:"structure"` // Level of confidence in the determination. Confidence *float64 `type:"float"` // Boolean value that indicates whether the face has a mustache or not. Value *bool `type:"boolean"` } // String returns the string representation func (s Mustache) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Mustache) GoString() string { return s.String() } // SetConfidence sets the Confidence field's value. func (s *Mustache) SetConfidence(v float64) *Mustache { s.Confidence = &v return s } // SetValue sets the Value field's value. func (s *Mustache) SetValue(v bool) *Mustache { s.Value = &v return s } // Indicates the pose of the face as determined by its pitch, roll, and yaw. type Pose struct { _ struct{} `type:"structure"` // Value representing the face rotation on the pitch axis. Pitch *float64 `type:"float"` // Value representing the face rotation on the roll axis. Roll *float64 `type:"float"` // Value representing the face rotation on the yaw axis. Yaw *float64 `type:"float"` } // String returns the string representation func (s Pose) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Pose) GoString() string { return s.String() } // SetPitch sets the Pitch field's value. func (s *Pose) SetPitch(v float64) *Pose { s.Pitch = &v return s } // SetRoll sets the Roll field's value. func (s *Pose) SetRoll(v float64) *Pose { s.Roll = &v return s } // SetYaw sets the Yaw field's value. func (s *Pose) SetYaw(v float64) *Pose { s.Yaw = &v return s } type RecognizeCelebritiesInput struct { _ struct{} `type:"structure"` // The input image to use for celebrity recognition. // // Image is a required field Image *Image `type:"structure" required:"true"` } // String returns the string representation func (s RecognizeCelebritiesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RecognizeCelebritiesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *RecognizeCelebritiesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "RecognizeCelebritiesInput"} if s.Image == nil { invalidParams.Add(request.NewErrParamRequired("Image")) } if s.Image != nil { if err := s.Image.Validate(); err != nil { invalidParams.AddNested("Image", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetImage sets the Image field's value. func (s *RecognizeCelebritiesInput) SetImage(v *Image) *RecognizeCelebritiesInput { s.Image = v return s } type RecognizeCelebritiesOutput struct { _ struct{} `type:"structure"` // Details about each celebrity found in the image. Amazon Rekognition can detect // a maximum of 15 celebrities in an image. CelebrityFaces []*Celebrity `type:"list"` // The orientation of the input image (counterclockwise direction). If your // application displays the image, you can use this value to correct the orientation. // The bounding box coordinates returned in CelebrityFaces and UnrecognizedFaces // represent face locations before the image orientation is corrected.
// // If the input image is in .jpeg format, it might contain exchangeable image // (Exif) metadata that includes the image's orientation. If so, and the Exif // metadata for the input image populates the orientation field, the value of // OrientationCorrection is null and the CelebrityFaces and UnrecognizedFaces // bounding box coordinates represent face locations after Exif metadata is // used to correct the image orientation. Images in .png format don't contain // Exif metadata. OrientationCorrection *string `type:"string" enum:"OrientationCorrection"` // Details about each unrecognized face in the image. UnrecognizedFaces []*ComparedFace `type:"list"` } // String returns the string representation func (s RecognizeCelebritiesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s RecognizeCelebritiesOutput) GoString() string { return s.String() } // SetCelebrityFaces sets the CelebrityFaces field's value. func (s *RecognizeCelebritiesOutput) SetCelebrityFaces(v []*Celebrity) *RecognizeCelebritiesOutput { s.CelebrityFaces = v return s } // SetOrientationCorrection sets the OrientationCorrection field's value. func (s *RecognizeCelebritiesOutput) SetOrientationCorrection(v string) *RecognizeCelebritiesOutput { s.OrientationCorrection = &v return s } // SetUnrecognizedFaces sets the UnrecognizedFaces field's value. func (s *RecognizeCelebritiesOutput) SetUnrecognizedFaces(v []*ComparedFace) *RecognizeCelebritiesOutput { s.UnrecognizedFaces = v return s } // Provides the S3 bucket name and object name. // // The region for the S3 bucket containing the S3 object must match the region // you use for Amazon Rekognition operations. // // For Amazon Rekognition to process an S3 object, the user must have permission // to access the S3 object. For more information, see manage-access-resource-policies. type S3Object struct { _ struct{} `type:"structure"` // Name of the S3 bucket. Bucket *string `min:"3" type:"string"` // S3 object key name. Name *string `min:"1" type:"string"` // If the bucket is versioning enabled, you can specify the object version. Version *string `min:"1" type:"string"` } // String returns the string representation func (s S3Object) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s S3Object) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *S3Object) Validate() error { invalidParams := request.ErrInvalidParams{Context: "S3Object"} if s.Bucket != nil && len(*s.Bucket) < 3 { invalidParams.Add(request.NewErrParamMinLen("Bucket", 3)) } if s.Name != nil && len(*s.Name) < 1 { invalidParams.Add(request.NewErrParamMinLen("Name", 1)) } if s.Version != nil && len(*s.Version) < 1 { invalidParams.Add(request.NewErrParamMinLen("Version", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetBucket sets the Bucket field's value. func (s *S3Object) SetBucket(v string) *S3Object { s.Bucket = &v return s } // SetName sets the Name field's value. func (s *S3Object) SetName(v string) *S3Object { s.Name = &v return s } // SetVersion sets the Version field's value. func (s *S3Object) SetVersion(v string) *S3Object { s.Version = &v return s } type SearchFacesByImageInput struct { _ struct{} `type:"structure"` // ID of the collection to search. 
// // CollectionId is a required field CollectionId *string `min:"1" type:"string" required:"true"` // (Optional) Specifies the minimum confidence in the face match to return. // For example, don't return any matches where confidence in matches is less // than 70%. FaceMatchThreshold *float64 `type:"float"` // The input image as bytes or an S3 object. // // Image is a required field Image *Image `type:"structure" required:"true"` // Maximum number of faces to return. The operation returns the maximum number // of faces with the highest confidence in the match. MaxFaces *int64 `min:"1" type:"integer"` } // String returns the string representation func (s SearchFacesByImageInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SearchFacesByImageInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *SearchFacesByImageInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "SearchFacesByImageInput"} if s.CollectionId == nil { invalidParams.Add(request.NewErrParamRequired("CollectionId")) } if s.CollectionId != nil && len(*s.CollectionId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1)) } if s.Image == nil { invalidParams.Add(request.NewErrParamRequired("Image")) } if s.MaxFaces != nil && *s.MaxFaces < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxFaces", 1)) } if s.Image != nil { if err := s.Image.Validate(); err != nil { invalidParams.AddNested("Image", err.(request.ErrInvalidParams)) } } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetCollectionId sets the CollectionId field's value. func (s *SearchFacesByImageInput) SetCollectionId(v string) *SearchFacesByImageInput { s.CollectionId = &v return s } // SetFaceMatchThreshold sets the FaceMatchThreshold field's value. func (s *SearchFacesByImageInput) SetFaceMatchThreshold(v float64) *SearchFacesByImageInput { s.FaceMatchThreshold = &v return s } // SetImage sets the Image field's value. func (s *SearchFacesByImageInput) SetImage(v *Image) *SearchFacesByImageInput { s.Image = v return s } // SetMaxFaces sets the MaxFaces field's value. func (s *SearchFacesByImageInput) SetMaxFaces(v int64) *SearchFacesByImageInput { s.MaxFaces = &v return s } type SearchFacesByImageOutput struct { _ struct{} `type:"structure"` // An array of faces that match the input face, along with the confidence in // the match. FaceMatches []*FaceMatch `type:"list"` // The bounding box around the face in the input image that Amazon Rekognition // used for the search. SearchedFaceBoundingBox *BoundingBox `type:"structure"` // The level of confidence that the SearchedFaceBoundingBox contains a face. SearchedFaceConfidence *float64 `type:"float"` } // String returns the string representation func (s SearchFacesByImageOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SearchFacesByImageOutput) GoString() string { return s.String() } // SetFaceMatches sets the FaceMatches field's value. func (s *SearchFacesByImageOutput) SetFaceMatches(v []*FaceMatch) *SearchFacesByImageOutput { s.FaceMatches = v return s } // SetSearchedFaceBoundingBox sets the SearchedFaceBoundingBox field's value. func (s *SearchFacesByImageOutput) SetSearchedFaceBoundingBox(v *BoundingBox) *SearchFacesByImageOutput { s.SearchedFaceBoundingBox = v return s } // SetSearchedFaceConfidence sets the SearchedFaceConfidence field's value.
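// A hand-written sketch (not generated code) of calling SearchFacesByImage,
// assuming a configured *Rekognition client named svc; the collection ID,
// bucket, and key are placeholders. SearchFaces is used the same way, except
// that it takes the FaceId of an already-indexed face instead of an Image:
//
//    out, err := svc.SearchFacesByImage(&SearchFacesByImageInput{
//        CollectionId:       aws.String("my-collection"),
//        FaceMatchThreshold: aws.Float64(90),
//        MaxFaces:           aws.Int64(5),
//        Image: &Image{
//            S3Object: &S3Object{
//                Bucket: aws.String("my-bucket"),
//                Name:   aws.String("query.jpg"),
//            },
//        },
//    })
//    if err != nil {
//        // handle the error
//    }
//    for _, match := range out.FaceMatches {
//        // Each match pairs a stored Face with a Similarity score.
//        _, _ = match.Face, match.Similarity
//    }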
func (s *SearchFacesByImageOutput) SetSearchedFaceConfidence(v float64) *SearchFacesByImageOutput { s.SearchedFaceConfidence = &v return s } type SearchFacesInput struct { _ struct{} `type:"structure"` // ID of the collection the face belongs to. // // CollectionId is a required field CollectionId *string `min:"1" type:"string" required:"true"` // ID of a face to find matches for in the collection. // // FaceId is a required field FaceId *string `type:"string" required:"true"` // Optional value specifying the minimum confidence in the face match to return. // For example, don't return any matches where confidence in matches is less // than 70%. FaceMatchThreshold *float64 `type:"float"` // Maximum number of faces to return. The operation returns the maximum number // of faces with the highest confidence in the match. MaxFaces *int64 `min:"1" type:"integer"` } // String returns the string representation func (s SearchFacesInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SearchFacesInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. func (s *SearchFacesInput) Validate() error { invalidParams := request.ErrInvalidParams{Context: "SearchFacesInput"} if s.CollectionId == nil { invalidParams.Add(request.NewErrParamRequired("CollectionId")) } if s.CollectionId != nil && len(*s.CollectionId) < 1 { invalidParams.Add(request.NewErrParamMinLen("CollectionId", 1)) } if s.FaceId == nil { invalidParams.Add(request.NewErrParamRequired("FaceId")) } if s.MaxFaces != nil && *s.MaxFaces < 1 { invalidParams.Add(request.NewErrParamMinValue("MaxFaces", 1)) } if invalidParams.Len() > 0 { return invalidParams } return nil } // SetCollectionId sets the CollectionId field's value. func (s *SearchFacesInput) SetCollectionId(v string) *SearchFacesInput { s.CollectionId = &v return s } // SetFaceId sets the FaceId field's value. func (s *SearchFacesInput) SetFaceId(v string) *SearchFacesInput { s.FaceId = &v return s } // SetFaceMatchThreshold sets the FaceMatchThreshold field's value. func (s *SearchFacesInput) SetFaceMatchThreshold(v float64) *SearchFacesInput { s.FaceMatchThreshold = &v return s } // SetMaxFaces sets the MaxFaces field's value. func (s *SearchFacesInput) SetMaxFaces(v int64) *SearchFacesInput { s.MaxFaces = &v return s } type SearchFacesOutput struct { _ struct{} `type:"structure"` // An array of faces that matched the input face, along with the confidence // in the match. FaceMatches []*FaceMatch `type:"list"` // ID of the face that was searched for matches in a collection. SearchedFaceId *string `type:"string"` } // String returns the string representation func (s SearchFacesOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s SearchFacesOutput) GoString() string { return s.String() } // SetFaceMatches sets the FaceMatches field's value. func (s *SearchFacesOutput) SetFaceMatches(v []*FaceMatch) *SearchFacesOutput { s.FaceMatches = v return s } // SetSearchedFaceId sets the SearchedFaceId field's value. func (s *SearchFacesOutput) SetSearchedFaceId(v string) *SearchFacesOutput { s.SearchedFaceId = &v return s } // Indicates whether or not the face is smiling, and the confidence level in // the determination. type Smile struct { _ struct{} `type:"structure"` // Level of confidence in the determination. Confidence *float64 `type:"float"` // Boolean value that indicates whether the face is smiling or not. 
Value *bool `type:"boolean"` } // String returns the string representation func (s Smile) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Smile) GoString() string { return s.String() } // SetConfidence sets the Confidence field's value. func (s *Smile) SetConfidence(v float64) *Smile { s.Confidence = &v return s } // SetValue sets the Value field's value. func (s *Smile) SetValue(v bool) *Smile { s.Value = &v return s } // Indicates whether or not the face is wearing sunglasses, and the confidence // level in the determination. type Sunglasses struct { _ struct{} `type:"structure"` // Level of confidence in the determination. Confidence *float64 `type:"float"` // Boolean value that indicates whether the face is wearing sunglasses or not. Value *bool `type:"boolean"` } // String returns the string representation func (s Sunglasses) String() string { return awsutil.Prettify(s) } // GoString returns the string representation func (s Sunglasses) GoString() string { return s.String() } // SetConfidence sets the Confidence field's value. func (s *Sunglasses) SetConfidence(v float64) *Sunglasses { s.Confidence = &v return s } // SetValue sets the Value field's value. func (s *Sunglasses) SetValue(v bool) *Sunglasses { s.Value = &v return s } const ( // AttributeDefault is a Attribute enum value AttributeDefault = "DEFAULT" // AttributeAll is a Attribute enum value AttributeAll = "ALL" ) const ( // EmotionNameHappy is a EmotionName enum value EmotionNameHappy = "HAPPY" // EmotionNameSad is a EmotionName enum value EmotionNameSad = "SAD" // EmotionNameAngry is a EmotionName enum value EmotionNameAngry = "ANGRY" // EmotionNameConfused is a EmotionName enum value EmotionNameConfused = "CONFUSED" // EmotionNameDisgusted is a EmotionName enum value EmotionNameDisgusted = "DISGUSTED" // EmotionNameSurprised is a EmotionName enum value EmotionNameSurprised = "SURPRISED" // EmotionNameCalm is a EmotionName enum value EmotionNameCalm = "CALM" // EmotionNameUnknown is a EmotionName enum value EmotionNameUnknown = "UNKNOWN" ) const ( // GenderTypeMale is a GenderType enum value GenderTypeMale = "MALE" // GenderTypeFemale is a GenderType enum value GenderTypeFemale = "FEMALE" ) const ( // LandmarkTypeEyeLeft is a LandmarkType enum value LandmarkTypeEyeLeft = "EYE_LEFT" // LandmarkTypeEyeRight is a LandmarkType enum value LandmarkTypeEyeRight = "EYE_RIGHT" // LandmarkTypeNose is a LandmarkType enum value LandmarkTypeNose = "NOSE" // LandmarkTypeMouthLeft is a LandmarkType enum value LandmarkTypeMouthLeft = "MOUTH_LEFT" // LandmarkTypeMouthRight is a LandmarkType enum value LandmarkTypeMouthRight = "MOUTH_RIGHT" // LandmarkTypeLeftEyebrowLeft is a LandmarkType enum value LandmarkTypeLeftEyebrowLeft = "LEFT_EYEBROW_LEFT" // LandmarkTypeLeftEyebrowRight is a LandmarkType enum value LandmarkTypeLeftEyebrowRight = "LEFT_EYEBROW_RIGHT" // LandmarkTypeLeftEyebrowUp is a LandmarkType enum value LandmarkTypeLeftEyebrowUp = "LEFT_EYEBROW_UP" // LandmarkTypeRightEyebrowLeft is a LandmarkType enum value LandmarkTypeRightEyebrowLeft = "RIGHT_EYEBROW_LEFT" // LandmarkTypeRightEyebrowRight is a LandmarkType enum value LandmarkTypeRightEyebrowRight = "RIGHT_EYEBROW_RIGHT" // LandmarkTypeRightEyebrowUp is a LandmarkType enum value LandmarkTypeRightEyebrowUp = "RIGHT_EYEBROW_UP" // LandmarkTypeLeftEyeLeft is a LandmarkType enum value LandmarkTypeLeftEyeLeft = "LEFT_EYE_LEFT" // LandmarkTypeLeftEyeRight is a LandmarkType enum value LandmarkTypeLeftEyeRight = "LEFT_EYE_RIGHT" 
// LandmarkTypeLeftEyeUp is a LandmarkType enum value LandmarkTypeLeftEyeUp = "LEFT_EYE_UP" // LandmarkTypeLeftEyeDown is a LandmarkType enum value LandmarkTypeLeftEyeDown = "LEFT_EYE_DOWN" // LandmarkTypeRightEyeLeft is a LandmarkType enum value LandmarkTypeRightEyeLeft = "RIGHT_EYE_LEFT" // LandmarkTypeRightEyeRight is a LandmarkType enum value LandmarkTypeRightEyeRight = "RIGHT_EYE_RIGHT" // LandmarkTypeRightEyeUp is a LandmarkType enum value LandmarkTypeRightEyeUp = "RIGHT_EYE_UP" // LandmarkTypeRightEyeDown is a LandmarkType enum value LandmarkTypeRightEyeDown = "RIGHT_EYE_DOWN" // LandmarkTypeNoseLeft is a LandmarkType enum value LandmarkTypeNoseLeft = "NOSE_LEFT" // LandmarkTypeNoseRight is a LandmarkType enum value LandmarkTypeNoseRight = "NOSE_RIGHT" // LandmarkTypeMouthUp is a LandmarkType enum value LandmarkTypeMouthUp = "MOUTH_UP" // LandmarkTypeMouthDown is a LandmarkType enum value LandmarkTypeMouthDown = "MOUTH_DOWN" // LandmarkTypeLeftPupil is a LandmarkType enum value LandmarkTypeLeftPupil = "LEFT_PUPIL" // LandmarkTypeRightPupil is a LandmarkType enum value LandmarkTypeRightPupil = "RIGHT_PUPIL" ) const ( // OrientationCorrectionRotate0 is a OrientationCorrection enum value OrientationCorrectionRotate0 = "ROTATE_0" // OrientationCorrectionRotate90 is a OrientationCorrection enum value OrientationCorrectionRotate90 = "ROTATE_90" // OrientationCorrectionRotate180 is a OrientationCorrection enum value OrientationCorrectionRotate180 = "ROTATE_180" // OrientationCorrectionRotate270 is a OrientationCorrection enum value OrientationCorrectionRotate270 = "ROTATE_270" )
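// A hand-written sketch (not generated code) showing how the
// OrientationCorrection values above might be interpreted when displaying an
// image. Here out is the output of one of the operations above, and
// rotateImage is a hypothetical helper in the calling application:
//
//    switch aws.StringValue(out.OrientationCorrection) {
//    case OrientationCorrectionRotate90:
//        rotateImage(img, 90) // counterclockwise
//    case OrientationCorrectionRotate180:
//        rotateImage(img, 180)
//    case OrientationCorrectionRotate270:
//        rotateImage(img, 270)
//    default:
//        // ROTATE_0 or empty: either no correction is needed or the image's
//        // Exif metadata was already used to correct the orientation.
//    }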