service/s3/api_op_CopyObject.go
// Code generated by smithy-go-codegen DO NOT EDIT.
package s3
import (
"context"
"fmt"
awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
s3cust "github.com/aws/aws-sdk-go-v2/service/s3/internal/customizations"
"github.com/aws/aws-sdk-go-v2/service/s3/types"
"github.com/aws/smithy-go/middleware"
"github.com/aws/smithy-go/ptr"
smithyhttp "github.com/aws/smithy-go/transport/http"
"time"
)
// Creates a copy of an object that is already stored in Amazon S3.
//
// You can store individual objects of up to 5 TB in Amazon S3. With this API,
// you can create a copy of an object that is up to 5 GB in size in a single
// atomic action. However, to copy an object greater than 5 GB, you must use the
// multipart upload Upload Part - Copy (UploadPartCopy) API. For more information, see [Copy Object Using the REST Multipart Upload API].
//
// You can copy individual objects between general purpose buckets, between
// directory buckets, and between general purpose buckets and directory buckets.
//
// - Amazon S3 supports copy operations using Multi-Region Access Points only as
// a destination when using the Multi-Region Access Point ARN.
//
// - Directory buckets - For directory buckets, you must make requests for this
// API operation to the Zonal endpoint. These endpoints support
// virtual-hosted-style requests in the format
// https://amzn-s3-demo-bucket.s3express-zone-id.region-code.amazonaws.com/key-name
// . Path-style requests are not supported. For more information about endpoints
// in Availability Zones, see [Regional and Zonal endpoints for directory buckets in Availability Zones] in the Amazon S3 User Guide. For more information
// about endpoints in Local Zones, see [Concepts for directory buckets in Local Zones] in the Amazon S3 User Guide.
//
// - VPC endpoints don't support cross-Region requests (including copies). If
// you're using VPC endpoints, your source and destination buckets should be in the
// same Amazon Web Services Region as your VPC endpoint.
//
// Both the Region that you want to copy the object from and the Region that you
// want to copy the object to must be enabled for your account. For more
// information about how to enable a Region for your account, see [Enable or disable a Region for standalone accounts] in the Amazon
// Web Services Account Management Guide.
//
// Amazon S3 transfer acceleration does not support cross-Region copies. If you
// request a cross-Region copy using a transfer acceleration endpoint, you get a
// 400 Bad Request error. For more information, see [Transfer Acceleration].
//
// Authentication and authorization All CopyObject requests must be authenticated
// and signed by using IAM credentials (access key ID and secret access key for the
// IAM identities). All headers with the x-amz- prefix, including x-amz-copy-source
// , must be signed. For more information, see [REST Authentication].
//
// Directory buckets - You must use the IAM credentials to authenticate and
// authorize your access to the CopyObject API operation, instead of using the
// temporary security credentials through the CreateSession API operation.
//
// The Amazon Web Services CLI and SDKs handle authentication and authorization
// on your behalf.
//
// Permissions You must have read access to the source object and write access to
// the destination bucket.
//
// - General purpose bucket permissions - You must have permissions in an IAM
// policy based on the source and destination bucket types in a CopyObject
// operation.
//
// - If the source object is in a general purpose bucket, you must have
// s3:GetObject permission to read the source object that is being copied.
//
// - If the destination bucket is a general purpose bucket, you must have
// s3:PutObject permission to write the object copy to the destination bucket.
//
// - Directory bucket permissions - You must have permissions in a bucket policy
// or an IAM identity-based policy based on the source and destination bucket types
// in a CopyObject operation.
//
// - If the source object that you want to copy is in a directory bucket, you
// must have the s3express:CreateSession permission in the Action element of a
// policy to read the object. By default, the session is in the ReadWrite mode.
// If you want to restrict the access, you can explicitly set the
// s3express:SessionMode condition key to ReadOnly on the copy source bucket.
//
// - If the copy destination is a directory bucket, you must have the
// s3express:CreateSession permission in the Action element of a policy to write
// the object to the destination. The s3express:SessionMode condition key can't
// be set to ReadOnly on the copy destination bucket.
//
// If the object is encrypted with SSE-KMS, you must also have the
// kms:GenerateDataKey and kms:Decrypt permissions in IAM identity-based policies
// and KMS key policies for the KMS key.
//
// For example policies, see [Example bucket policies for S3 Express One Zone] and [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone] in the Amazon S3 User Guide.
//
// Response and special errors When the request is an HTTP 1.1 request, the
// response is chunk encoded. When the request is not an HTTP 1.1 request, the
// response doesn't contain a Content-Length header. You always need to read the
// entire response body to check whether the copy succeeded.
//
// - If the copy is successful, you receive a response with information about
// the copied object.
//
// - A copy request might return an error when Amazon S3 receives the copy
// request or while Amazon S3 is copying the files. A 200 OK response can contain
// either a success or an error.
//
// - If the error occurs before the copy action starts, you receive a standard
// Amazon S3 error.
//
// - If the error occurs during the copy operation, the error response is
// embedded in the 200 OK response. For example, in a cross-Region copy, you may
// encounter throttling and receive a 200 OK response. For more information, see [Resolve the Error 200 response when copying objects to Amazon S3]
// . The 200 OK status code means the copy was accepted, but it doesn't mean the
// copy is complete. As another example, if you disconnect from Amazon S3 before
// the copy is complete, Amazon S3 might cancel the copy and you may still
// receive a 200 OK response. You must stay connected to Amazon S3 until the entire
// response is successfully received and processed.
//
// If you call this API operation directly, make sure to design your application
// to parse the content of the response and handle it appropriately. If you use
// Amazon Web Services SDKs, SDKs handle this condition. The SDKs detect the
// embedded error and apply error handling per your configuration settings
// (including automatically retrying the request as appropriate). If the condition
// persists, the SDKs throw an exception (or, for the SDKs that don't use
// exceptions, they return an error).
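//
// For example, a sketch of inspecting an operation error, including one that was
// embedded in a 200 OK response and surfaced by the SDK (client, ctx, and params
// are illustrative; smithy is github.com/aws/smithy-go, and errors and log come
// from the standard library):
//
//	out, err := client.CopyObject(ctx, params)
//	if err != nil {
//		var apiErr smithy.APIError
//		if errors.As(err, &apiErr) {
//			log.Printf("copy failed: %s: %s", apiErr.ErrorCode(), apiErr.ErrorMessage())
//		}
//		return err
//	}
//	_ = out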
//
// Charge The copy request charge is based on the storage class and Region that
// you specify for the destination object. The request can also result in a data
// retrieval charge for the source if the source storage class bills for data
// retrieval. If the copy source is in a different region, the data transfer is
// billed to the copy source account. For pricing information, see [Amazon S3 pricing].
//
// HTTP Host header syntax
//
// - Directory buckets - The HTTP Host header syntax is
// Bucket-name.s3express-zone-id.region-code.amazonaws.com .
//
// - Amazon S3 on Outposts - When you use this action with S3 on Outposts
// through the REST API, you must direct requests to the S3 on Outposts hostname.
// The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . The
// hostname isn't required when you use the Amazon Web Services CLI or SDKs.
//
// The following operations are related to CopyObject :
//
// [PutObject]
//
// [GetObject]
//
// [Concepts for directory buckets in Local Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-lzs-for-directory-buckets.html
// [Amazon Web Services Identity and Access Management (IAM) identity-based policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-identity-policies.html
// [Resolve the Error 200 response when copying objects to Amazon S3]: https://repost.aws/knowledge-center/s3-resolve-200-internalerror
// [Copy Object Using the REST Multipart Upload API]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html
// [REST Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html
// [Example bucket policies for S3 Express One Zone]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-security-iam-example-bucket-policies.html
// [Enable or disable a Region for standalone accounts]: https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-regions.html#manage-acct-regions-enable-standalone
// [Transfer Acceleration]: https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html
// [PutObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
// [GetObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html
// [Regional and Zonal endpoints for directory buckets in Availability Zones]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/endpoint-directory-buckets-AZ.html
// [Amazon S3 pricing]: http://aws.amazon.com/s3/pricing/
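//
// A minimal usage sketch; the client, ctx, bucket names, and object keys below
// are illustrative rather than values defined by this package:
//
//	out, err := client.CopyObject(ctx, &s3.CopyObjectInput{
//		Bucket:     aws.String("amzn-s3-demo-destination-bucket"),
//		Key:        aws.String("destination-key"),
//		CopySource: aws.String("amzn-s3-demo-source-bucket/source-key"),
//	})
//	if err != nil {
//		return err
//	}
//	fmt.Println(aws.ToString(out.CopyObjectResult.ETag))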
func (c *Client) CopyObject(ctx context.Context, params *CopyObjectInput, optFns ...func(*Options)) (*CopyObjectOutput, error) {
if params == nil {
params = &CopyObjectInput{}
}
result, metadata, err := c.invokeOperation(ctx, "CopyObject", params, optFns, c.addOperationCopyObjectMiddlewares)
if err != nil {
return nil, err
}
out := result.(*CopyObjectOutput)
out.ResultMetadata = metadata
return out, nil
}
type CopyObjectInput struct {
// The name of the destination bucket.
//
// Directory buckets - When you use this operation with a directory bucket, you
// must use virtual-hosted-style requests in the format
// Bucket-name.s3express-zone-id.region-code.amazonaws.com . Path-style requests
// are not supported. Directory bucket names must be unique in the chosen Zone
// (Availability Zone or Local Zone). Bucket names must follow the format
// bucket-base-name--zone-id--x-s3 (for example,
// amzn-s3-demo-bucket--usw2-az1--x-s3 ). For information about bucket naming
// restrictions, see [Directory bucket naming rules] in the Amazon S3 User Guide.
//
// Copying objects across different Amazon Web Services Regions isn't supported
// when the source or destination bucket is in Amazon Web Services Local Zones. The
// source and destination buckets must have the same parent Amazon Web Services
// Region. Otherwise, you get an HTTP 400 Bad Request error with the error code
// InvalidRequest .
//
// Access points - When you use this action with an access point for general
// purpose buckets, you must provide the alias of the access point in place of the
// bucket name or specify the access point ARN. When you use this action with an
// access point for directory buckets, you must provide the access point name in
// place of the bucket name. When using the access point ARN, you must direct
// requests to the access point hostname. The access point hostname takes the form
// AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using this
// action with an access point through the Amazon Web Services SDKs, you provide
// the access point ARN in place of the bucket name. For more information about
// access point ARNs, see [Using access points] in the Amazon S3 User Guide.
//
// Object Lambda access points are not supported by directory buckets.
//
// S3 on Outposts - When you use this action with S3 on Outposts, you must use the
// Outpost bucket access point ARN or the access point alias for the destination
// bucket.
//
// You can only copy objects within the same Outpost bucket. You can't copy
// objects across different Amazon Web Services Outposts, between buckets on the
// same Outpost, or between Outposts buckets and any other bucket types. For
// more information about S3 on Outposts, see [What is S3 on Outposts?] in the S3 on Outposts guide. When
// you use this action with S3 on Outposts through the REST API, you must direct
// requests to the S3 on Outposts hostname, in the format
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com . The
// hostname isn't required when you use the Amazon Web Services CLI or SDKs.
//
// [Directory bucket naming rules]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/directory-bucket-naming-rules.html
// [What is S3 on Outposts?]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html
// [Using access points]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html
//
// This member is required.
Bucket *string
// Specifies the source object for the copy operation. The source object can be up
// to 5 GB. If the source object is an object that was uploaded by using a
// multipart upload, the object copy will be a single part object after the source
// object is copied to the destination bucket.
//
// You specify the value of the copy source in one of two formats, depending on
// whether you want to access the source object through an [access point]:
//
// - For objects not accessed through an access point, specify the name of the
// source bucket and the key of the source object, separated by a slash (/). For
// example, to copy the object reports/january.pdf from the general purpose
// bucket awsexamplebucket , use awsexamplebucket/reports/january.pdf . The value
// must be URL-encoded. To copy the object reports/january.pdf from the directory
// bucket awsexamplebucket--use1-az5--x-s3 , use
// awsexamplebucket--use1-az5--x-s3/reports/january.pdf . The value must be
// URL-encoded.
//
// - For objects accessed through access points, specify the Amazon Resource
// Name (ARN) of the object as accessed through the access point, in the format
// arn:aws:s3:<Region>:<account-id>:accesspoint/<access-point-name>/object/<key> .
// For example, to copy the object
// reports/january.pdf through access point my-access-point owned by account
// 123456789012 in Region us-west-2 , use the URL encoding of
// arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf
// . The value must be URL encoded.
//
// - Amazon S3 supports copy operations using Access points only when the source
// and destination buckets are in the same Amazon Web Services Region.
//
// - Access points are not supported by directory buckets.
//
// Alternatively, for objects accessed through Amazon S3 on Outposts, specify the
// ARN of the object as accessed in the format
// arn:aws:s3-outposts:<Region>:<account-id>:outpost/<outpost-id>/object/<key> .
// For example, to copy the object
// reports/january.pdf through outpost my-outpost owned by account 123456789012
// in Region us-west-2 , use the URL encoding of
// arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf
// . The value must be URL-encoded.
//
// If your source bucket versioning is enabled, the x-amz-copy-source header by
// default identifies the current version of an object to copy. If the current
// version is a delete marker, Amazon S3 behaves as if the object was deleted. To
// copy a different version, use the versionId query parameter. Specifically,
// append ?versionId=<version-id> to the value (for example,
// awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893
// ). If you don't specify a version ID, Amazon S3 copies the latest version of the
// source object.
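//
// For example, a sketch of building the copy source for a specific object
// version (the bucket name, key, and version ID are illustrative; url.QueryEscape
// is one way to URL-encode the key):
//
//	key := url.QueryEscape("reports/january.pdf")
//	params.CopySource = aws.String(
//		"awsexamplebucket/" + key + "?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893")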
//
// If you enable versioning on the destination bucket, Amazon S3 generates a
// unique version ID for the copied object. This version ID is different from the
// version ID of the source object. Amazon S3 returns the version ID of the copied
// object in the x-amz-version-id response header in the response.
//
// If you do not enable versioning or suspend it on the destination bucket, the
// version ID that Amazon S3 generates in the x-amz-version-id response header is
// always null.
//
// Directory buckets - S3 Versioning isn't enabled and supported for directory
// buckets.
//
// [access point]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-points.html
//
// This member is required.
CopySource *string
// The key of the destination object.
//
// This member is required.
Key *string
// The canned access control list (ACL) to apply to the object.
//
// When you copy an object, the ACL metadata is not preserved and is set to private
// by default. Only the owner has full access control. To override the default ACL
// setting, specify a new ACL when you generate a copy request. For more
// information, see [Using ACLs].
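//
// For example, a sketch of overriding the default private ACL with a canned ACL
// on the copy request (general purpose buckets that allow ACLs only):
//
//	params.ACL = types.ObjectCannedACLBucketOwnerFullControl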
//
// If the destination bucket that you're copying objects to uses the bucket owner
// enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect
// permissions. Buckets that use this setting only accept PUT requests that don't
// specify an ACL or PUT requests that specify bucket owner full control ACLs,
// such as the bucket-owner-full-control canned ACL or an equivalent form of this
// ACL expressed in the XML format. For more information, see [Controlling ownership of objects and disabling ACLs] in the Amazon S3
// User Guide.
//
// - If your destination bucket uses the bucket owner enforced setting for
// Object Ownership, all objects written to the bucket by any account will be owned
// by the bucket owner.
//
// - This functionality is not supported for directory buckets.
//
// - This functionality is not supported for Amazon S3 on Outposts.
//
// [Using ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html
// [Controlling ownership of objects and disabling ACLs]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html
ACL types.ObjectCannedACL
// Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption
// with server-side encryption using Key Management Service (KMS) keys (SSE-KMS).
// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object.
//
// Setting this header to true causes Amazon S3 to use an S3 Bucket Key for object
// encryption with SSE-KMS. Specifying this header with a COPY action doesn’t
// affect bucket-level settings for S3 Bucket Key.
//
// For more information, see [Amazon S3 Bucket Keys] in the Amazon S3 User Guide.
//
// Directory buckets - S3 Bucket Keys aren't supported when you copy SSE-KMS
// encrypted objects from general purpose buckets to directory buckets, from
// directory buckets to general purpose buckets, or between directory buckets,
// through [CopyObject]. In this case, Amazon S3 makes a call to KMS every time a copy request
// is made for a KMS-encrypted object.
//
// [Amazon S3 Bucket Keys]: https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html
// [CopyObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html
BucketKeyEnabled *bool
// Specifies the caching behavior along the request/reply chain.
CacheControl *string
// Indicates the algorithm that you want Amazon S3 to use to create the checksum
// for the object. For more information, see [Checking object integrity] in the Amazon S3 User Guide.
//
// When you copy an object, if the source object has a checksum, that checksum
// value will be copied to the new object by default. If the CopyObject request
// does not include this x-amz-checksum-algorithm header, the checksum algorithm
// will be copied from the source object to the destination object (if it's present
// on the source object). You can optionally specify a different checksum algorithm
// to use with the x-amz-checksum-algorithm header. Requests with unrecognized or
// unsupported values are rejected with the HTTP status code 400 Bad Request .
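//
// For example, a sketch of requesting a specific checksum algorithm for the
// object copy (SHA256 is just one of the supported values):
//
//	params.ChecksumAlgorithm = types.ChecksumAlgorithmSha256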
//
// For directory buckets, when you use Amazon Web Services SDKs, CRC32 is the
// default checksum algorithm that's used for performance.
//
// [Checking object integrity]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html
ChecksumAlgorithm types.ChecksumAlgorithm
// Specifies presentational information for the object. Indicates whether an
// object should be displayed in a web browser or downloaded as a file. It allows
// specifying the desired filename for the downloaded file.
ContentDisposition *string
// Specifies what content encodings have been applied to the object and thus what
// decoding mechanisms must be applied to obtain the media-type referenced by the
// Content-Type header field.
//
// For directory buckets, only the aws-chunked value is supported in this header
// field.
ContentEncoding *string
// The language the content is in.
ContentLanguage *string
// A standard MIME type that describes the format of the object data.
ContentType *string
// Copies the object if its entity tag (ETag) matches the specified tag.
//
// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
// headers are present in the request and evaluate as follows, Amazon S3 returns
// 200 OK and copies the data:
//
// - x-amz-copy-source-if-match condition evaluates to true
//
// - x-amz-copy-source-if-unmodified-since condition evaluates to false
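//
// A sketch of a conditional copy that proceeds only if the source object's ETag
// still matches (the ETag value and timestamp are illustrative):
//
//	params.CopySourceIfMatch = aws.String(`"9b2cf535f27731c974343645a3985328"`)
//	params.CopySourceIfUnmodifiedSince = aws.Time(
//		time.Date(2024, time.January, 1, 0, 0, 0, 0, time.UTC))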
CopySourceIfMatch *string
// Copies the object if it has been modified since the specified time.
//
// If both the x-amz-copy-source-if-none-match and
// x-amz-copy-source-if-modified-since headers are present in the request and
// evaluate as follows, Amazon S3 returns the 412 Precondition Failed response
// code:
//
// - x-amz-copy-source-if-none-match condition evaluates to false
//
// - x-amz-copy-source-if-modified-since condition evaluates to true
CopySourceIfModifiedSince *time.Time
// Copies the object if its entity tag (ETag) is different than the specified ETag.
//
// If both the x-amz-copy-source-if-none-match and
// x-amz-copy-source-if-modified-since headers are present in the request and
// evaluate as follows, Amazon S3 returns the 412 Precondition Failed response
// code:
//
// - x-amz-copy-source-if-none-match condition evaluates to false
//
// - x-amz-copy-source-if-modified-since condition evaluates to true
CopySourceIfNoneMatch *string
// Copies the object if it hasn't been modified since the specified time.
//
// If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since
// headers are present in the request and evaluate as follows, Amazon S3 returns
// 200 OK and copies the data:
//
// - x-amz-copy-source-if-match condition evaluates to true
//
// - x-amz-copy-source-if-unmodified-since condition evaluates to false
CopySourceIfUnmodifiedSince *time.Time
// Specifies the algorithm to use when decrypting the source object (for example,
// AES256 ).
//
// If the source object for the copy is stored in Amazon S3 using SSE-C, you must
// provide the necessary encryption information in your request so that Amazon S3
// can decrypt the object for copying.
//
// This functionality is not supported when the source object is in a directory
// bucket.
CopySourceSSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use to decrypt
// the source object. The encryption key provided in this header must be the same
// one that was used when the source object was created.
//
// If the source object for the copy is stored in Amazon S3 using SSE-C, you must
// provide the necessary encryption information in your request so that Amazon S3
// can decrypt the object for copying.
//
// This functionality is not supported when the source object is in a directory
// bucket.
CopySourceSSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
//
// If the source object for the copy is stored in Amazon S3 using SSE-C, you must
// provide the necessary encryption information in your request so that Amazon S3
// can decrypt the object for copying.
//
// This functionality is not supported when the source object is in a directory
// bucket.
CopySourceSSECustomerKeyMD5 *string
// The account ID of the expected destination bucket owner. If the account ID that
// you provide does not match the actual owner of the destination bucket, the
// request fails with the HTTP status code 403 Forbidden (access denied).
ExpectedBucketOwner *string
// The account ID of the expected source bucket owner. If the account ID that you
// provide does not match the actual owner of the source bucket, the request fails
// with the HTTP status code 403 Forbidden (access denied).
ExpectedSourceBucketOwner *string
// The date and time at which the object is no longer cacheable.
Expires *time.Time
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
//
// - This functionality is not supported for directory buckets.
//
// - This functionality is not supported for Amazon S3 on Outposts.
GrantFullControl *string
// Allows grantee to read the object data and its metadata.
//
// - This functionality is not supported for directory buckets.
//
// - This functionality is not supported for Amazon S3 on Outposts.
GrantRead *string
// Allows grantee to read the object ACL.
//
// - This functionality is not supported for directory buckets.
//
// - This functionality is not supported for Amazon S3 on Outposts.
GrantReadACP *string
// Allows grantee to write the ACL for the applicable object.
//
// - This functionality is not supported for directory buckets.
//
// - This functionality is not supported for Amazon S3 on Outposts.
GrantWriteACP *string
// A map of metadata to store with the object in S3.
Metadata map[string]string
// Specifies whether the metadata is copied from the source object or replaced
// with metadata that's provided in the request. When copying an object, you can
// preserve all metadata (the default) or specify new metadata. If this header
// isn’t specified, COPY is the default behavior.
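//
// A sketch of replacing the metadata on the object copy instead of copying it
// from the source (the metadata key, value, and content type are illustrative):
//
//	params.MetadataDirective = types.MetadataDirectiveReplace
//	params.ContentType = aws.String("application/pdf")
//	params.Metadata = map[string]string{
//		"project": "annual-report", // user-defined x-amz-meta-* metadata
//	}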
//
// General purpose bucket - For general purpose buckets, when you grant
// permissions, you can use the s3:x-amz-metadata-directive condition key to
// enforce certain metadata behavior when objects are uploaded. For more
// information, see [Amazon S3 condition key examples] in the Amazon S3 User Guide.
//
// x-amz-website-redirect-location is unique to each object and is not copied when
// using the x-amz-metadata-directive header. To copy the value, you must specify
// x-amz-website-redirect-location in the request header.
//
// [Amazon S3 condition key examples]: https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html
MetadataDirective types.MetadataDirective
// Specifies whether you want to apply a legal hold to the object copy.
//
// This functionality is not supported for directory buckets.
ObjectLockLegalHoldStatus types.ObjectLockLegalHoldStatus
// The Object Lock mode that you want to apply to the object copy.
//
// This functionality is not supported for directory buckets.
ObjectLockMode types.ObjectLockMode
// The date and time when you want the Object Lock of the object copy to expire.
//
// This functionality is not supported for directory buckets.
ObjectLockRetainUntilDate *time.Time
// Confirms that the requester knows that they will be charged for the request.
// Bucket owners need not specify this parameter in their requests. If either the
// source or destination S3 bucket has Requester Pays enabled, the requester will
// pay for corresponding charges to copy the object. For information about
// downloading objects from Requester Pays buckets, see [Downloading Objects in Requester Pays Buckets] in the Amazon S3 User
// Guide.
//
// This functionality is not supported for directory buckets.
//
// [Downloading Objects in Requester Pays Buckets]: https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer types.RequestPayer
// Specifies the algorithm to use when encrypting the object (for example, AES256 ).
//
// When you perform a CopyObject operation, if you want to use a different type of
// encryption setting for the target object, you can specify appropriate
// encryption-related headers to encrypt the target object with an Amazon S3
// managed key, a KMS key, or a customer-provided key. If the encryption setting in
// your request is different from the default encryption configuration of the
// destination bucket, the encryption setting in your request takes precedence.
//
// This functionality is not supported when the destination bucket is a directory
// bucket.
SSECustomerAlgorithm *string
// Specifies the customer-provided encryption key for Amazon S3 to use in
// encrypting data. This value is used to store the object and then it is
// discarded. Amazon S3 does not store the encryption key. The key must be
// appropriate for use with the algorithm specified in the
// x-amz-server-side-encryption-customer-algorithm header.
//
// This functionality is not supported when the destination bucket is a directory
// bucket.
SSECustomerKey *string
// Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
// Amazon S3 uses this header for a message integrity check to ensure that the
// encryption key was transmitted without error.
//
// This functionality is not supported when the destination bucket is a directory
// bucket.
SSECustomerKeyMD5 *string
// Specifies the Amazon Web Services KMS Encryption Context as an additional
// encryption context to use for the destination object encryption. The value of
// this header is a base64-encoded UTF-8 string holding JSON with the encryption
// context key-value pairs.
//
// General purpose buckets - This value must be explicitly added to specify
// encryption context for CopyObject requests if you want an additional encryption
// context for your destination object. The additional encryption context of the
// source object won't be copied to the destination object. For more information,
// see [Encryption context] in the Amazon S3 User Guide.
//
// Directory buckets - You can optionally provide an explicit encryption context
// value. The value must match the default encryption context - the bucket Amazon
// Resource Name (ARN). An additional encryption context value is not supported.
//
// [Encryption context]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingKMSEncryption.html#encryption-context
SSEKMSEncryptionContext *string
// Specifies the KMS key ID (Key ID, Key ARN, or Key Alias) to use for object
// encryption. All GET and PUT requests for an object protected by KMS will fail if
// they're not made via SSL or using SigV4. For information about configuring any
// of the officially supported Amazon Web Services SDKs and Amazon Web Services
// CLI, see [Specifying the Signature Version in Request Authentication] in the Amazon S3 User Guide.
//
// Directory buckets - To encrypt data using SSE-KMS, we recommend setting the
// x-amz-server-side-encryption header to aws:kms . Then, the
// x-amz-server-side-encryption-aws-kms-key-id header implicitly uses the bucket's
// default KMS customer managed key ID. If you want to explicitly set the
// x-amz-server-side-encryption-aws-kms-key-id header, it must match the bucket's
// default customer managed key (using key ID or ARN, not alias). Your SSE-KMS
// configuration can only support 1 [customer managed key] per directory bucket's
// lifetime. The [Amazon Web Services managed key] ( aws/s3 ) isn't supported.
//
// Incorrect key specification results in an HTTP 400 Bad Request error.
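//
// A sketch of requesting SSE-KMS for the object copy with a specific customer
// managed key (the key ARN is illustrative; for directory buckets the key must
// match the bucket's default customer managed key):
//
//	params.ServerSideEncryption = types.ServerSideEncryptionAwsKms
//	params.SSEKMSKeyId = aws.String(
//		"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab")
//	params.BucketKeyEnabled = aws.Bool(true) // optional; general purpose buckets only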
//
// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
// [Specifying the Signature Version in Request Authentication]: https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version
// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
SSEKMSKeyId *string
// The server-side encryption algorithm used when storing this object in Amazon
// S3. Requests with unrecognized or unsupported values don't write a destination
// object and are rejected with a 400 Bad Request response.
//
// Amazon S3 automatically encrypts all new objects that are copied to an S3
// bucket. When copying an object, if you don't specify encryption information in
// your copy request, the encryption setting of the target object is set to the
// default encryption configuration of the destination bucket. By default, all
// buckets have a base level of encryption configuration that uses server-side
// encryption with Amazon S3 managed keys (SSE-S3). If the destination bucket has a
// different default encryption configuration, Amazon S3 uses the corresponding
// encryption key to encrypt the target object copy.
//
// With server-side encryption, Amazon S3 encrypts your data as it writes your
// data to disks in its data centers and decrypts the data when you access it. For
// more information about server-side encryption, see [Using Server-Side Encryption] in the Amazon S3 User Guide.
//
// General purpose buckets
//
// - For general purpose buckets, there are the following supported options for
// server-side encryption: server-side encryption with Key Management Service (KMS)
// keys (SSE-KMS), dual-layer server-side encryption with Amazon Web Services KMS
// keys (DSSE-KMS), and server-side encryption with customer-provided encryption
// keys (SSE-C). Amazon S3 uses the corresponding KMS key, or a customer-provided
// key to encrypt the target object copy.
//
// - When you perform a CopyObject operation, if you want to use a different type
// of encryption setting for the target object, you can specify appropriate
// encryption-related headers to encrypt the target object with an Amazon S3
// managed key, a KMS key, or a customer-provided key. If the encryption setting in
// your request is different from the default encryption configuration of the
// destination bucket, the encryption setting in your request takes precedence.
//
// Directory buckets
//
// - For directory buckets, there are only two supported options for server-side
// encryption: server-side encryption with Amazon S3 managed keys (SSE-S3) (
// AES256 ) and server-side encryption with KMS keys (SSE-KMS) ( aws:kms ). We
// recommend that the bucket's default encryption uses the desired encryption
// configuration and you don't override the bucket default encryption in your
// CreateSession requests or PUT object requests. Then, new objects are
// automatically encrypted with the desired encryption settings. For more
// information, see [Protecting data with server-side encryption] in the Amazon S3 User Guide. For more information about the
// encryption overriding behaviors in directory buckets, see [Specifying server-side encryption with KMS for new object uploads].
//
// - To encrypt new object copies to a directory bucket with SSE-KMS, we
// recommend you specify SSE-KMS as the directory bucket's default encryption
// configuration with a KMS key (specifically, a [customer managed key]). The [Amazon Web Services managed key] ( aws/s3 ) isn't
// supported. Your SSE-KMS configuration can only support 1 [customer managed key] per directory bucket
// for the lifetime of the bucket. After you specify a customer managed key for
// SSE-KMS, you can't override the customer managed key for the bucket's SSE-KMS
// configuration. Then, when you perform a CopyObject operation and want to
// specify server-side encryption settings for new object copies with SSE-KMS in
// the encryption-related request headers, you must ensure the encryption key is
// the same customer managed key that you specified for the directory bucket's
// default encryption configuration.
//
// [Using Server-Side Encryption]: https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html
// [Specifying server-side encryption with KMS for new object uploads]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-specifying-kms-encryption.html
// [customer managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#customer-cmk
// [Protecting data with server-side encryption]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-serv-side-encryption.html
// [Amazon Web Services managed key]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
ServerSideEncryption types.ServerSideEncryption
// If the x-amz-storage-class header is not used, the copied object will be stored
// in the STANDARD Storage Class by default. The STANDARD storage class provides
// high durability and high availability. Depending on performance needs, you can
// specify a different Storage Class.
//
// - Directory buckets - Directory buckets only support EXPRESS_ONEZONE (the S3
// Express One Zone storage class) in Availability Zones and ONEZONE_IA (the S3
// One Zone-Infrequent Access storage class) in Dedicated Local Zones. Requests
// with unsupported storage class values don't write a destination object and are
// rejected with the HTTP status code 400 Bad Request .
//
// - Amazon S3 on Outposts - S3 on Outposts only uses the OUTPOSTS Storage Class.
//
// You can use the CopyObject action to change the storage class of an object that
// is already stored in Amazon S3 by using the x-amz-storage-class header. For
// more information, see [Storage Classes] in the Amazon S3 User Guide.
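//
// For example, one way to change the storage class of an existing object is to
// copy it onto itself with a new x-amz-storage-class value (a sketch; the bucket
// and key are illustrative):
//
//	params.Bucket = aws.String("amzn-s3-demo-bucket")
//	params.Key = aws.String("reports/january.pdf")
//	params.CopySource = aws.String("amzn-s3-demo-bucket/reports/january.pdf")
//	params.StorageClass = types.StorageClassStandardIa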
//
// Before using an object as a source object for the copy operation, you must
// restore a copy of it if it meets any of the following conditions:
//
// - The storage class of the source object is GLACIER or DEEP_ARCHIVE .
//
// - The storage class of the source object is INTELLIGENT_TIERING and its [S3 Intelligent-Tiering access tier]
// is Archive Access or Deep Archive Access .
//
// For more information, see [RestoreObject] and [Copying Objects] in the Amazon S3 User Guide.
//
// [Storage Classes]: https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html
// [RestoreObject]: https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html
// [Copying Objects]: https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html
// [S3 Intelligent-Tiering access tier]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/intelligent-tiering-overview.html#intel-tiering-tier-definition
StorageClass types.StorageClass
// The tag-set for the object copy in the destination bucket. This value must be
// used in conjunction with the x-amz-tagging-directive if you choose REPLACE for
// the x-amz-tagging-directive . If you choose COPY for the x-amz-tagging-directive
// , you don't need to set the x-amz-tagging header, because the tag-set will be
// copied from the source object directly. The tag-set must be encoded as URL Query
// parameters.
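//
// A sketch of replacing the tag-set on an object copy to a general purpose
// bucket; the tag key and value are illustrative, and the tag-set is encoded as
// URL query parameters:
//
//	tags := url.Values{}
//	tags.Set("department", "finance")
//	params.TaggingDirective = types.TaggingDirectiveReplace
//	params.Tagging = aws.String(tags.Encode())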
//
// The default value is the empty value.
//
// Directory buckets - For directory buckets in a CopyObject operation, only the
// empty tag-set is supported. Any requests that attempt to write non-empty tags
// into directory buckets will receive a 501 Not Implemented status code. When the
// destination bucket is a directory bucket, you will receive a 501 Not Implemented
// response in any of the following situations:
//
// - When you attempt to COPY the tag-set from an S3 source object that has
// non-empty tags.
//
// - When you attempt to REPLACE the tag-set of a source object and set a
// non-empty value to x-amz-tagging .
//
// - When you don't set the x-amz-tagging-directive header and the source object
// has non-empty tags. This is because the default value of
// x-amz-tagging-directive is COPY .
//
// Because only the empty tag-set is supported for directory buckets in a
// CopyObject operation, the following situations are allowed:
//
// - When you attempt to COPY the tag-set from a directory bucket source object
// that has no tags to a general purpose bucket. It copies an empty tag-set to the
// destination object.
//
// - When you attempt to REPLACE the tag-set of a directory bucket source object
// and set the x-amz-tagging value of the directory bucket destination object to
// empty.
//
// - When you attempt to REPLACE the tag-set of a general purpose bucket source
// object that has non-empty tags and set the x-amz-tagging value of the
// directory bucket destination object to empty.
//
// - When you attempt to REPLACE the tag-set of a directory bucket source object
// and don't set the x-amz-tagging value of the directory bucket destination
// object. This is because the default value of x-amz-tagging is the empty value.
Tagging *string
// Specifies whether the object tag-set is copied from the source object or
// replaced with the tag-set that's provided in the request.
//
// The default value is COPY .
//
// Directory buckets - For directory buckets in a CopyObject operation, only the
// empty tag-set is supported. Any requests that attempt to write non-empty tags
// into directory buckets will receive a 501 Not Implemented status code. When the
// destination bucket is a directory bucket, you will receive a 501 Not Implemented
// response in any of the following situations:
//
// - When you attempt to COPY the tag-set from an S3 source object that has
// non-empty tags.
//
// - When you attempt to REPLACE the tag-set of a source object and set a
// non-empty value to x-amz-tagging .
//
// - When you don't set the x-amz-tagging-directive header and the source object
// has non-empty tags. This is because the default value of
// x-amz-tagging-directive is COPY .
//
// Because only the empty tag-set is supported for directory buckets in a
// CopyObject operation, the following situations are allowed:
//
// - When you attempt to COPY the tag-set from a directory bucket source object
// that has no tags to a general purpose bucket. It copies an empty tag-set to the
// destination object.
//
// - When you attempt to REPLACE the tag-set of a directory bucket source object
// and set the x-amz-tagging value of the directory bucket destination object to
// empty.
//
// - When you attempt to REPLACE the tag-set of a general purpose bucket source
// object that has non-empty tags and set the x-amz-tagging value of the
// directory bucket destination object to empty.
//
// - When you attempt to REPLACE the tag-set of a directory bucket source object
// and don't set the x-amz-tagging value of the directory bucket destination
// object. This is because the default value of x-amz-tagging is the empty value.
TaggingDirective types.TaggingDirective
// If the destination bucket is configured as a website, redirects requests for
// this object copy to another object in the same bucket or to an external URL.
// Amazon S3 stores the value of this header in the object metadata. This value is
// unique to each object and is not copied when using the x-amz-metadata-directive
// header. Instead, you may opt to provide this header in combination with the
// x-amz-metadata-directive header.
//
// This functionality is not supported for directory buckets.
WebsiteRedirectLocation *string
noSmithyDocumentSerde
}
func (in *CopyObjectInput) bindEndpointParams(p *EndpointParameters) {
p.Bucket = in.Bucket
p.CopySource = in.CopySource
p.Key = in.Key
p.DisableS3ExpressSessionAuth = ptr.Bool(true)
}
type CopyObjectOutput struct {
// Indicates whether the copied object uses an S3 Bucket Key for server-side
// encryption with Key Management Service (KMS) keys (SSE-KMS).
BucketKeyEnabled *bool
// Container for all response elements.
CopyObjectResult *types.CopyObjectResult
// Version ID of the source object that was copied.
//
// This functionality is not supported when the source object is in a directory
// bucket.
CopySourceVersionId *string
// If the object expiration is configured, the response includes this header.
//
// Object expiration information is not returned in directory buckets and this
// header returns the value " NotImplemented " in all responses for directory
// buckets.
Expiration *string
// If present, indicates that the requester was successfully charged for the
// request.
//
// This functionality is not supported for directory buckets.
RequestCharged types.RequestCharged
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to confirm the encryption
// algorithm that's used.
//
// This functionality is not supported for directory buckets.
SSECustomerAlgorithm *string
// If server-side encryption with a customer-provided encryption key was
// requested, the response will include this header to provide the round-trip
// message integrity verification of the customer-provided encryption key.
//
// This functionality is not supported for directory buckets.
SSECustomerKeyMD5 *string
// If present, indicates the Amazon Web Services KMS Encryption Context to use for
// object encryption. The value of this header is a Base64 encoded UTF-8 string
// holding JSON with the encryption context key-value pairs.
SSEKMSEncryptionContext *string
// If present, indicates the ID of the KMS key that was used for object encryption.
SSEKMSKeyId *string
// The server-side encryption algorithm used when you store this object in Amazon
// S3 (for example, AES256 , aws:kms , aws:kms:dsse ).
ServerSideEncryption types.ServerSideEncryption
// Version ID of the newly created copy.
//
// This functionality is not supported for directory buckets.
VersionId *string
// Metadata pertaining to the operation's result.
ResultMetadata middleware.Metadata
noSmithyDocumentSerde
}
func (c *Client) addOperationCopyObjectMiddlewares(stack *middleware.Stack, options Options) (err error) {
if err := stack.Serialize.Add(&setOperationInputMiddleware{}, middleware.After); err != nil {
return err
}
err = stack.Serialize.Add(&awsRestxml_serializeOpCopyObject{}, middleware.After)
if err != nil {
return err
}
err = stack.Deserialize.Add(&awsRestxml_deserializeOpCopyObject{}, middleware.After)
if err != nil {
return err
}
if err := addProtocolFinalizerMiddlewares(stack, options, "CopyObject"); err != nil {
return fmt.Errorf("add protocol finalizers: %v", err)
}
if err = addlegacyEndpointContextSetter(stack, options); err != nil {
return err
}
if err = addSetLoggerMiddleware(stack, options); err != nil {
return err
}
if err = addClientRequestID(stack); err != nil {
return err
}
if err = addComputeContentLength(stack); err != nil {
return err
}
if err = addResolveEndpointMiddleware(stack, options); err != nil {
return err
}
if err = addComputePayloadSHA256(stack); err != nil {
return err
}
if err = addRetry(stack, options); err != nil {
return err
}
if err = addRawResponseToMetadata(stack); err != nil {
return err
}
if err = addRecordResponseTiming(stack); err != nil {
return err
}
if err = addSpanRetryLoop(stack, options); err != nil {
return err
}
if err = addClientUserAgent(stack, options); err != nil {
return err
}
if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil {
return err
}
if err = addSetLegacyContextSigningOptionsMiddleware(stack); err != nil {
return err
}
if err = addPutBucketContextMiddleware(stack); err != nil {
return err
}
if err = addTimeOffsetBuild(stack, c); err != nil {
return err
}
if err = addUserAgentRetryMode(stack, options); err != nil {
return err
}
if err = addIsExpressUserAgent(stack); err != nil {
return err
}
if err = addCredentialSource(stack, options); err != nil {
return err
}
if err = addOpCopyObjectValidationMiddleware(stack); err != nil {
return err
}
if err = stack.Initialize.Add(newServiceMetadataMiddleware_opCopyObject(options.Region), middleware.Before); err != nil {
return err
}
if err = addMetadataRetrieverMiddleware(stack); err != nil {
return err
}
if err = addRecursionDetection(stack); err != nil {
return err
}
if err = addCopyObjectUpdateEndpoint(stack, options); err != nil {
return err
}
if err = addResponseErrorMiddleware(stack); err != nil {
return err
}
if err = v4.AddContentSHA256HeaderMiddleware(stack); err != nil {
return err
}
if err = disableAcceptEncodingGzip(stack); err != nil {
return err
}
if err = s3cust.HandleResponseErrorWith200Status(stack); err != nil {
return err
}
if err = addRequestResponseLogging(stack, options); err != nil {
return err
}
if err = addDisableHTTPSMiddleware(stack, options); err != nil {
return err
}
if err = addSerializeImmutableHostnameBucketMiddleware(stack, options); err != nil {
return err
}
if err = addSpanInitializeStart(stack); err != nil {
return err
}
if err = addSpanInitializeEnd(stack); err != nil {
return err
}
if err = addSpanBuildRequestStart(stack); err != nil {
return err
}
if err = addSpanBuildRequestEnd(stack); err != nil {
return err
}
return nil
}
func (v *CopyObjectInput) bucket() (string, bool) {
if v.Bucket == nil {
return "", false
}
return *v.Bucket, true
}
func newServiceMetadataMiddleware_opCopyObject(region string) *awsmiddleware.RegisterServiceMetadata {
return &awsmiddleware.RegisterServiceMetadata{
Region: region,
ServiceID: ServiceID,
OperationName: "CopyObject",
}
}
// getCopyObjectBucketMember returns a pointer to string denoting a provided
// bucket member value and a boolean indicating if the input has a modeled bucket
// name.
func getCopyObjectBucketMember(input interface{}) (*string, bool) {
in := input.(*CopyObjectInput)
if in.Bucket == nil {
return nil, false
}
return in.Bucket, true
}
func addCopyObjectUpdateEndpoint(stack *middleware.Stack, options Options) error {
return s3cust.UpdateEndpoint(stack, s3cust.UpdateEndpointOptions{
Accessor: s3cust.UpdateEndpointParameterAccessor{
GetBucketFromInput: getCopyObjectBucketMember,
},
UsePathStyle: options.UsePathStyle,
UseAccelerate: options.UseAccelerate,
SupportsAccelerate: true,
TargetS3ObjectLambda: false,
EndpointResolver: options.EndpointResolver,
EndpointResolverOptions: options.EndpointOptions,
UseARNRegion: options.UseARNRegion,
DisableMultiRegionAccessPoints: options.DisableMultiRegionAccessPoints,
})
}