in source/s3_copy_object.c [356:541]
static int s_s3_copy_object_prepare_request(struct aws_s3_meta_request *meta_request, struct aws_s3_request *request) {
    AWS_PRECONDITION(meta_request);

    struct aws_s3_copy_object *copy_object = meta_request->impl;
    AWS_PRECONDITION(copy_object);

    /* Synced data (content length, part size, etag list, ...) is shared across sub-requests; hold the lock
     * while reading or writing it below. */
    aws_s3_meta_request_lock_synced_data(meta_request);

    struct aws_http_message *message = NULL;

    switch (request->request_tag) {

        /* Prepares the HEAD request used to query the size of the source object. */
        case AWS_S3_COPY_OBJECT_REQUEST_TAG_GET_OBJECT_SIZE: {
            message = aws_s3_get_source_object_size_message_new(
                meta_request->allocator, meta_request->initial_request_message);
            break;
        }
        /* The source object is not large enough for multi-part copy. Forwards a copy of the original
         * CopyObject request directly to S3. */
        case AWS_S3_COPY_OBJECT_REQUEST_TAG_BYPASS: {
            message = aws_s3_message_util_copy_http_message(
                meta_request->allocator, meta_request->initial_request_message, NULL, 0);

            if (meta_request->should_compute_content_md5) {
                aws_s3_message_util_add_content_md5_header(meta_request->allocator, &request->request_body, message);
            }
            break;
        }
        /* Prepares the CreateMultipartUpload sub-request. */
        case AWS_S3_COPY_OBJECT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD: {

            /* Derive the smallest part size that keeps the copy within S3's maximum part count. */
            uint64_t part_size_uint64 =
                copy_object->synced_data.content_length / (uint64_t)g_s3_max_num_upload_parts;

            if (part_size_uint64 > SIZE_MAX) {
                AWS_LOGF_ERROR(
                    AWS_LS_S3_META_REQUEST,
                    "Could not create multipart copy meta request; required part size of %" PRIu64
                    " bytes is too large for platform.",
                    part_size_uint64);

                /* Release the synced-data lock taken above before bailing out of this error path. */
                aws_s3_meta_request_unlock_synced_data(meta_request);
                aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
                return AWS_OP_ERR;
            }
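            /* Note: on 64-bit targets SIZE_MAX equals UINT64_MAX, so the guard above can only trip on 32-bit
             * builds, where size_t is narrower than the 64-bit content length. */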
            size_t part_size = (size_t)part_size_uint64;

            /* Enforce a floor on the part size so smaller objects use fewer, larger parts. */
            const size_t MIN_PART_SIZE = 64L * 1024L * 1024L; /* minimum part size */
            if (part_size < MIN_PART_SIZE) {
                part_size = MIN_PART_SIZE;
            }

            /* Ceiling division: any remainder needs one extra, shorter, final part. */
            uint32_t num_parts = (uint32_t)(copy_object->synced_data.content_length / part_size);
            if ((copy_object->synced_data.content_length % part_size) > 0) {
                ++num_parts;
            }

            copy_object->synced_data.total_num_parts = num_parts;
            copy_object->synced_data.part_size = part_size;
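            /* Worked example (illustrative only, assuming g_s3_max_num_upload_parts is S3's 10,000-part
             * limit): for a 200 GiB source object, 214748364800 / 10000 is roughly 20.5 MiB, which is below
             * the 64 MiB floor, so part_size becomes 64 MiB and
             * num_parts = ceil(214748364800 / 67108864) = 3200. */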
            AWS_LOGF_DEBUG(
                AWS_LS_S3_META_REQUEST,
                "Starting multi-part Copy using part size=%zu, total_num_parts=%zu",
                part_size,
                (size_t)num_parts);

            /* Create the message to create a new multipart upload. */
            message = aws_s3_create_multipart_upload_message_new(
                meta_request->allocator, meta_request->initial_request_message);
            break;
        }
        /* Prepares the UploadPartCopy sub-request. */
        case AWS_S3_COPY_OBJECT_REQUEST_TAG_MULTIPART_COPY: {

            /* Compute this part's byte range within the source object. Part numbers are 1-based. */
            uint64_t range_start = (request->part_number - 1) * copy_object->synced_data.part_size;
            uint64_t range_end = range_start + copy_object->synced_data.part_size - 1;
            if (range_end >= copy_object->synced_data.content_length) {
                /* Clamp the end of the last part to the end of the object. */
                range_end = copy_object->synced_data.content_length - 1;
            }
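            /* Worked example (illustrative only): with part_size = 64 MiB, part 2 covers bytes
             * [67108864, 134217727]; for a 100 MiB (104857600-byte) object that end is clamped to 104857599. */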
            AWS_LOGF_DEBUG(
                AWS_LS_S3_META_REQUEST,
                "Starting UploadPartCopy for part %" PRIu32 ", range_start=%" PRIu64 ", range_end=%" PRIu64
                ", full object length=%" PRIu64,
                request->part_number,
                range_start,
                range_end,
                copy_object->synced_data.content_length);

            /* Create a new UploadPartCopy message to copy this part. */
            message = aws_s3_upload_part_copy_message_new(
                meta_request->allocator,
                meta_request->initial_request_message,
                &request->request_body,
                request->part_number,
                range_start,
                range_end,
                copy_object->upload_id,
                meta_request->should_compute_content_md5);
            break;
        }
        /* Prepares the CompleteMultipartUpload sub-request. */
        case AWS_S3_COPY_OBJECT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: {

            /* On first preparation, allocate the body buffer; on a retry, reuse the existing allocation. */
            if (request->num_times_prepared == 0) {
                aws_byte_buf_init(
                    &request->request_body, meta_request->allocator, s_complete_multipart_upload_init_body_size_bytes);
            } else {
                aws_byte_buf_reset(&request->request_body, false);
            }

            AWS_FATAL_ASSERT(copy_object->upload_id);
            AWS_ASSERT(request->request_body.capacity > 0);
            aws_byte_buf_reset(&request->request_body, false);
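            /* The completion body is the CompleteMultipartUpload XML document: one <Part> entry per part,
             * pairing each part number with the ETag that S3 returned for the corresponding UploadPartCopy,
             * as accumulated in synced_data.etag_list. */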
            /* Build the message to complete our multipart upload, which includes a payload describing all of
             * our completed parts. */
            message = aws_s3_complete_multipart_message_new(
                meta_request->allocator,
                meta_request->initial_request_message,
                &request->request_body,
                copy_object->upload_id,
                &copy_object->synced_data.etag_list);
            break;
        }
        /* Prepares the AbortMultipartUpload sub-request. */
        case AWS_S3_COPY_OBJECT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD: {
            AWS_FATAL_ASSERT(copy_object->upload_id);

            AWS_LOGF_DEBUG(
                AWS_LS_S3_META_REQUEST,
                "id=%p Abort multipart upload request for upload id %s.",
                (void *)meta_request,
                aws_string_c_str(copy_object->upload_id));

            /* As above, allocate the body buffer on first preparation and reuse it on retries. */
            if (request->num_times_prepared == 0) {
                aws_byte_buf_init(
                    &request->request_body, meta_request->allocator, s_abort_multipart_upload_init_body_size_bytes);
            } else {
                aws_byte_buf_reset(&request->request_body, false);
            }

            /* Build the message to abort our multipart upload. */
            message = aws_s3_abort_multipart_upload_message_new(
                meta_request->allocator, meta_request->initial_request_message, copy_object->upload_id);
            break;
        }
    }
    aws_s3_meta_request_unlock_synced_data(meta_request);

    if (message == NULL) {
        AWS_LOGF_ERROR(
            AWS_LS_S3_META_REQUEST,
            "id=%p Could not allocate message for request with tag %d for CopyObject meta request.",
            (void *)meta_request,
            request->request_tag);
        goto message_create_failed;
    }

    aws_s3_request_setup_send_data(request, message);
    aws_http_message_release(message);

    AWS_LOGF_DEBUG(
        AWS_LS_S3_META_REQUEST,
        "id=%p: Prepared request %p for part %" PRIu32,
        (void *)meta_request,
        (void *)request,
        request->part_number);

    return AWS_OP_SUCCESS;

message_create_failed:

    return AWS_OP_ERR;
}