in source/s3_copy_object.c [574:784]
/**
 * Completion callback for every sub-request issued on behalf of a CopyObject
 * meta request. Dispatches on request->request_tag, records the phase's outcome
 * in copy_object->synced_data (while holding the meta request's synced-data
 * lock), and marks the meta request as failed when error_code indicates an
 * error or a required response element is missing/invalid.
 *
 * @param meta_request the owning meta request (impl must be aws_s3_copy_object)
 * @param request      the finished sub-request
 * @param error_code   AWS_ERROR_SUCCESS or the error the sub-request ended with
 */
static void s_s3_copy_object_request_finished(
    struct aws_s3_meta_request *meta_request,
    struct aws_s3_request *request,
    int error_code) {

    AWS_PRECONDITION(meta_request);
    AWS_PRECONDITION(meta_request->impl);
    AWS_PRECONDITION(request);

    struct aws_s3_copy_object *copy_object = meta_request->impl;

    aws_s3_meta_request_lock_synced_data(meta_request);

    switch (request->request_tag) {

        case AWS_S3_COPY_OBJECT_REQUEST_TAG_GET_OBJECT_SIZE: {
            if (error_code == AWS_ERROR_SUCCESS) {
                struct aws_byte_cursor content_length_cursor;
                if (!aws_http_headers_get(
                        request->send_data.response_headers, g_content_length_header_name, &content_length_cursor)) {
                    /* Parse the source object's size; it decides single-part bypass vs. multipart copy. */
                    if (!aws_byte_cursor_utf8_parse_u64(
                            content_length_cursor, &copy_object->synced_data.content_length)) {
                        copy_object->synced_data.head_object_completed = true;
                    } else {
                        /* HEAD request returned an invalid content-length */
                        aws_s3_meta_request_set_fail_synced(
                            meta_request, request, AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER);
                    }
                } else {
                    /* HEAD request didn't return content-length header */
                    aws_s3_meta_request_set_fail_synced(
                        meta_request, request, AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER);
                }
            } else {
                aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
            }
            break;
        }

        case AWS_S3_COPY_OBJECT_REQUEST_TAG_BYPASS: {
            if (error_code == AWS_ERROR_SUCCESS) {
                copy_object->synced_data.copy_request_bypass_completed = true;
            } else {
                /* Bypassed CopyObject request failed */
                aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
            }
            break;
        }

        case AWS_S3_COPY_OBJECT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD: {
            struct aws_http_headers *needed_response_headers = NULL;

            if (error_code == AWS_ERROR_SUCCESS) {
                needed_response_headers = aws_http_headers_new(meta_request->allocator);
                const size_t copy_header_count =
                    sizeof(s_create_multipart_upload_copy_headers) / sizeof(struct aws_byte_cursor);

                /* Copy any headers now that we'll need for the final, transformed headers later. */
                for (size_t header_index = 0; header_index < copy_header_count; ++header_index) {
                    const struct aws_byte_cursor *header_name = &s_create_multipart_upload_copy_headers[header_index];
                    struct aws_byte_cursor header_value;
                    AWS_ZERO_STRUCT(header_value);

                    if (!aws_http_headers_get(request->send_data.response_headers, *header_name, &header_value)) {
                        aws_http_headers_set(needed_response_headers, *header_name, header_value);
                    }
                }

                struct aws_byte_cursor buffer_byte_cursor = aws_byte_cursor_from_buf(&request->send_data.response_body);

                /* Find the upload id for this multipart upload. */
                struct aws_string *upload_id =
                    get_top_level_xml_tag_value(meta_request->allocator, &s_upload_id, &buffer_byte_cursor);

                if (upload_id == NULL) {
                    AWS_LOGF_ERROR(
                        AWS_LS_S3_META_REQUEST,
                        "id=%p Could not find upload-id in create-multipart-upload response",
                        (void *)meta_request);

                    aws_raise_error(AWS_ERROR_S3_MISSING_UPLOAD_ID);
                    error_code = AWS_ERROR_S3_MISSING_UPLOAD_ID;
                } else {
                    /* Store the multipart upload id. */
                    copy_object->upload_id = upload_id;
                }
            }

            AWS_ASSERT(copy_object->synced_data.needed_response_headers == NULL);
            copy_object->synced_data.needed_response_headers = needed_response_headers;

            copy_object->synced_data.create_multipart_upload_completed = true;
            copy_object->synced_data.create_multipart_upload_error_code = error_code;

            if (error_code != AWS_ERROR_SUCCESS) {
                aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
            }
            break;
        }

        case AWS_S3_COPY_OBJECT_REQUEST_TAG_MULTIPART_COPY: {
            size_t part_number = request->part_number;
            AWS_FATAL_ASSERT(part_number > 0);
            size_t part_index = part_number - 1;

            ++copy_object->synced_data.num_parts_completed;

            AWS_LOGF_DEBUG(
                AWS_LS_S3_META_REQUEST,
                "id=%p: %d out of %d parts have completed.",
                (void *)meta_request,
                copy_object->synced_data.num_parts_completed,
                copy_object->synced_data.total_num_parts);

            if (error_code == AWS_ERROR_SUCCESS) {
                struct aws_string *etag = s_etag_new_from_upload_part_copy_response(
                    meta_request->allocator, &request->send_data.response_body);

                AWS_ASSERT(etag != NULL);

                ++copy_object->synced_data.num_parts_successful;

                if (meta_request->progress_callback != NULL) {
                    struct aws_s3_meta_request_progress progress = {
                        .bytes_transferred = copy_object->synced_data.part_size,
                        .content_length = copy_object->synced_data.content_length};
                    meta_request->progress_callback(meta_request, &progress, meta_request->user_data);
                }

                struct aws_string *null_etag = NULL;

                /* ETags need to be associated with their part number, so we keep the etag indices consistent with
                 * part numbers. This means we may have to add padding to the list in the case that parts finish out
                 * of order. */
                while (aws_array_list_length(&copy_object->synced_data.etag_list) < part_number) {
                    int push_back_result = aws_array_list_push_back(&copy_object->synced_data.etag_list, &null_etag);
                    AWS_FATAL_ASSERT(push_back_result == AWS_OP_SUCCESS);
                }
                aws_array_list_set_at(&copy_object->synced_data.etag_list, &etag, part_index);
            } else {
                ++copy_object->synced_data.num_parts_failed;
                aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
            }
            break;
        }

        case AWS_S3_COPY_OBJECT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: {
            if (error_code == AWS_ERROR_SUCCESS && meta_request->headers_callback != NULL) {
                struct aws_http_headers *final_response_headers = aws_http_headers_new(meta_request->allocator);

                /* Copy all the response headers from this request. */
                copy_http_headers(request->send_data.response_headers, final_response_headers);

                /* Copy over any response headers that we've previously determined are needed for this final
                 * response.
                 */
                copy_http_headers(copy_object->synced_data.needed_response_headers, final_response_headers);

                struct aws_byte_cursor response_body_cursor =
                    aws_byte_cursor_from_buf(&request->send_data.response_body);

                /* Grab the ETag for the entire object, and set it as a header. */
                struct aws_string *etag_header_value =
                    get_top_level_xml_tag_value(meta_request->allocator, &g_etag_header_name, &response_body_cursor);

                if (etag_header_value != NULL) {
                    struct aws_byte_buf etag_header_value_byte_buf;
                    AWS_ZERO_STRUCT(etag_header_value_byte_buf);

                    /* The XML body escapes quotes; un-escape before exposing the ETag header. */
                    replace_quote_entities(meta_request->allocator, etag_header_value, &etag_header_value_byte_buf);

                    aws_http_headers_set(
                        final_response_headers,
                        g_etag_header_name,
                        aws_byte_cursor_from_buf(&etag_header_value_byte_buf));

                    aws_string_destroy(etag_header_value);
                    aws_byte_buf_clean_up(&etag_header_value_byte_buf);
                }

                /* Notify the user of the headers. */
                if (meta_request->headers_callback(
                        meta_request,
                        final_response_headers,
                        request->send_data.response_status,
                        meta_request->user_data)) {
                    error_code = aws_last_error_or_unknown();
                }

                aws_http_headers_release(final_response_headers);
            }

            copy_object->synced_data.complete_multipart_upload_completed = true;
            copy_object->synced_data.complete_multipart_upload_error_code = error_code;

            if (error_code != AWS_ERROR_SUCCESS) {
                aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
            }
            break;
        }

        case AWS_S3_COPY_OBJECT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD: {
            /* Abort is best-effort cleanup; record the outcome but never fail the meta request here. */
            copy_object->synced_data.abort_multipart_upload_error_code = error_code;
            copy_object->synced_data.abort_multipart_upload_completed = true;
            break;
        }
    }

    aws_s3_meta_request_unlock_synced_data(meta_request);
}