in source/lambda/calculateTreehash/index.js [127:180]
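/**
 * Handles a treehash mismatch for a copied archive: deletes the corrupt
 * object from the staging bucket, increments the retry counter, wipes the
 * stored treehash and per-chunk status, and re-queues the archive so the
 * copy starts over.
 */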
async function failArchiveAndRetry(statusRecord, key) {
  const params = {
Bucket: STAGING_BUCKET,
Key: key,
};
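  // Best-effort delete: a failure here is logged but does not block the retry.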
try {
console.log(
`Deleting object ${key} from Staging Bucket ${STAGING_BUCKET} after mismatch in hash comparison`
);
await s3.deleteObject(params).promise();
} catch (e) {
console.error(
`Error deleting object ${key} from Staging Bucket ${STAGING_BUCKET} after mismatch in hash comparison`
);
console.error(e);
}
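  // Increment the retry counter (rc) on the archive's DynamoDB status record.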
const updatedItem = await db.incrementRetryCount(
statusRecord.Attributes.aid.S,
"rc"
);
  const retryCount = parseInt(updatedItem.Attributes.rc.N, 10);
console.error(
`Submitting retry copy request message on SQS. New retry counter is ${retryCount} for ${key}`
);
  // Wipe the sgt attribute and the chunks' status records, then start the copy over.
await db.deleteItem(statusRecord.Attributes.aid.S, "sgt");
console.log(`${key} : sgt deleted`);
  await db.deleteChunkStatus(
    statusRecord.Attributes.aid.S,
    await getListOfChunks(statusRecord)
  );
  // Repost the archive id and job id to SQS (ArchiveRetrievalNotificationQueue) to trigger another copy.
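  // The nested, stringified Message mimics the SNS notification envelope that consumers of this queue expect.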
  const messageBody = JSON.stringify({
    Message: JSON.stringify({
      Action: "RetryRequest",
      JobId: statusRecord.Attributes.jobId.S,
      ArchiveId: statusRecord.Attributes.aid.S,
      ArchiveSizeInBytes: parseInt(statusRecord.Attributes.sz.N, 10), // convert the DynamoDB numeric string to a number
    }),
  });
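  // Resolve the notification queue URL, then send the retry request.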
  const notificationQueueUrl = await sqs
    .getQueueUrl({
      QueueName: SQS_ARCHIVE_NOTIFICATION,
    })
    .promise();
  return await sqs
    .sendMessage({
      QueueUrl: notificationQueueUrl.QueueUrl,
      MessageBody: messageBody,
    })
    .promise();
}