in source/lambda/calculateTreehash/index.js [33:79]
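// Handles one SQS message per chunk: computes the tree hash for the chunk's byte range,
// records it on the DynamoDB status record, and, once every chunk has a hash, assembles
// the combined treehash and validates it. db, treehash, trigger and validateTreehash are
// defined elsewhere in this file (outside the excerpted range).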
async function handler(event) {
    // Only Records[0] is processed, so the queue is expected to deliver a single message per invocation.
    let {aid, key, partNo, startByte, endByte} = JSON.parse(event.Records[0].body);
    console.log(`${key} - ${partNo} hash : ${startByte}-${endByte}`);
    // vdt is only present at this point if the treehash was already validated on a previous
    // invocation but triggerCopyToDestinationBucket failed afterwards, so retry the copy and exit.
    let resultRecord = await db.getStatusRecord(aid);
    if (resultRecord.Item.vdt && resultRecord.Item.vdt.S) {
        // Mirror Item onto Attributes so the record matches the shape returned by an update.
        resultRecord.Attributes = resultRecord.Item;
        console.log(`${key} : treehash has already been processed. Skipping`);
        await trigger.triggerCopyToDestinationBucket(resultRecord);
        return;
    }
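    // cc holds the total chunk count for this object; hash this invocation's byte range.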
    let cc = parseInt(resultRecord.Item.cc.N, 10);
    let chunkHash = await treehash.getChunkHash(key, partNo, startByte, endByte);
    // Single-chunk object: the chunk hash is the complete treehash, so validate it directly.
    if (cc === 1) {
        resultRecord.Attributes = resultRecord.Item;
        await validateTreehash(chunkHash, resultRecord);
        return;
    }
    // Multi-chunk object: record this chunk's hash and fetch the latest status record.
    let statusRecord = await db.updateChunkStatusGetLatest(aid, partNo, chunkHash);
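    // Count how many chunk attributes already hold a computed hash.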
    let count = 0;
    for (const entry in statusRecord.Attributes) {
        if (
            entry.includes("chunk") &&
            statusRecord.Attributes[entry].S &&
            statusRecord.Attributes[entry].S.length > 40 // the field holds a full hash rather than a shorter etag
        ) {
            count++;
        }
    }
    if (count < cc) return; // not every chunk has been hashed yet
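    // All chunks have reported: assemble the overall treehash from the stored chunk hashes and validate it.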
    let multiPartHash = treehash.calculateMultiPartHash(statusRecord);
    await validateTreehash(multiPartHash, statusRecord);
}