async function multiChunkCopy()

in source/lambda/copyChunk/index.js [118:167]


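This excerpt leans on module-level setup defined earlier in index.js. A minimal sketch of what that setup plausibly looks like, assuming the AWS SDK for JavaScript v2 (the calls below use .promise()) and environment-supplied names; the db and trigger helper modules and checkAndThrowException are the solution's own code and are only referenced by name here:

const AWS = require('aws-sdk');

// Assumed clients and constants; actual names and paths may differ.
const glacier = new AWS.Glacier();
const s3 = new AWS.S3();
const db = require('./db');           // hypothetical helper path
const trigger = require('./trigger'); // hypothetical helper path

const VAULT = process.env.VAULT;
const STAGING_BUCKET = process.env.STAGING_BUCKET;
const STAGING_BUCKET_PREFIX = process.env.STAGING_BUCKET_PREFIX;

The function itself follows: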
async function multiChunkCopy(uploadId, jobId, archiveId, key, partNo, startByte, endByte, archiveSize, callback) {
    console.log(`chunk upload ${key} - ${partNo} : ${startByte}-${endByte}`)

    // Stream this chunk's byte range straight out of the completed Glacier
    // retrieval job; stream errors are handed to the shared exception
    // handler together with the archive details.
    let glacierStream = glacier.getJobOutput({
        accountId: "-",
        jobId: jobId,
        range: `bytes=${startByte}-${endByte}`,
        vaultName: VAULT
    }).createReadStream().on('error', function (error){
        checkAndThrowException(error.code, archiveId, archiveSize, callback)
    });

    // uploadPart needs a known content length to accept a raw stream;
    // setting .length supplies it (the byte range is inclusive, hence +1).
    glacierStream.length = endByte - startByte + 1;

    // Upload the chunk as one part of the multipart upload into the
    // staging bucket, consuming the Glacier stream directly.
    let uploadResult = await s3.uploadPart({
        UploadId: uploadId,
        Bucket: STAGING_BUCKET,
        Key: `${STAGING_BUCKET_PREFIX}/${key}`,
        PartNumber: partNo,
        Body: glacierStream
    }).promise()

    let etag = uploadResult.ETag;

    console.log(`${key} - ${partNo}: updating chunk etag : ${etag}`)
    // Record this part's ETag on the archive's status record and read back
    // the latest attributes to check the other chunks' progress.
    let statusRecord = await db.updateChunkStatusGetLatest(archiveId, partNo, etag)

    // cc holds the total number of chunks expected for this archive.
    let cc = parseInt(statusRecord.Attributes.cc.N, 10)

    // Count how many chunk attributes already carry an ETag; every
    // completed part records its ETag under its own chunk attribute.
    let count = 0
    for (const entry in statusRecord.Attributes) {
        if (entry.includes("chunk") &&
            statusRecord.Attributes[entry].S) {
            count++
        }
    }

    // Only the invocation that lands the final chunk continues; every
    // other invocation returns and leaves completion to the last writer.
    if (count < cc) return

    console.log(`${key} - ${partNo}: all chunks processed`)
    await closeMultipartUpload(key, uploadId, statusRecord)

    console.log(`${key} : setting complete timestamp`)
    // setTimestampNow runs before calcHash so the record carries a durable
    // marker that closeMultipartUpload completed; only then is calcHash
    // triggered.
    await db.setTimestampNow(statusRecord.Attributes.aid.S, "sgt")

    await trigger.calcHash(statusRecord)
}
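
closeMultipartUpload is defined elsewhere in the file. Since the status record accumulates one ETag-bearing chunk attribute per part (see the counting loop above), here is a plausible sketch of the helper, assuming the attributes are named chunk1, chunk2, and so on; the repository's real implementation may differ:

// Hypothetical sketch: gather the recorded part ETags from the status
// record and complete the S3 multipart upload. The chunk<N> attribute
// naming is an assumption, not confirmed by the excerpt above.
async function closeMultipartUpload(key, uploadId, statusRecord) {
    const parts = Object.keys(statusRecord.Attributes)
        .filter(name => name.startsWith('chunk'))
        .map(name => ({
            PartNumber: parseInt(name.replace('chunk', ''), 10),
            ETag: statusRecord.Attributes[name].S
        }))
        // CompleteMultipartUpload requires ascending part numbers.
        .sort((a, b) => a.PartNumber - b.PartNumber);

    return s3.completeMultipartUpload({
        Bucket: STAGING_BUCKET,
        Key: `${STAGING_BUCKET_PREFIX}/${key}`,
        UploadId: uploadId,
        MultipartUpload: { Parts: parts }
    }).promise();
}

Completing the multipart upload is what makes the staged object visible in S3, which is why the "sgt" timestamp is written only after closeMultipartUpload resolves.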