in s3_diver/s3_diver_03-test.py [0:0]
import io
import os
import tarfile

# NOTE: this function relies on module-level names defined elsewhere in this
# script: s3, bucket_name, max_part_size, max_part_count, and the helpers
# create_mpu, upload_mpu, adjusting_parts_order, complete_mpu, buf_fifo,
# log_success, log_error.
def copy_to_snowball(error_log, success_log, key_name, org_files_list):
    tar_file_size = 0
    recv_buf = io.BytesIO()        # in-memory buffer that receives the tar stream
    mpu_id = create_mpu(key_name)  # start a multipart upload for the tar object
    parts_index = 1
    s_log = success_log
    e_log = error_log
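    # Strategy: stream each source file into an in-memory tar archive; whenever
    # the buffered tar data exceeds max_part_size, flush full-size chunks to S3
    # as multipart-upload parts, then keep accumulating.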
    with tarfile.open(fileobj=recv_buf, mode="w") as tar:
        for org_file in org_files_list:
            if os.path.isfile(org_file):
                tar.add(org_file)
                log_success(s_log, org_file, " is archived\n")
                recv_buf_size = recv_buf.tell()
                if recv_buf_size > max_part_size:
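                    # threshold crossed: flush every complete max_part_size
                    # chunk now; any remainder stays buffered (see buf_fifo below)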
                    print('multipart uploading: %s / %s, size: %s' % (parts_index, max_part_count, recv_buf_size))
                    chunk_count = recv_buf_size // max_part_size
                    # count only the bytes flushed this round; the remainder
                    # stays in the buffer and would otherwise be double-counted
                    # on the next flush
                    tar_file_size = tar_file_size + chunk_count * max_part_size
                    print('%s is accumulating, size: %s' % (key_name, tar_file_size))
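                    # upload each complete chunk as its own multipart part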
                    for buf_index in range(chunk_count):
                        start_pos = buf_index * max_part_size
                        recv_buf.seek(start_pos, 0)
                        mpu_parts = upload_mpu(key_name, mpu_id, recv_buf.read(max_part_size), parts_index)
                        parts_index += 1
                    # buf_fifo() is assumed to shift the unsent remainder (an
                    # incomplete trailing chunk) to the front of the buffer and
                    # truncate it, so accumulation can continue
                    buf_fifo(recv_buf)
                    recv_buf_size = recv_buf.tell()
                else:
                    pass  # below the part-size threshold; keep accumulating files
            else:
                log_error(e_log, org_file, " does not exist\n")
                print(org_file + ' does not exist\n')
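    # leaving the "with" block closes the tar, which appends the end-of-archive
    # blocks; upload whatever is left in the buffer as the final (short) part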
    recv_buf.seek(0, 0)
    mpu_parts = upload_mpu(key_name, mpu_id, recv_buf.read(), parts_index)
    parts_index += 1
    mpu_parts = adjusting_parts_order(mpu_parts)
    complete_mpu(key_name, mpu_id, mpu_parts)
    # print the metadata of the uploaded object
    meta_out = s3.head_object(Bucket=bucket_name, Key=key_name)
    print('\n metadata info: %s' % str(meta_out))
    log_success(s_log, str(meta_out), '!!\n')
    print('\n tar file: %s \n' % key_name)
    log_success(s_log, key_name, ' is uploaded successfully\n')
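
# A minimal usage sketch, assuming the surrounding script defines bucket_name,
# max_part_size, max_part_count, and the helper functions noted above (the file
# paths and log names here are illustrative, not from the original script):
#
#   error_log = open('error.log', 'a')
#   success_log = open('success.log', 'a')
#   copy_to_snowball(error_log, success_log,
#                    'archive-0001.tar',
#                    ['/data/file1.bin', '/data/file2.bin'])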