def copy_to_snowball()

in s3booster-snowball-v1.py
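Streams each source file into an in-memory tar archive and ships the archive to S3 as a multipart upload: whenever the buffer grows past max_part_size, every complete part-sized chunk is uploaded as a part and the unread remainder is shifted back to the front of the buffer, so the full tarball never has to exist in memory or on disk at once. Returns the number of files actually added to the archive.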


def copy_to_snowball(tar_name, org_files_list):
    # io, os, and tarfile are imported at module level in s3booster-snowball-v1.py.
    delimiter = ', '
    tar_file_size = 0
    recv_buf = io.BytesIO()        # in-memory buffer the tar stream is written into
    mpu_id = create_mpu(tar_name)  # open a multipart upload for the tar object
    parts_index = 1
    parts = []
    collected_files_no = 0
    # `compression` is a module-level setting ('' writes a plain tar, 'gz' a
    # gzipped one); with 'gz', tarfile.open() also accepts compresslevel
    # (e.g. compresslevel=1) to trade compression ratio for speed.
    with tarfile.open(fileobj=recv_buf, mode='w:' + compression) as tar:
        for file_name, obj_name, file_size in org_files_list:
            if os.path.isfile(file_name):
                try:
                    tar.add(file_name, arcname=obj_name)
                    collected_files_no += 1
                    filelist_log.debug(file_name + delimiter + obj_name + delimiter + str(file_size))
                    recv_buf_size = recv_buf.tell()
                    # Once the buffer exceeds one part size, upload every
                    # complete max_part_size chunk it currently holds.
                    if recv_buf_size > max_part_size:
                        print('multipart uploading: %s / %s, size: %s bytes' % (parts_index, max_part_count, recv_buf_size))
                        chunk_count = recv_buf_size // max_part_size
                        tar_file_size += recv_buf_size
                        for buf_index in range(chunk_count):
                            start_pos = buf_index * max_part_size
                            recv_buf.seek(start_pos, 0)
                            mpu_parts = upload_mpu(tar_name, mpu_id, recv_buf.read(max_part_size), parts_index, parts)
                            parts_index += 1
                        # buf_fifo() (defined elsewhere in this file) moves the
                        # unread tail of the buffer to the front so accumulation
                        # can continue in the same buffer.
                        buf_fifo(recv_buf)
                        recv_buf_size = recv_buf.tell()
                except IOError:
                    error_log.info('%s is ignored' % file_name)
            else:
                error_log.info('%s does not exist' % file_name)
                print(file_name + ' does not exist\n')
    # Upload whatever is left in the buffer as the final part, then
    # finish the multipart upload.
    recv_buf.seek(0, 0)
    mpu_parts = upload_mpu(tar_name, mpu_id, recv_buf.read(), parts_index, parts)
    parts_index += 1
    mpu_parts = adjusting_parts_order(mpu_parts)
    complete_mpu(tar_name, mpu_id, mpu_parts)
    # Print and log the uploaded object's metadata.
    meta_out = s3_client.head_object(Bucket=bucket_name, Key=tar_name)
    print('metadata info: %s\n' % str(meta_out))
    print('%s is uploaded successfully\n' % tar_name)
    success_log.debug('metadata info: %s' % str(meta_out))
    success_log.info('%s uploaded successfully' % tar_name)
    return collected_files_no
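
For orientation, a minimal driver sketch follows. It assumes the module-level configuration from s3booster-snowball-v1.py (compression, max_part_size, max_part_count, bucket_name, s3_client, the loggers, and the MPU helper functions) is already in place; the file paths and tar key below are hypothetical.

# Hypothetical usage; every path and name below is illustrative only.
# Each entry is (local_path, name_inside_tar, size_in_bytes), the shape
# copy_to_snowball() unpacks in its loop.
org_files_list = [
    ('/data/photos/img0001.jpg', 'photos/img0001.jpg', 1048576),
    ('/data/photos/img0002.jpg', 'photos/img0002.jpg', 2097152),
]
tar_name = 'snowball-batch-0001.tar'  # S3 object key the archive is uploaded under
archived = copy_to_snowball(tar_name, org_files_list)
print('%s files archived into %s' % (archived, tar_name))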