in s3plugin/backup.go [195:222]
func uploadFile(sess *session.Session, config *PluginConfig, fileKey string,
	file *os.File) (int64, time.Duration, error) {
	start := time.Now()
	bucket := config.Options.Bucket
	uploadChunkSize := config.Options.UploadChunkSize
	uploadConcurrency := config.Options.UploadConcurrency
	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
		u.PartSize = uploadChunkSize
		u.Concurrency = uploadConcurrency
	})
	gplog.Debug("Uploading file %s with chunksize %d and concurrency %d",
		filepath.Base(fileKey), uploader.PartSize, uploader.Concurrency)
	_, err := uploader.Upload(&s3manager.UploadInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(fileKey),
		// This will cause memory issues if
		// segments_per_host * uploadChunkSize * uploadConcurrency exceeds
		// the amount of RAM the system has: each segment's buffered reader
		// holds uploadChunkSize * uploadConcurrency bytes in memory.
		Body: bufio.NewReaderSize(file, int(uploadChunkSize)*uploadConcurrency),
	})
	if err != nil {
		return 0, -1, errors.Wrapf(err, "Error while uploading %s", fileKey)
	}
	bytes, err := getFileSize(uploader.S3, bucket, fileKey)
	return bytes, time.Since(start), err
}
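
getFileSize is defined elsewhere in the file; the call site above only tells us its shape. A minimal sketch of one way such a helper could work, assuming the AWS SDK for Go v1, where uploader.S3 satisfies s3iface.S3API and a HeadObject request is enough to read back the uploaded object's size (the actual helper in backup.go may be implemented differently):

// Hypothetical sketch, not the repo's implementation. Requires
// "github.com/aws/aws-sdk-go/aws", ".../service/s3", and
// ".../service/s3/s3iface". It looks up the uploaded object's size
// with a HeadObject call so the caller can report how many bytes
// actually landed in the bucket.
func getFileSize(s3Client s3iface.S3API, bucket string, fileKey string) (int64, error) {
	resp, err := s3Client.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(bucket),
		Key:    aws.String(fileKey),
	})
	if err != nil {
		return 0, err
	}
	return *resp.ContentLength, nil
}

Note also that bufio.NewReaderSize allocates its buffer eagerly, so with hypothetical settings of 8 MiB chunks and a concurrency of 6, each file being uploaded pins 48 MiB before any part is sent; that per-file cost, multiplied by the number of segments per host, is the product the in-code comment warns about.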