diff --git a/backend/s3/s3.go b/backend/s3/s3.go
index d2cef2e63..b2cd37ccf 100644
--- a/backend/s3/s3.go
+++ b/backend/s3/s3.go
@@ -537,10 +537,11 @@ const (
 // Globals
 var (
 	// Flags
-	s3ACL             = flags.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3")
-	s3StorageClass    = flags.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA|ONEZONE_IA)")
-	s3ChunkSize       = fs.SizeSuffix(s3manager.MinUploadPartSize)
-	s3DisableChecksum = flags.BoolP("s3-disable-checksum", "", false, "Don't store MD5 checksum with object metadata")
+	s3ACL               = flags.StringP("s3-acl", "", "", "Canned ACL used when creating buckets and/or storing objects in S3")
+	s3StorageClass      = flags.StringP("s3-storage-class", "", "", "Storage class to use when uploading S3 objects (STANDARD|REDUCED_REDUNDANCY|STANDARD_IA|ONEZONE_IA)")
+	s3ChunkSize         = fs.SizeSuffix(s3manager.MinUploadPartSize)
+	s3DisableChecksum   = flags.BoolP("s3-disable-checksum", "", false, "Don't store MD5 checksum with object metadata")
+	s3UploadConcurrency = flags.IntP("s3-upload-concurrency", "", 2, "Concurrency for multipart uploads")
 )
 
 // Fs represents a remote s3 server
@@ -1352,7 +1353,7 @@ func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOptio
 	size := src.Size()
 
 	uploader := s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
-		u.Concurrency = 2
+		u.Concurrency = *s3UploadConcurrency
 		u.LeavePartsOnError = false
 		u.S3 = o.fs.c
 		u.PartSize = int64(s3ChunkSize)
diff --git a/docs/content/s3.md b/docs/content/s3.md
index b12b27a87..d71939aa3 100644
--- a/docs/content/s3.md
+++ b/docs/content/s3.md
@@ -402,6 +402,15 @@ Note that 2 chunks of this size are buffered in memory per transfer.
 If you are transferring large files over high speed links and you have
 enough memory, then increasing this will speed up the transfers.
 
+#### --s3-upload-concurrency ####
+
+Number of chunks of the same file that are uploaded concurrently.
+Default is 2.
+
+If you are uploading a small number of large files over high-speed
+links and these uploads do not fully utilize your bandwidth, then
+increasing this may help to speed up the transfers.
+
 ### Anonymous access to public buckets ###
 
 If you want to use rclone to access a public bucket, configure with a
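
With the patch applied, the new flag is passed like any other rclone flag;
the remote name and paths here are illustrative:

    rclone copy --s3-upload-concurrency 8 /local/path remote:bucket/path

The option the patch plumbs through ends up on aws-sdk-go's s3manager
Uploader. For background, a minimal stand-alone sketch of the same knobs
(Concurrency, LeavePartsOnError, PartSize) used directly; the region,
bucket and file names are hypothetical and not part of this change:

    // uploadsketch.go: stand-alone illustration of the s3manager options
    // that --s3-upload-concurrency exposes. Not rclone code.
    package main

    import (
    	"log"
    	"os"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/s3/s3manager"
    )

    func main() {
    	// Region, bucket and file name are placeholders for the sketch.
    	sess := session.Must(session.NewSession(&aws.Config{
    		Region: aws.String("us-east-1"),
    	}))

    	f, err := os.Open("large-file.bin")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	// The same three knobs Object.Update sets: how many parts of one
    	// object are uploaded in parallel, whether a failed multipart
    	// upload is aborted, and the size of each part.
    	uploader := s3manager.NewUploader(sess, func(u *s3manager.Uploader) {
    		u.Concurrency = 4 // patch default is 2, raised via the new flag
    		u.LeavePartsOnError = false
    		u.PartSize = s3manager.MinUploadPartSize // 5 MiB, the SDK minimum
    	})

    	out, err := uploader.Upload(&s3manager.UploadInput{
    		Bucket: aws.String("my-bucket"),
    		Key:    aws.String("large-file.bin"),
    		Body:   f,
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("uploaded to %s", out.Location)
    }

Raising Concurrency multiplies the per-transfer buffer memory accordingly
(roughly Concurrency x PartSize), which is why the docs hedge the advice
toward high-bandwidth links with memory to spare.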