chunksize: initial implementation of chunksize helper lib

This commit is contained in:
Derek Battams 2022-05-06 15:24:27 -04:00 committed by Nick Craig-Wood
parent 9e4854955c
commit f2e7a2e794
2 changed files with 76 additions and 0 deletions

36
fs/chunksize/chunksize.go Normal file
View File

@ -0,0 +1,36 @@
// Package chunksize calculates a suitable chunk size for large uploads
package chunksize
import (
"github.com/rclone/rclone/fs"
)
/*
Calculator calculates the minimum chunk size needed to fit within the maximum number of parts, rounded up to the nearest fs.Mebi

For most backends, (chunk_size) * (concurrent_upload_routines) memory will be required so we want to use the smallest
possible chunk size that's going to allow the upload to proceed. Rounding up to the nearest fs.Mebi on the assumption
that some backends may only allow integer type parameters when specifying the chunk size.

Returns the default chunk size if it is large enough to support the given file size otherwise returns the
smallest chunk size necessary to allow the upload to proceed.
*/
func Calculator(objInfo fs.ObjectInfo, maxParts int, defaultChunkSize fs.SizeSuffix) fs.SizeSuffix {
	fileSize := fs.SizeSuffix(objInfo.Size())
	parts := fs.SizeSuffix(maxParts)
	// The default chunk size suffices when ceil(fileSize/defaultChunkSize) <= maxParts,
	// i.e. the floored quotient is below maxParts, or equals it with no remainder.
	// (An unknown size of -1 also takes this path and keeps the default.)
	requiredChunks := fileSize / defaultChunkSize
	if requiredChunks < parts || (requiredChunks == parts && fileSize%defaultChunkSize == 0) {
		return defaultChunkSize
	}
	// The smallest chunk size that fits the file into maxParts parts is
	// ceil(fileSize/maxParts); compute it with integer ceiling division.
	minChunk := (fileSize + parts - 1) / parts
	// Round up to a whole number of MiB, as some backends only accept
	// integer MiB values for the chunk size.
	if remainder := minChunk % fs.Mebi; remainder != 0 {
		minChunk += fs.Mebi - remainder
	}
	fs.Debugf(objInfo, "size: %v, parts: %v, default: %v, new: %v; default chunk size insufficient, returned new chunk size", fileSize, maxParts, defaultChunkSize, minChunk)
	return minChunk
}

View File

@ -0,0 +1,40 @@
package chunksize
import (
"testing"
"time"
"github.com/rclone/rclone/fs"
"github.com/rclone/rclone/fs/object"
)
// TestComputeChunkSize verifies that Calculator keeps the default chunk
// size when it already fits the file into maxParts, and otherwise returns
// the smallest MiB-rounded chunk size that does.
func TestComputeChunkSize(t *testing.T) {
	for name, tt := range map[string]struct {
		fileSize         fs.SizeSuffix
		maxParts         int
		defaultChunkSize fs.SizeSuffix
		expected         fs.SizeSuffix
	}{
		"default size returned when file size is small enough":             {fileSize: 1000, maxParts: 10000, defaultChunkSize: toSizeSuffixMiB(10), expected: toSizeSuffixMiB(10)},
		"default size returned when file size is just 1 byte small enough": {fileSize: toSizeSuffixMiB(100000) - 1, maxParts: 10000, defaultChunkSize: toSizeSuffixMiB(10), expected: toSizeSuffixMiB(10)},
		"no rounding up when everything divides evenly":                    {fileSize: toSizeSuffixMiB(1000000), maxParts: 10000, defaultChunkSize: toSizeSuffixMiB(100), expected: toSizeSuffixMiB(100)},
		"rounding up to nearest MiB when not quite enough parts":           {fileSize: toSizeSuffixMiB(1000000), maxParts: 9999, defaultChunkSize: toSizeSuffixMiB(100), expected: toSizeSuffixMiB(101)},
		"rounding up to nearest MiB when one extra byte":                   {fileSize: toSizeSuffixMiB(1000000) + 1, maxParts: 10000, defaultChunkSize: toSizeSuffixMiB(100), expected: toSizeSuffixMiB(101)},
		"expected MiB value when rounding sets to absolute minimum":        {fileSize: toSizeSuffixMiB(1) - 1, maxParts: 1, defaultChunkSize: toSizeSuffixMiB(1), expected: toSizeSuffixMiB(1)},
		"expected MiB value when rounding to absolute min with extra":      {fileSize: toSizeSuffixMiB(1) + 1, maxParts: 1, defaultChunkSize: toSizeSuffixMiB(1), expected: toSizeSuffixMiB(2)},
	} {
		t.Run(name, func(t *testing.T) {
			// Wrap the raw size in a minimal ObjectInfo for Calculator.
			info := object.NewStaticObjectInfo("mock", time.Now(), int64(tt.fileSize), true, nil, nil)
			if got := Calculator(info, tt.maxParts, tt.defaultChunkSize); got != tt.expected {
				t.Fatalf("expected: %v, got: %v", tt.expected, got)
			}
		})
	}
}
// toSizeSuffixMiB converts a count of MiB into an fs.SizeSuffix in bytes.
func toSizeSuffixMiB(size int64) fs.SizeSuffix {
	return fs.SizeSuffix(size) * fs.Mebi
}