fs: add --bwlimit-file flag to limit speeds of individual file transfers

Nick Craig-Wood 2020-06-20 16:10:02 +01:00
parent 8bf265c775
commit 26001d520a
4 changed files with 41 additions and 0 deletions

View File

@ -422,6 +422,20 @@ change the bwlimit dynamically:
rclone rc core/bwlimit rate=1M
### --bwlimit-file=BANDWIDTH_SPEC ###
This option controls the bandwidth limit applied to each individual file
transfer. It accepts the same values as the `--bwlimit` flag.
For example, use this to ensure that no single file is transferred faster
than 1MByte/s:
--bwlimit-file 1M
This can be used in conjunction with `--bwlimit`, which limits the total
bandwidth across all transfers.
Note that if a timetable is supplied, each file uses the limit that is in
effect when its transfer starts; the limit is not adjusted mid-transfer.
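For example, to keep the total bandwidth under 10MByte/s while also capping each individual file at 1MByte/s (the source and destination below are placeholders):
rclone sync /path/to/src remote:dst --bwlimit 10M --bwlimit-file 1M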
### --buffer-size=SIZE ###
Use this sized buffer to speed up file transfers. Each `--transfer`

View File

@ -2,6 +2,7 @@
package accounting

import (
	"context"
	"fmt"
	"io"
	"sync"
@ -9,6 +10,7 @@ import (
"unicode/utf8"
"github.com/rclone/rclone/fs/rc"
"golang.org/x/time/rate"
"github.com/pkg/errors"
"github.com/rclone/rclone/fs"
@ -42,6 +44,8 @@ type Account struct {
	exit        chan struct{} // channel that will be closed when transfer is finished
	withBuf     bool          // is using a buffered in (input reader)
	tokenBucket *rate.Limiter // per file bandwidth limiter (may be nil)
	values      accountValues
}
@ -78,6 +82,12 @@ func newAccountSizeName(stats *StatsInfo, in io.ReadCloser, size int64, name str
	if fs.Config.CutoffMode == fs.CutoffModeHard {
		acc.values.max = int64(fs.Config.MaxTransfer)
	}
	currLimit := fs.Config.BwLimitFile.LimitAt(time.Now())
	if currLimit.Bandwidth > 0 {
		fs.Debugf(acc.name, "Limiting file transfer to %v", currLimit.Bandwidth)
		acc.tokenBucket = newTokenBucket(currLimit.Bandwidth)
	}
	go acc.averageLoop()
	stats.inProgress.set(acc.name, acc)
	return acc
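The call to newTokenBucket above uses the accounting package's existing helper, which is not shown in this diff. Here is a minimal sketch of what such a helper could look like, assuming it simply wraps golang.org/x/time/rate with a fixed burst size (the maxBurstSize constant below is an assumption, not the real value):

package accounting

import (
	"time"

	"github.com/rclone/rclone/fs"
	"golang.org/x/time/rate"
)

// maxBurstSize is an assumed burst size; the real constant lives
// elsewhere in the accounting package.
const maxBurstSize = 4 * 1024 * 1024

// newTokenBucket returns a limiter that admits bandwidth bytes per
// second. Draining the initial burst makes the limit apply from the
// first read rather than after one unthrottled burst.
func newTokenBucket(bandwidth fs.SizeSuffix) *rate.Limiter {
	tb := rate.NewLimiter(rate.Limit(bandwidth), maxBurstSize)
	tb.AllowN(time.Now(), maxBurstSize) // start with an empty bucket
	return tb
}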
@ -257,6 +267,20 @@ func (acc *Account) ServerSideCopyEnd(n int64) {
	acc.stats.Bytes(n)
}

// limitPerFileBandwidth applies the per file bandwidth limit (if any),
// blocking until the limiter allows another n bytes.
func (acc *Account) limitPerFileBandwidth(n int) {
	acc.values.mu.Lock()
	tokenBucket := acc.tokenBucket
	acc.values.mu.Unlock()
	if tokenBucket != nil {
		err := tokenBucket.WaitN(context.Background(), n)
		if err != nil {
			fs.Errorf(nil, "Token bucket error: %v", err)
		}
	}
}
// Account the read and limit bandwidth
func (acc *Account) accountRead(n int) {
	// Update Stats
@ -268,6 +292,7 @@ func (acc *Account) accountRead(n int) {
	acc.stats.Bytes(int64(n))
	limitBandwidth(n)
	acc.limitPerFileBandwidth(n)
}
// read bytes from the io.Reader passed in and account them
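limitPerFileBandwidth is called from accountRead after every read, so a transfer simply blocks until its token bucket releases enough tokens. As a standalone illustration of that pattern (not part of the commit; the type, rates and sizes below are arbitrary), an io.Reader can be throttled with golang.org/x/time/rate like this:

package main

import (
	"context"
	"io"
	"strings"

	"golang.org/x/time/rate"
)

// limitedReader blocks in Read until the token bucket has released as
// many tokens as bytes just read, mirroring what accountRead does.
type limitedReader struct {
	r  io.Reader
	tb *rate.Limiter
}

func (l *limitedReader) Read(p []byte) (int, error) {
	n, err := l.r.Read(p)
	if n > 0 {
		if werr := l.tb.WaitN(context.Background(), n); werr != nil && err == nil {
			err = werr
		}
	}
	return n, err
}

func main() {
	src := strings.NewReader(strings.Repeat("x", 64*1024))
	// 16 KiB/s with a 32 KiB burst: copying 64 KiB takes roughly two seconds.
	lr := &limitedReader{r: src, tb: rate.NewLimiter(16*1024, 32*1024)}
	_, _ = io.Copy(io.Discard, lr)
}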

View File

@ -83,6 +83,7 @@ type ConfigInfo struct {
	UseListR      bool
	BufferSize    SizeSuffix
	BwLimit       BwTimetable
	BwLimitFile   BwTimetable
	TPSLimit      float64
	TPSLimitBurst int
	BindAddr      net.IP
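BwLimitFile reuses the existing BwTimetable type, so it accepts either a single value or a full timetable, exactly like BwLimit. A rough sketch of exercising the type directly, assuming the timetable syntax documented for --bwlimit (the times and rates below are illustrative):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/rclone/rclone/fs"
)

func main() {
	var cfg fs.ConfigInfo
	// BwTimetable implements flag-style parsing via Set; this is the same
	// entry point used when --bwlimit-file is registered with FVarP.
	if err := cfg.BwLimitFile.Set("08:00,512 18:00,10M 23:00,off"); err != nil {
		log.Fatal(err)
	}
	// LimitAt picks the slot in effect at the given time, just as the
	// accounting code does when a transfer starts.
	slot := cfg.BwLimitFile.LimitAt(time.Now())
	fmt.Println("per file limit right now:", slot.Bandwidth)
}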

View File

@ -96,6 +96,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
	flags.FVarP(flagSet, &fs.Config.LogLevel, "log-level", "", "Log level DEBUG|INFO|NOTICE|ERROR")
	flags.FVarP(flagSet, &fs.Config.StatsLogLevel, "stats-log-level", "", "Log level to show --stats output DEBUG|INFO|NOTICE|ERROR")
	flags.FVarP(flagSet, &fs.Config.BwLimit, "bwlimit", "", "Bandwidth limit in kBytes/s, or use suffix b|k|M|G or a full timetable.")
	flags.FVarP(flagSet, &fs.Config.BwLimitFile, "bwlimit-file", "", "Bandwidth limit per file in kBytes/s, or use suffix b|k|M|G or a full timetable.")
	flags.FVarP(flagSet, &fs.Config.BufferSize, "buffer-size", "", "In memory buffer size when reading files for each --transfer.")
	flags.FVarP(flagSet, &fs.Config.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
flags.FVarP(flagSet, &fs.Config.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)