sync: add a buffer for checks, uploads and renames #379

--max-backlog controls the queue length.

Add statistics for the check/upload/rename queues.

This means that checking can complete before the uploads, which gives
rclone the ability to show exactly what is outstanding.
Nick Craig-Wood 2018-07-19 22:41:34 +01:00
parent eb84b58d3c
commit cb7a461287
8 changed files with 353 additions and 117 deletions

docs/content/docs.md

@@ -535,6 +535,22 @@ to reduce the value so rclone moves on to a high level retry (see the
 Disable low level retries with `--low-level-retries 1`.
 
+### --max-backlog=N ###
+
+This is the maximum allowable backlog of files in a sync/copy/move
+queued for being checked or transferred.
+
+This can be set arbitrarily large. It will only use memory when the
+queue is in use. Note that it will use in the order of N kB of memory
+when the backlog is in use.
+
+Setting this large allows rclone to calculate how many files are
+pending more accurately and give a more accurate estimated finish
+time.
+
+Setting this small will make rclone more synchronous to the listings
+of the remote which may be desirable.
+
 ### --max-delete=N ###
 
 This tells rclone not to delete more than N files. If that limit is
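As an illustrative example of the tradeoff described above (the values here are hypothetical, not from the commit): syncing a tree of a few hundred thousand files with `--max-backlog 200000` lets the whole tree sit in the backlog at a cost on the order of 200 MB while the queue is full, whereas the default of 10000 costs on the order of 10 MB.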

fs/accounting/stats.go

@@ -71,8 +71,14 @@ type StatsInfo struct {
     lastError         error
     checks            int64
     checking          *stringSet
+    checkQueue        int
+    checkQueueSize    int64
     transfers         int64
     transferring      *stringSet
+    transferQueue     int
+    transferQueueSize int64
+    renameQueue       int
+    renameQueueSize   int64
     deletes           int64
     start             time.Time
     inProgress        *inProgress
@@ -294,3 +300,27 @@ func (s *StatsInfo) DoneTransferring(remote string, ok bool) {
         s.mu.Unlock()
     }
 }
+
+// SetCheckQueue sets the number of queued checks
+func (s *StatsInfo) SetCheckQueue(n int, size int64) {
+    s.mu.Lock()
+    s.checkQueue = n
+    s.checkQueueSize = size
+    s.mu.Unlock()
+}
+
+// SetTransferQueue sets the number of queued transfers
+func (s *StatsInfo) SetTransferQueue(n int, size int64) {
+    s.mu.Lock()
+    s.transferQueue = n
+    s.transferQueueSize = size
+    s.mu.Unlock()
+}
+
+// SetRenameQueue sets the number of queued renames
+func (s *StatsInfo) SetRenameQueue(n int, size int64) {
+    s.mu.Lock()
+    s.renameQueue = n
+    s.renameQueueSize = size
+    s.mu.Unlock()
+}
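These setters are meant to be handed around as callbacks rather than called directly: in the sync changes below, each queue is constructed with one of them, for example `newPipe(accounting.Stats.SetCheckQueue, fs.Config.MaxBacklog)`, so the queue statistics are refreshed on every Put and Get.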

fs/config.go

@@ -81,6 +81,7 @@ type ConfigInfo struct {
     AskPassword      bool
     UseServerModTime bool
     MaxTransfer      SizeSuffix
+    MaxBacklog       int
 }
 
 // NewConfig creates a new config with everything set to the default
@@ -109,6 +110,7 @@ func NewConfig() *ConfigInfo {
     c.AskPassword = true
     c.TPSLimitBurst = 1
     c.MaxTransfer = -1
+    c.MaxBacklog = 10000
     return c
 }

fs/config/configflags/configflags.go

@@ -83,6 +83,7 @@ func AddFlags(flagSet *pflag.FlagSet) {
     flags.FVarP(flagSet, &fs.Config.StreamingUploadCutoff, "streaming-upload-cutoff", "", "Cutoff for switching to chunked upload if file size is unknown. Upload starts after reaching cutoff or when file ends.")
     flags.FVarP(flagSet, &fs.Config.Dump, "dump", "", "List of items to dump from: "+fs.DumpFlagsList)
     flags.FVarP(flagSet, &fs.Config.MaxTransfer, "max-transfer", "", "Maximum size of data to transfer.")
+    flags.IntVarP(flagSet, &fs.Config.MaxBacklog, "max-backlog", "", fs.Config.MaxBacklog, "Maximum number of objects in sync or check backlog.")
 }
 
 // SetFlags converts any flags into config which weren't straight foward

fs/fs.go

@@ -827,9 +827,6 @@ type ObjectPair struct {
     Src, Dst Object
 }
 
-// ObjectPairChan is a channel of ObjectPair
-type ObjectPairChan chan ObjectPair
-
 // Find looks for an Info object for the name passed in
 //
 // Services are looked up in the config file

fs/sync/pipe.go (new file, 100 lines)

@@ -0,0 +1,100 @@
package sync

import (
    "context"
    "sync"

    "github.com/ncw/rclone/fs"
)

// pipe provides an unbounded channel-like experience
//
// Note that unlike channels these aren't strictly ordered.
type pipe struct {
    mu        sync.Mutex
    c         chan struct{}
    queue     []fs.ObjectPair
    closed    bool
    totalSize int64
    stats     func(items int, totalSize int64)
}

func newPipe(stats func(items int, totalSize int64), maxBacklog int) *pipe {
    return &pipe{
        c:     make(chan struct{}, maxBacklog),
        stats: stats,
    }
}

// Put a pair into the pipe
//
// It returns ok = false if the context was cancelled
//
// It will panic if you call it after Close()
func (p *pipe) Put(ctx context.Context, pair fs.ObjectPair) (ok bool) {
    if ctx.Err() != nil {
        return false
    }
    p.mu.Lock()
    p.queue = append(p.queue, pair)
    size := pair.Src.Size()
    if size > 0 {
        p.totalSize += size
    }
    p.stats(len(p.queue), p.totalSize)
    p.mu.Unlock()
    select {
    case <-ctx.Done():
        return false
    case p.c <- struct{}{}:
    }
    return true
}

// Get a pair from the pipe
//
// It returns ok = false if the context was cancelled or Close() has
// been called.
func (p *pipe) Get(ctx context.Context) (pair fs.ObjectPair, ok bool) {
    if ctx.Err() != nil {
        return
    }
    select {
    case <-ctx.Done():
        return
    case _, ok = <-p.c:
        if !ok {
            return
        }
    }
    p.mu.Lock()
    pair, p.queue = p.queue[0], p.queue[1:]
    size := pair.Src.Size()
    if size > 0 {
        p.totalSize -= size
    }
    if p.totalSize < 0 {
        p.totalSize = 0
    }
    p.stats(len(p.queue), p.totalSize)
    p.mu.Unlock()
    return pair, true
}

// Stats reads the number of items in the queue and the totalSize
func (p *pipe) Stats() (items int, totalSize int64) {
    p.mu.Lock()
    items, totalSize = len(p.queue), p.totalSize
    p.mu.Unlock()
    return items, totalSize
}

// Close the pipe
//
// Writes to a closed pipe will panic as will double closing a pipe
func (p *pipe) Close() {
    p.mu.Lock()
    close(p.c)
    p.closed = true
    p.mu.Unlock()
}
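The design is worth spelling out: the buffered channel c carries one token per queued item, so Put only blocks once maxBacklog tokens are outstanding, Get blocks until a token arrives (or the pipe is closed or the context cancelled), and the mutex-protected slice holds the pairs themselves. A stripped-down, self-contained sketch of the same pattern (not part of the commit; the payload is simplified to int, and Close and the stats callback are omitted):

package main

import (
    "context"
    "fmt"
    "sync"
)

// pipe is a cut-down version of the structure above: a buffered
// channel of tokens limits how many items can be outstanding, while
// a mutex-protected slice holds the items themselves.
type pipe struct {
    mu    sync.Mutex
    c     chan struct{}
    queue []int
}

func newPipe(maxBacklog int) *pipe {
    return &pipe{c: make(chan struct{}, maxBacklog)}
}

// Put queues v, blocking once maxBacklog items are outstanding.
func (p *pipe) Put(ctx context.Context, v int) bool {
    p.mu.Lock()
    p.queue = append(p.queue, v)
    p.mu.Unlock()
    select {
    case <-ctx.Done():
        return false
    case p.c <- struct{}{}: // blocks when the token buffer is full
        return true
    }
}

// Get removes and returns the oldest queued item.
func (p *pipe) Get(ctx context.Context) (int, bool) {
    select {
    case <-ctx.Done():
        return 0, false
    case <-p.c: // one token per queued item
    }
    p.mu.Lock()
    v := p.queue[0]
    p.queue = p.queue[1:]
    p.mu.Unlock()
    return v, true
}

func main() {
    ctx := context.Background()
    p := newPipe(4)
    for i := 1; i <= 3; i++ {
        p.Put(ctx, i)
    }
    for i := 0; i < 3; i++ {
        v, _ := p.Get(ctx)
        fmt.Println(v) // prints 1, 2, 3
    }
}

Because the token is sent only after the item has been appended, a token in the channel always guarantees an item in the queue.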

fs/sync/pipe_test.go (new file, 122 lines)

@@ -0,0 +1,122 @@
package sync

import (
    "context"
    "sync"
    "sync/atomic"
    "testing"

    "github.com/ncw/rclone/fs"
    "github.com/ncw/rclone/fstest/mockobject"
    "github.com/stretchr/testify/assert"
)

func TestPipe(t *testing.T) {
    var queueLength int
    var queueSize int64
    stats := func(n int, size int64) {
        queueLength, queueSize = n, size
    }

    // Make a new pipe
    p := newPipe(stats, 10)

    checkStats := func(expectedN int, expectedSize int64) {
        n, size := p.Stats()
        assert.Equal(t, expectedN, n)
        assert.Equal(t, expectedSize, size)
        assert.Equal(t, expectedN, queueLength)
        assert.Equal(t, expectedSize, queueSize)
    }

    checkStats(0, 0)

    ctx := context.Background()

    obj1 := mockobject.New("potato").WithContent([]byte("hello"), mockobject.SeekModeNone)
    pair1 := fs.ObjectPair{Src: obj1, Dst: nil}

    // Put an object
    ok := p.Put(ctx, pair1)
    assert.Equal(t, true, ok)
    checkStats(1, 5)

    // Close the pipe showing reading on closed pipe is OK
    p.Close()

    // Read from pipe
    pair2, ok := p.Get(ctx)
    assert.Equal(t, pair1, pair2)
    assert.Equal(t, true, ok)
    checkStats(0, 0)

    // Check read on closed pipe
    pair2, ok = p.Get(ctx)
    assert.Equal(t, fs.ObjectPair{}, pair2)
    assert.Equal(t, false, ok)

    // Check panic on write to closed pipe
    assert.Panics(t, func() { p.Put(ctx, pair1) })

    // Make a new pipe
    p = newPipe(stats, 10)
    ctx2, cancel := context.WithCancel(ctx)

    // cancel it in the background - check read ceases
    go cancel()
    pair2, ok = p.Get(ctx2)
    assert.Equal(t, fs.ObjectPair{}, pair2)
    assert.Equal(t, false, ok)

    // check we can't write
    ok = p.Put(ctx2, pair1)
    assert.Equal(t, false, ok)
}

// TestPipeConcurrent runs concurrent Get and Put to flush out any
// race conditions and concurrency problems.
func TestPipeConcurrent(t *testing.T) {
    const (
        N           = 1000
        readWriters = 10
    )

    stats := func(n int, size int64) {}

    // Make a new pipe
    p := newPipe(stats, 10)

    var wg sync.WaitGroup
    obj1 := mockobject.New("potato").WithContent([]byte("hello"), mockobject.SeekModeNone)
    pair1 := fs.ObjectPair{Src: obj1, Dst: nil}
    ctx := context.Background()
    var count int64

    for j := 0; j < readWriters; j++ {
        wg.Add(2)
        go func() {
            defer wg.Done()
            for i := 0; i < N; i++ {
                // Read from pipe
                pair2, ok := p.Get(ctx)
                assert.Equal(t, pair1, pair2)
                assert.Equal(t, true, ok)
                atomic.AddInt64(&count, -1)
            }
        }()
        go func() {
            defer wg.Done()
            for i := 0; i < N; i++ {
                // Put an object
                ok := p.Put(ctx, pair1)
                assert.Equal(t, true, ok)
                atomic.AddInt64(&count, 1)
            }
        }()
    }
    wg.Wait()
    assert.Equal(t, int64(0), count)
}
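The final assertion is what gives the concurrent test its teeth: every successful Put increments the shared counter and every successful Get decrements it, so the counter coming back to zero after all ten reader/writer pairs finish shows that no pair was lost or delivered twice.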

fs/sync/sync.go

@@ -43,9 +43,9 @@ type syncCopyMove struct {
     srcEmptyDirsMu sync.Mutex             // protect srcEmptyDirs
     srcEmptyDirs   map[string]fs.DirEntry // potentially empty directories
     checkerWg      sync.WaitGroup         // wait for checkers
-    toBeChecked    fs.ObjectPairChan      // checkers channel
+    toBeChecked    *pipe                  // checkers channel
     transfersWg    sync.WaitGroup         // wait for transfers
-    toBeUploaded   fs.ObjectPairChan      // copiers channel
+    toBeUploaded   *pipe                  // copiers channel
     errorMu        sync.Mutex             // Mutex covering the errors variables
     err            error                  // normal error from copy process
     noRetryErr     error                  // error with NoRetry set
@@ -54,7 +54,7 @@ type syncCopyMove struct {
     renameMapMu    sync.Mutex             // mutex to protect the below
     renameMap      map[string][]fs.Object // dst files by hash - only used by trackRenames
     renamerWg      sync.WaitGroup         // wait for renamers
-    toBeRenamed    fs.ObjectPairChan      // renamers channel
+    toBeRenamed    *pipe                  // renamers channel
     trackRenamesWg sync.WaitGroup         // wg for background track renames
     trackRenamesCh chan fs.Object         // objects are pumped in here
     renameCheck    []fs.Object            // accumulate files to check for rename here
@@ -75,12 +75,12 @@ func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
         dstFilesResult: make(chan error, 1),
         dstEmptyDirs:   make(map[string]fs.DirEntry),
         srcEmptyDirs:   make(map[string]fs.DirEntry),
-        toBeChecked:    make(fs.ObjectPairChan, fs.Config.Transfers),
-        toBeUploaded:   make(fs.ObjectPairChan, fs.Config.Transfers),
+        toBeChecked:    newPipe(accounting.Stats.SetCheckQueue, fs.Config.MaxBacklog),
+        toBeUploaded:   newPipe(accounting.Stats.SetTransferQueue, fs.Config.MaxBacklog),
         deleteFilesCh:  make(chan fs.Object, fs.Config.Checkers),
         trackRenames:   fs.Config.TrackRenames,
         commonHash:     fsrc.Hashes().Overlap(fdst.Hashes()).GetOne(),
-        toBeRenamed:    make(fs.ObjectPairChan, fs.Config.Transfers),
+        toBeRenamed:    newPipe(accounting.Stats.SetRenameQueue, fs.Config.MaxBacklog),
         trackRenamesCh: make(chan fs.Object, fs.Config.Checkers),
     }
     s.ctx, s.cancel = context.WithCancel(context.Background())
@@ -131,12 +131,7 @@ func newSyncCopyMove(fdst, fsrc fs.Fs, deleteMode fs.DeleteMode, DoMove bool, de
 
 // Check to see if the context has been cancelled
 func (s *syncCopyMove) aborting() bool {
-    select {
-    case <-s.ctx.Done():
-        return true
-    default:
-    }
-    return false
+    return s.ctx.Err() != nil
 }
 
 // This reads the map and pumps it into the channel passed in, closing
@@ -197,14 +192,10 @@ func (s *syncCopyMove) currentError() error {
 // pairChecker reads Objects~s on in send to out if they need transferring.
 //
 // FIXME potentially doing lots of hashes at once
-func (s *syncCopyMove) pairChecker(in fs.ObjectPairChan, out fs.ObjectPairChan, wg *sync.WaitGroup) {
+func (s *syncCopyMove) pairChecker(in *pipe, out *pipe, wg *sync.WaitGroup) {
     defer wg.Done()
     for {
-        if s.aborting() {
-            return
-        }
-        select {
-        case pair, ok := <-in:
-            if !ok {
-                return
-            }
+        pair, ok := in.Get(s.ctx)
+        if !ok {
+            return
+        }
@@ -228,17 +219,15 @@
             } else {
                 // If successful zero out the dst as it is no longer there and copy the file
                 pair.Dst = nil
-                select {
-                case <-s.ctx.Done():
-                    return
-                case out <- pair:
+                ok = out.Put(s.ctx, pair)
+                if !ok {
+                    return
                 }
             }
         } else {
-            select {
-            case <-s.ctx.Done():
-                return
-            case out <- pair:
+            ok = out.Put(s.ctx, pair)
+            if !ok {
+                return
             }
         }
     }
@@ -251,50 +240,35 @@
             }
         }
         accounting.Stats.DoneChecking(src.Remote())
-        case <-s.ctx.Done():
-            return
-        }
     }
 }
 
 // pairRenamer reads Objects~s on in and attempts to rename them,
 // otherwise it sends them out if they need transferring.
-func (s *syncCopyMove) pairRenamer(in fs.ObjectPairChan, out fs.ObjectPairChan, wg *sync.WaitGroup) {
+func (s *syncCopyMove) pairRenamer(in *pipe, out *pipe, wg *sync.WaitGroup) {
     defer wg.Done()
     for {
-        if s.aborting() {
-            return
-        }
-        select {
-        case pair, ok := <-in:
-            if !ok {
-                return
-            }
-            src := pair.Src
-            if !s.tryRename(src) {
-                // pass on if not renamed
-                select {
-                case <-s.ctx.Done():
-                    return
-                case out <- pair:
-                }
-            }
-        case <-s.ctx.Done():
-            return
-        }
+        pair, ok := in.Get(s.ctx)
+        if !ok {
+            return
+        }
+        src := pair.Src
+        if !s.tryRename(src) {
+            // pass on if not renamed
+            ok = out.Put(s.ctx, pair)
+            if !ok {
+                return
+            }
+        }
     }
 }
 
 // pairCopyOrMove reads Objects on in and moves or copies them.
-func (s *syncCopyMove) pairCopyOrMove(in fs.ObjectPairChan, fdst fs.Fs, wg *sync.WaitGroup) {
+func (s *syncCopyMove) pairCopyOrMove(in *pipe, fdst fs.Fs, wg *sync.WaitGroup) {
     defer wg.Done()
     var err error
     for {
-        if s.aborting() {
-            return
-        }
-        select {
-        case pair, ok := <-in:
-            if !ok {
-                return
-            }
+        pair, ok := in.Get(s.ctx)
+        if !ok {
+            return
+        }
@@ -307,9 +281,6 @@ func (s *syncCopyMove) pairCopyOrMove(in fs.ObjectPairChan, fdst fs.Fs, wg *sync.WaitGroup) {
         }
         s.processError(err)
         accounting.Stats.DoneTransferring(src.Remote(), err == nil)
-        case <-s.ctx.Done():
-            return
-        }
     }
 }
@@ -323,7 +294,7 @@ func (s *syncCopyMove) startCheckers() {
 
 // This stops the background checkers
 func (s *syncCopyMove) stopCheckers() {
-    close(s.toBeChecked)
+    s.toBeChecked.Close()
     fs.Infof(s.fdst, "Waiting for checks to finish")
     s.checkerWg.Wait()
 }
@@ -338,7 +309,7 @@
 
 // This stops the background transfers
 func (s *syncCopyMove) stopTransfers() {
-    close(s.toBeUploaded)
+    s.toBeUploaded.Close()
     fs.Infof(s.fdst, "Waiting for transfers to finish")
     s.transfersWg.Wait()
 }
@@ -359,7 +330,7 @@
     if !s.trackRenames {
         return
     }
-    close(s.toBeRenamed)
+    s.toBeRenamed.Close()
     fs.Infof(s.fdst, "Waiting for renames to finish")
     s.renamerWg.Wait()
 }
@@ -685,10 +656,9 @@ func (s *syncCopyMove) run() error {
         s.makeRenameMap()
         // Attempt renames for all the files which don't have a matching dst
         for _, src := range s.renameCheck {
-            select {
-            case <-s.ctx.Done():
+            ok := s.toBeRenamed.Put(s.ctx, fs.ObjectPair{Src: src, Dst: nil})
+            if !ok {
                 break
-            case s.toBeRenamed <- fs.ObjectPair{Src: src, Dst: nil}:
             }
         }
     }
@@ -792,10 +762,9 @@ func (s *syncCopyMove) SrcOnly(src fs.DirEntry) (recurse bool) {
             }
         } else {
             // No need to check since doesn't exist
-            select {
-            case <-s.ctx.Done():
+            ok := s.toBeUploaded.Put(s.ctx, fs.ObjectPair{Src: x, Dst: nil})
+            if !ok {
                 return
-            case s.toBeUploaded <- fs.ObjectPair{Src: x, Dst: nil}:
             }
         }
     case fs.Directory:
@@ -825,10 +794,9 @@ func (s *syncCopyMove) Match(dst, src fs.DirEntry) (recurse bool) {
         }
         dstX, ok := dst.(fs.Object)
         if ok {
-            select {
-            case <-s.ctx.Done():
-                return
-            case s.toBeChecked <- fs.ObjectPair{Src: srcX, Dst: dstX}:
+            ok = s.toBeChecked.Put(s.ctx, fs.ObjectPair{Src: srcX, Dst: dstX})
+            if !ok {
+                return false
             }
         } else {
             // FIXME src is file, dst is directory