Limit concurrent directory listings.

Never run more than "fs.Config.Checkers" directory listings at once.
author Klaus Post 2015-09-13 16:05:13 +02:00 (committed by Nick Craig-Wood)
parent a1a780e847
commit dd48e62b7e
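
The diff below uses a common Go pattern for this: a buffered channel acting as a counting semaphore, pre-filled with one token per allowed concurrent listing, so a caller must receive a token before listing and send it back afterwards. The standalone sketch here only illustrates that pattern, not rclone's actual code; the checkers constant, listDirectory function and goroutine fan-out are hypothetical stand-ins for fs.Config.Checkers and the real listing calls.

package main

import (
	"fmt"
	"sync"
	"time"
)

// listDirectory stands in for one directory listing API call; the name
// and the fixed delay are illustrative only.
func listDirectory(id int) {
	time.Sleep(100 * time.Millisecond)
	fmt.Printf("listed directory %d\n", id)
}

func main() {
	const checkers = 8 // stands in for fs.Config.Checkers

	// Buffered channel used as a counting semaphore, pre-filled with one
	// token per allowed concurrent listing, mirroring the connTokens
	// channel added in this commit.
	connTokens := make(chan struct{}, checkers)
	for i := 0; i < checkers; i++ {
		connTokens <- struct{}{}
	}

	var wg sync.WaitGroup
	for id := 0; id < 32; id++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-connTokens // take a token; blocks while `checkers` listings are already running
			defer func() { connTokens <- struct{}{} }() // put it back when done
			listDirectory(id)
		}(id)
	}
	wg.Wait()
}

Running this, at most checkers listings are in flight at any moment; the remaining goroutines block on the token receive.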


@@ -87,6 +87,7 @@ type FsAcd struct {
 	root string // the path we are working on
 	dirCache *dircache.DirCache // Map of directory path to directory id
 	pacer *pacer.Pacer // pacer for API calls
+	connTokens chan struct{} // Connection tokens for directory listings
 }
 
 // FsObjectAcd describes a acd object
@@ -165,6 +166,12 @@ func NewFs(name, root string) (fs.Fs, error) {
 		root: root,
 		c: c,
 		pacer: pacer.New().SetMinSleep(minSleep).SetMaxSleep(maxSleep).SetDecayConstant(decayConstant),
+		connTokens: make(chan struct{}, fs.Config.Checkers),
 	}
 
+	// Insert connection tokens.
+	for i := 0; i < fs.Config.Checkers; i++ {
+		f.connTokens <- struct{}{}
+	}
+
 	// Update endpoints
@@ -317,10 +324,14 @@ func (f *FsAcd) listAll(dirId string, title string, directoriesOnly bool, filesO
 OUTER:
 	for {
 		var resp *http.Response
+		// Get a token
+		_ = <-f.connTokens
 		err = f.pacer.Call(func() (bool, error) {
 			nodes, resp, err = f.c.Nodes.GetNodes(&opts)
 			return shouldRetry(resp, err)
 		})
+		// Reinsert token
+		f.connTokens <- struct{}{}
 		if err != nil {
 			fs.Stats.Error()
 			fs.ErrorLog(f, "Couldn't list files: %v", err)
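
The last hunk takes a token immediately before the paced GetNodes call and reinserts it right after, before the error check, so a failed listing does not keep holding one of the fs.Config.Checkers slots while the error is handled. As a contrast, a defer-based variant returns the token on every exit path, including panics, at the cost of holding it slightly longer; the withToken helper below is hypothetical and not part of the commit.

package main

import (
	"fmt"
	"time"
)

// withToken is a hypothetical helper (not in the commit) that wraps the
// take/reinsert steps from listAll so the token is returned on every exit
// path, including panics.
func withToken(tokens chan struct{}, fn func() error) error {
	_ = <-tokens                            // get a token, as listAll does
	defer func() { tokens <- struct{}{} }() // reinsert it when fn returns
	return fn()
}

func main() {
	const checkers = 2 // stands in for fs.Config.Checkers
	tokens := make(chan struct{}, checkers)
	for i := 0; i < checkers; i++ {
		tokens <- struct{}{}
	}

	done := make(chan struct{})
	start := time.Now()
	for i := 0; i < 4; i++ {
		go func() {
			_ = withToken(tokens, func() error {
				time.Sleep(100 * time.Millisecond) // stand-in for the paced GetNodes call
				return nil
			})
			done <- struct{}{}
		}()
	}
	for i := 0; i < 4; i++ {
		<-done
	}
	fmt.Printf("4 listings with %d tokens took %v\n", checkers, time.Since(start).Round(10*time.Millisecond))
}

With 2 tokens and 4 simulated 100ms listings, the program takes roughly 200ms, showing that only two listings run at a time.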