s3: use single memory pool
Previously we had a map of memory pools, one per chunk size. In practice the mapping is not very useful and requires a lock: a pool of a size other than ChunkSize can only occur for a huge file, over 10,000 * ChunkSize (roughly 48 GiB with the default 5 MiB chunk size, since S3 multipart uploads are capped at 10,000 parts and rclone scales the chunk size up beyond that). The map would only pay off with many identically sized huge files, and in that case ChunkSize should most likely be increased instead.

The map and its lock are therefore replaced with a single pool initialised for ChunkSize; for any other size a pool is allocated and freed on a per-file basis (see the usage sketch after the diff).
parent 64b5105edd
commit 399cf18013
@@ -952,8 +952,7 @@ type Fs struct {
 	cache  *bucket.Cache // cache for bucket creation status
 	pacer  *fs.Pacer     // To pace the API calls
 	srv    *http.Client  // a plain http client
-	poolMu sync.Mutex           // mutex protecting memory pools map
-	pools  map[int64]*pool.Pool // memory pools
+	pool   *pool.Pool    // memory pool
 }
 
 // Object describes a s3 object
@@ -1247,7 +1246,12 @@ func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
 		pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
 		cache: bucket.NewCache(),
 		srv:   fshttp.NewClient(fs.Config),
-		pools: make(map[int64]*pool.Pool),
+		pool: pool.New(
+			time.Duration(opt.MemoryPoolFlushTime),
+			int(opt.ChunkSize),
+			opt.UploadConcurrency*fs.Config.Transfers,
+			opt.MemoryPoolUseMmap,
+		),
 	}
 
 	f.setRoot(root)
@@ -1938,19 +1942,16 @@ func (f *Fs) Hashes() hash.Set {
 }
 
 func (f *Fs) getMemoryPool(size int64) *pool.Pool {
-	f.poolMu.Lock()
-	defer f.poolMu.Unlock()
-
-	_, ok := f.pools[size]
-	if !ok {
-		f.pools[size] = pool.New(
-			time.Duration(f.opt.MemoryPoolFlushTime),
-			int(size),
-			f.opt.UploadConcurrency*fs.Config.Transfers,
-			f.opt.MemoryPoolUseMmap,
-		)
-	}
-	return f.pools[size]
+	if size == int64(f.opt.ChunkSize) {
+		return f.pool
+	}
+
+	return pool.New(
+		time.Duration(f.opt.MemoryPoolFlushTime),
+		int(size),
+		f.opt.UploadConcurrency*fs.Config.Transfers,
+		f.opt.MemoryPoolUseMmap,
+	)
 }
 
 // ------------------------------------------------------------
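For context, a minimal sketch of how pools produced by this code are consumed. Get, Put and Flush are real lib/pool calls; the pool parameters, the doubled off-size and the main wrapper are illustrative assumptions, not part of the commit.

package main

import (
	"time"

	"github.com/rclone/rclone/lib/pool"
)

func main() {
	const chunkSize = 5 * 1024 * 1024 // assumed default 5 MiB chunk size

	// The shared pool, built once in NewFs as in the second hunk:
	// buffers are recycled across all uploads and flushed after a
	// period of inactivity (MemoryPoolFlushTime).
	shared := pool.New(time.Minute, chunkSize, 4, false)

	buf := shared.Get() // borrow a chunk-sized buffer from the pool
	// ... fill buf and upload it as one multipart chunk ...
	shared.Put(buf) // return the buffer for reuse by other transfers

	// An off-size pool (size != ChunkSize) is now built per file by
	// getMemoryPool and dropped when the upload finishes; Flush
	// releases its buffers eagerly rather than waiting for the timer.
	perFile := pool.New(time.Minute, 2*chunkSize, 4, false)
	buf = perFile.Get()
	perFile.Put(buf)
	perFile.Flush()
}

The design point is visible here: the hot path (chunks of exactly ChunkSize) hits a single pre-initialised pool with no map lookup and no extra mutex, while only the rare off-size path pays for constructing a throwaway pool.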