Mirror of https://github.com/rclone/rclone.git (synced 2025-02-16 18:41:54 +01:00)

mailru: avoid prehashing of large local files (PR 4617)

commit dad8447423 (parent 65ff109065)
@@ -102,6 +102,7 @@ func init() {
 This feature is called "speedup" or "put by hash". It is especially efficient
 in case of generally available files like popular books, video or audio clips,
 because files are searched by hash in all accounts of all mailru users.
+It is meaningless and ineffective if source file is unique or encrypted.
 Please note that rclone may need local memory and disk space to calculate
 content hash in advance and decide whether full upload is required.
 Also, if rclone does not know file size in advance (e.g. in case of
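The help text above describes the shape of "put by hash": hash the content first and transfer data only when the server does not already know that hash. Below is a minimal, hypothetical Go sketch of that idea only, not the rclone/mailru implementation; serverKnowsHash and uploadFull are invented stand-ins, and sha1 is merely a placeholder for the backend's own mrhash scheme.

// Hypothetical sketch of "put by hash"; not the backend code.
package main

import (
	"bytes"
	"crypto/sha1"
	"fmt"
	"io"
)

// putByHashOrUpload pre-hashes the content and only transfers the data when
// the server does not already have a copy with the same hash.
func putByHashOrUpload(data []byte, serverKnowsHash func([]byte) bool, uploadFull func(io.Reader) error) error {
	sum := sha1.Sum(data) // placeholder hash; mailru uses its own mrhash
	if serverKnowsHash(sum[:]) {
		return nil // "speedup": file registered by hash, nothing uploaded
	}
	return uploadFull(bytes.NewReader(data)) // unique content: full upload
}

func main() {
	known := map[string]bool{} // pretend server-side hash index
	err := putByHashOrUpload([]byte("popular book"),
		func(h []byte) bool { return known[string(h)] },
		func(r io.Reader) error { _, err := io.Copy(io.Discard, r); return err })
	fmt.Println("upload error:", err)
}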
@@ -1601,23 +1602,28 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     }
 
     var (
         fileBuf  []byte
         fileHash []byte
         newHash  []byte
-        trySpeedup bool
+        slowHash bool
+        localSrc bool
     )
+    if srcObj := fs.UnWrapObjectInfo(src); srcObj != nil {
+        srcFeatures := srcObj.Fs().Features()
+        slowHash = srcFeatures.SlowHash
+        localSrc = srcFeatures.IsLocal
+    }
 
-    // Don't disturb the source if file fits in hash.
-    // Skip an extra speedup request if file fits in hash.
-    if size > mrhash.Size {
-        // Request hash from source.
+    // Try speedup if it's globally enabled but skip extra post
+    // request if file is small and fits in the metadata request
+    trySpeedup := o.fs.opt.SpeedupEnable && size > mrhash.Size
+
+    // Try to get the hash if it's instant
+    if trySpeedup && !slowHash {
         if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
             fileHash, _ = mrhash.DecodeString(srcHash)
         }
-
-        // Try speedup if it's globally enabled and source hash is available.
-        trySpeedup = o.fs.opt.SpeedupEnable
-        if trySpeedup && fileHash != nil {
+        if fileHash != nil {
             if o.putByHash(ctx, fileHash, src, "source") {
                 return nil
             }
@@ -1626,13 +1632,22 @@ func (o *Object) Update(ctx context.Context, in io.Reader, src fs.ObjectInfo, op
     }
 
     // Need to calculate hash, check whether file is still eligible for speedup
-    if trySpeedup {
-        trySpeedup = o.fs.eligibleForSpeedup(o.Remote(), size, options...)
+    trySpeedup = trySpeedup && o.fs.eligibleForSpeedup(o.Remote(), size, options...)
+
+    // Attempt to put by hash if file is local and eligible
+    if trySpeedup && localSrc {
+        if srcHash, err := src.Hash(ctx, MrHashType); err == nil && srcHash != "" {
+            fileHash, _ = mrhash.DecodeString(srcHash)
+        }
+        if fileHash != nil && o.putByHash(ctx, fileHash, src, "localfs") {
+            return nil
+        }
+        // If local file hashing has failed, it's pointless to try anymore
+        trySpeedup = false
     }
 
     // Attempt to put by calculating hash in memory
     if trySpeedup && size <= int64(o.fs.opt.SpeedupMaxMem) {
-        //fs.Debugf(o, "attempt to put by hash from memory")
         fileBuf, err = ioutil.ReadAll(in)
         if err != nil {
             return err
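Taken together, the two hunks above change when Update is willing to pre-hash: an instant hash is requested only from sources that do not report SlowHash, and a full read-and-hash pass is reserved for local sources (and, below a memory limit, for buffered uploads). The sketch below is a simplified paraphrase with invented names and parameters, not the backend code.

// Simplified, hypothetical paraphrase of the new decision flow.
package main

import "fmt"

func decideUpload(speedupEnabled, slowHash, localSrc, eligible bool,
	size, hashSize, maxMem int64, instantHash []byte) string {

	trySpeedup := speedupEnabled && size > hashSize
	if trySpeedup && !slowHash && instantHash != nil {
		return "put by source hash" // hash came for free, try it first
	}
	trySpeedup = trySpeedup && eligible
	switch {
	case trySpeedup && localSrc:
		return "hash the local file, then put by hash" // re-reading local data is cheap
	case trySpeedup && size <= maxMem:
		return "buffer in memory, hash, then put by hash"
	default:
		return "stream upload without pre-hashing" // large or slow-hash remote sources
	}
}

func main() {
	// A large file from a slow-hash, non-local source is no longer pre-hashed.
	fmt.Println(decideUpload(true, true, false, true, 1<<30, 20, 32<<20, nil))
	// A large local file still is, because re-reading it is cheap.
	fmt.Println(decideUpload(true, true, true, true, 1<<30, 20, 32<<20, nil))
}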
@@ -1762,6 +1777,7 @@ func (f *Fs) parseSpeedupPatterns(patternString string) (err error) {
     return nil
 }
 
+// putByHash is a thin wrapper around addFileMetaData
 func (o *Object) putByHash(ctx context.Context, mrHash []byte, info fs.ObjectInfo, method string) bool {
     oNew := new(Object)
     *oNew = *o
@@ -2188,6 +2204,7 @@ func (o *Object) Open(ctx context.Context, options ...fs.OpenOption) (in io.Read
         // Discard the beginning of the data
         _, err = io.CopyN(ioutil.Discard, wrapStream, start)
         if err != nil {
+            closeBody(res)
             return nil, err
         }
     }
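The closeBody(res) added above releases the HTTP response body when the discard of the leading bytes fails part-way, so the connection is not leaked. The same general pattern, shown here with plain net/http and an invented openFrom helper (this is not the backend's closeBody helper):

// Generic close-on-error pattern for HTTP response bodies; hypothetical names.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func openFrom(url string, start int64) (io.ReadCloser, error) {
	res, err := http.Get(url)
	if err != nil {
		return nil, err
	}
	// Skip the first `start` bytes of the stream.
	if _, err := io.CopyN(io.Discard, res.Body, start); err != nil {
		res.Body.Close() // release the connection on the error path
		return nil, err
	}
	return res.Body, nil // caller closes on success
}

func main() {
	rc, err := openFrom("https://example.com/", 10)
	if err != nil {
		fmt.Println("open failed:", err)
		return
	}
	defer rc.Close()
	n, _ := io.Copy(io.Discard, rc)
	fmt.Println("read", n, "bytes after offset")
}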