package operations

import (
	"bufio"
	"context"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/fs/accounting"
	"github.com/rclone/rclone/lib/atexit"
	"github.com/rclone/rclone/lib/multipart"
	"golang.org/x/sync/errgroup"
)

const (
	multithreadChunkSize = 64 << 10
)

// Return a boolean as to whether we should use multi thread copy for
// this transfer
func doMultiThreadCopy(ctx context.Context, f fs.Fs, src fs.Object) bool {
	ci := fs.GetConfig(ctx)

	// Disable multi thread if...
	//
	// ...it isn't configured
	if ci.MultiThreadStreams <= 1 {
		return false
	}
	// ...if the source doesn't support it
	if src.Fs().Features().NoMultiThreading {
		return false
	}
	// ...size of object is less than cutoff
	if src.Size() < int64(ci.MultiThreadCutoff) {
		return false
	}
	// ...destination doesn't support it
	dstFeatures := f.Features()
	if dstFeatures.OpenChunkWriter == nil && dstFeatures.OpenWriterAt == nil {
		return false
	}
	// ...if --multi-thread-streams not in use and source and
	// destination are both local
	if !ci.MultiThreadSet && dstFeatures.IsLocal && src.Fs().Features().IsLocal {
		return false
	}
	return true
}

// state for a multi-thread copy
type multiThreadCopyState struct {
	ctx         context.Context
	partSize    int64
	size        int64
	src         fs.Object
	acc         *accounting.Account
	numChunks   int
	noBuffering bool // set to read the input without buffering
}

// Copy a single chunk into place
func (mc *multiThreadCopyState) copyChunk(ctx context.Context, chunk int, writer fs.ChunkWriter) (err error) {
	defer func() {
		if err != nil {
			fs.Debugf(mc.src, "multi-thread copy: chunk %d/%d failed: %v", chunk+1, mc.numChunks, err)
		}
	}()
	start := int64(chunk) * mc.partSize
	if start >= mc.size {
		return nil
	}
	end := start + mc.partSize
	if end > mc.size {
		end = mc.size
	}
	size := end - start

	fs.Debugf(mc.src, "multi-thread copy: chunk %d/%d (%d-%d) size %v starting", chunk+1, mc.numChunks, start, end, fs.SizeSuffix(size))

	rc, err := Open(ctx, mc.src, &fs.RangeOption{Start: start, End: end - 1})
	if err != nil {
		return fmt.Errorf("multi-thread copy: failed to open source: %w", err)
	}
	defer fs.CheckClose(rc, &err)

	var rs io.ReadSeeker
	if mc.noBuffering {
		// Read directly if we are sure we aren't going to seek
		// and account with accounting
		rc.SetAccounting(mc.acc.AccountRead)
		rs = rc
	} else {
		// Read the chunk into buffered reader
		rw := multipart.NewRW()
		defer fs.CheckClose(rw, &err)
		_, err = io.CopyN(rw, rc, size)
		if err != nil {
			return fmt.Errorf("multi-thread copy: failed to read chunk: %w", err)
		}
		// Account as we go
		rw.SetAccounting(mc.acc.AccountRead)
		rs = rw
	}

	// Write the chunk
	bytesWritten, err := writer.WriteChunk(ctx, chunk, rs)
	if err != nil {
		return fmt.Errorf("multi-thread copy: failed to write chunk: %w", err)
	}

	fs.Debugf(mc.src, "multi-thread copy: chunk %d/%d (%d-%d) size %v finished", chunk+1, mc.numChunks, start, end, fs.SizeSuffix(bytesWritten))
	return nil
}

// Given a file size and a chunkSize
// it returns the number of chunks, so that chunkSize * numChunks >= size
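//
// For example, a size of 100 with a chunkSize of 30 gives 4 chunks,
// since 4*30 >= 100.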
func calculateNumChunks(size int64, chunkSize int64) int {
	numChunks := size / chunkSize
	if size%chunkSize != 0 {
		numChunks++
	}
	return int(numChunks)
}

// Copy src to (f, remote) using concurrency download threads. It tries to use the OpenChunkWriter feature
// and if that's not available it creates an adapter using OpenWriterAt
func multiThreadCopy(ctx context.Context, f fs.Fs, remote string, src fs.Object, concurrency int, tr *accounting.Transfer, options ...fs.OpenOption) (newDst fs.Object, err error) {
	openChunkWriter := f.Features().OpenChunkWriter
	ci := fs.GetConfig(ctx)
	noBuffering := false
	usingOpenWriterAt := false
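	// Decide whether each chunk must be buffered in memory before writing.
	// WriteChunk receives an io.ReadSeeker and a chunk writer may seek back
	// over the data (for example to retry a failed part), so buffering is
	// only skipped when re-reading the source range is safe or cheap.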
	if openChunkWriter == nil {
		openWriterAt := f.Features().OpenWriterAt
		if openWriterAt == nil {
			return nil, errors.New("multi-thread copy: neither OpenChunkWriter nor OpenWriterAt supported")
		}
		openChunkWriter = openChunkWriterFromOpenWriterAt(openWriterAt, int64(ci.MultiThreadChunkSize), int64(ci.MultiThreadWriteBufferSize), f)
		// If we are using OpenWriterAt we don't seek the chunks so don't need to buffer
		fs.Debugf(src, "multi-thread copy: disabling buffering because destination uses OpenWriterAt")
		noBuffering = true
		usingOpenWriterAt = true
	} else if src.Fs().Features().IsLocal {
		// If the source fs is local we don't need to buffer
		fs.Debugf(src, "multi-thread copy: disabling buffering because source is local disk")
		noBuffering = true
	} else if f.Features().ChunkWriterDoesntSeek {
		// If the destination Fs promises not to seek its chunks
		// (except for retries) then we don't need buffering.
		fs.Debugf(src, "multi-thread copy: disabling buffering because destination has set ChunkWriterDoesntSeek")
		noBuffering = true
	}

	if src.Size() < 0 {
		return nil, fmt.Errorf("multi-thread copy: can't copy unknown sized file")
	}
	if src.Size() == 0 {
		return nil, fmt.Errorf("multi-thread copy: can't copy zero sized file")
	}

	info, chunkWriter, err := openChunkWriter(ctx, remote, src, options...)
	if err != nil {
		return nil, fmt.Errorf("multi-thread copy: failed to open chunk writer: %w", err)
	}

	uploadCtx, cancel := context.WithCancel(ctx)
	defer cancel()
	uploadedOK := false
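	// If the copy fails or rclone is interrupted, abort the chunk writer so
	// any partially uploaded data is cleaned up - unless the backend asked
	// to keep its parts (LeavePartsOnError) or the upload already completed.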
	defer atexit.OnError(&err, func() {
		cancel()
		if info.LeavePartsOnError || uploadedOK {
			return
		}
		fs.Debugf(src, "multi-thread copy: cancelling transfer on exit")
		abortErr := chunkWriter.Abort(ctx)
		if abortErr != nil {
			fs.Debugf(src, "multi-thread copy: abort failed: %v", abortErr)
		}
	})()

	if info.ChunkSize > src.Size() {
		fs.Debugf(src, "multi-thread copy: chunk size %v was bigger than source file size %v", fs.SizeSuffix(info.ChunkSize), fs.SizeSuffix(src.Size()))
		info.ChunkSize = src.Size()
	}

	// Use the backend concurrency if it is higher than --multi-thread-streams or if --multi-thread-streams wasn't set explicitly
	if !ci.MultiThreadSet || info.Concurrency > concurrency {
		fs.Debugf(src, "multi-thread copy: using backend concurrency of %d instead of --multi-thread-streams %d", info.Concurrency, concurrency)
		concurrency = info.Concurrency
	}

	numChunks := calculateNumChunks(src.Size(), info.ChunkSize)
	if concurrency > numChunks {
		fs.Debugf(src, "multi-thread copy: number of streams %d was bigger than number of chunks %d", concurrency, numChunks)
		concurrency = numChunks
	}

	if concurrency < 1 {
		concurrency = 1
	}
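
	// Copy the chunks in an errgroup: at most concurrency copyChunk calls
	// run at once, and the derived context is cancelled as soon as any of
	// them returns an error.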
	g, gCtx := errgroup.WithContext(uploadCtx)
	g.SetLimit(concurrency)

	mc := &multiThreadCopyState{
		ctx:         gCtx,
		size:        src.Size(),
		src:         src,
		partSize:    info.ChunkSize,
		numChunks:   numChunks,
		noBuffering: noBuffering,
	}

	// Make accounting
	mc.acc = tr.Account(gCtx, nil)

	fs.Debugf(src, "Starting multi-thread copy with %d chunks of size %v with %v parallel streams", mc.numChunks, fs.SizeSuffix(mc.partSize), concurrency)
	for chunk := 0; chunk < mc.numChunks; chunk++ {
		// Fail fast, in case an errgroup managed function returns an error
		if gCtx.Err() != nil {
			break
		}
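		// Copy this chunk in its own goroutine; chunk is rebound per
		// iteration so each closure captures its own value.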
		chunk := chunk
		g.Go(func() error {
			return mc.copyChunk(gCtx, chunk, chunkWriter)
		})
	}

	err = g.Wait()
	if err != nil {
		return nil, err
	}
	err = chunkWriter.Close(ctx)
	if err != nil {
		return nil, fmt.Errorf("multi-thread copy: failed to close object after copy: %w", err)
	}
	uploadedOK = true // file is definitely uploaded OK so no need to abort

	obj, err := f.NewObject(ctx, remote)
	if err != nil {
		return nil, fmt.Errorf("multi-thread copy: failed to find object after copy: %w", err)
	}

	// OpenWriterAt doesn't set metadata so we need to set it on completion
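	// With --metadata and a destination that implements SetMetadata we copy
	// the source metadata (filling in an mtime if the source had none);
	// otherwise we fall back to just setting the modification time.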
	if usingOpenWriterAt {
		setModTime := true
		if ci.Metadata {
			do, ok := obj.(fs.SetMetadataer)
			if ok {
				meta, err := fs.GetMetadataOptions(ctx, f, src, options)
				if err != nil {
					return nil, fmt.Errorf("multi-thread copy: failed to read metadata from source object: %w", err)
				}
				if _, foundMeta := meta["mtime"]; !foundMeta {
					meta.Set("mtime", src.ModTime(ctx).Format(time.RFC3339Nano))
				}
				err = do.SetMetadata(ctx, meta)
				if err != nil {
					return nil, fmt.Errorf("multi-thread copy: failed to set metadata: %w", err)
				}
				setModTime = false
			} else {
				fs.Errorf(obj, "multi-thread copy: can't set metadata as SetMetadata isn't implemented in: %v", f)
			}
		}
		if setModTime {
			err = obj.SetModTime(ctx, src.ModTime(ctx))
			switch err {
			case nil, fs.ErrorCantSetModTime, fs.ErrorCantSetModTimeWithoutDelete:
			default:
				return nil, fmt.Errorf("multi-thread copy: failed to set modification time: %w", err)
			}
		}
	}

	fs.Debugf(src, "Finished multi-thread copy with %d parts of size %v", mc.numChunks, fs.SizeSuffix(mc.partSize))
	return obj, nil
}

// writerAtChunkWriter converts a WriterAtCloser into a ChunkWriter
type writerAtChunkWriter struct {
	remote          string
	size            int64
	writerAt        fs.WriterAtCloser
	chunkSize       int64
	chunks          int
	writeBufferSize int64
	f               fs.Fs
	closed          bool
}

// WriteChunk writes chunkNumber from reader
func (w *writerAtChunkWriter) WriteChunk(ctx context.Context, chunkNumber int, reader io.ReadSeeker) (int64, error) {
	fs.Debugf(w.remote, "writing chunk %v", chunkNumber)
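
	// Each chunk is written at offset chunkNumber*chunkSize; the final chunk
	// may be shorter than chunkSize when the file size isn't an exact
	// multiple.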
	bytesToWrite := w.chunkSize
	if chunkNumber == (w.chunks-1) && w.size%w.chunkSize != 0 {
		bytesToWrite = w.size % w.chunkSize
	}

	var writer io.Writer = io.NewOffsetWriter(w.writerAt, int64(chunkNumber)*w.chunkSize)
	if w.writeBufferSize > 0 {
		writer = bufio.NewWriterSize(writer, int(w.writeBufferSize))
	}
	n, err := io.Copy(writer, reader)
	if err != nil {
		return -1, err
	}
	if n != bytesToWrite {
		return -1, fmt.Errorf("expected to write %v bytes for chunk %v, but wrote %v bytes", bytesToWrite, chunkNumber, n)
	}
	// if we were buffering, flush to disk
	switch w := writer.(type) {
	case *bufio.Writer:
		er2 := w.Flush()
		if er2 != nil {
			return -1, fmt.Errorf("multi-thread copy: flush failed: %w", er2)
		}
	}
	return n, nil
}

// Close the chunk writing
func (w *writerAtChunkWriter) Close(ctx context.Context) error {
	if w.closed {
		return nil
	}
	w.closed = true
	return w.writerAt.Close()
}

// Abort the chunk writing
func (w *writerAtChunkWriter) Abort(ctx context.Context) error {
	err := w.Close(ctx)
	if err != nil {
		fs.Errorf(w.remote, "multi-thread copy: failed to close file before aborting: %v", err)
	}
	obj, err := w.f.NewObject(ctx, w.remote)
	if err != nil {
		return fmt.Errorf("multi-thread copy: failed to find temp file when aborting chunk writer: %w", err)
	}
	return obj.Remove(ctx)
}

// openChunkWriterFromOpenWriterAt adapts an OpenWriterAtFn into an OpenChunkWriterFn using chunkSize and writeBufferSize
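//
// The returned function opens the destination with openWriterAt and wraps it
// in a writerAtChunkWriter so each chunk can be written independently at its
// own offset; Concurrency defaults to --multi-thread-streams.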
func openChunkWriterFromOpenWriterAt(openWriterAt fs.OpenWriterAtFn, chunkSize int64, writeBufferSize int64, f fs.Fs) fs.OpenChunkWriterFn {
	return func(ctx context.Context, remote string, src fs.ObjectInfo, options ...fs.OpenOption) (info fs.ChunkWriterInfo, writer fs.ChunkWriter, err error) {
		ci := fs.GetConfig(ctx)

		writerAt, err := openWriterAt(ctx, remote, src.Size())
		if err != nil {
			return info, nil, err
		}

		if writeBufferSize > 0 {
			fs.Debugf(src.Remote(), "multi-thread copy: write buffer set to %v", writeBufferSize)
		}

		chunkWriter := &writerAtChunkWriter{
			remote:          remote,
			size:            src.Size(),
			chunkSize:       chunkSize,
			chunks:          calculateNumChunks(src.Size(), chunkSize),
			writerAt:        writerAt,
			writeBufferSize: writeBufferSize,
			f:               f,
		}
		info = fs.ChunkWriterInfo{
			ChunkSize:   chunkSize,
			Concurrency: ci.MultiThreadStreams,
		}
		return info, chunkWriter, nil
	}
}