2018-01-12 17:30:54 +01:00
|
|
|
// Package accounting providers an accounting and limiting reader
|
|
|
|
package accounting
|
2013-01-03 23:50:00 +01:00
|
|
|
|
|
|
|
import (
|
2020-06-20 17:10:02 +02:00
|
|
|
"context"
|
2013-01-03 23:50:00 +01:00
|
|
|
"fmt"
|
|
|
|
"io"
|
|
|
|
"sync"
|
|
|
|
"time"
|
2019-01-14 17:12:39 +01:00
|
|
|
"unicode/utf8"
|
2015-02-19 20:26:00 +01:00
|
|
|
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/fs/rc"
|
2020-06-20 17:10:02 +02:00
|
|
|
"golang.org/x/time/rate"
|
2019-07-26 09:51:51 +02:00
|
|
|
|
2018-04-21 23:03:27 +02:00
|
|
|
"github.com/pkg/errors"
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
"github.com/rclone/rclone/fs/asyncreader"
|
|
|
|
"github.com/rclone/rclone/fs/fserrors"
|
2013-01-03 23:50:00 +01:00
|
|
|
)
|
|
|
|
|
2020-03-10 13:00:10 +01:00
|
|
|
// ErrorMaxTransferLimitReached defines error when transfer limit is reached.
// Used for checking on exit and matching to correct exit code.
var ErrorMaxTransferLimitReached = errors.New("Max transfer limit reached as set by --max-transfer")

// ErrorMaxTransferLimitReachedFatal is returned from Read when the max
// transfer limit is reached.
//
// It wraps ErrorMaxTransferLimitReached so callers can still match the
// underlying sentinel, while the Fatal wrapper stops retries immediately.
var ErrorMaxTransferLimitReachedFatal = fserrors.FatalError(ErrorMaxTransferLimitReached)

// ErrorMaxTransferLimitReachedGraceful is returned from operations.Copy when the max
// transfer limit is reached and a graceful stop is required.
//
// Same sentinel as above but wrapped as NoRetry: in-flight transfers are
// allowed to finish, no new ones are started.
var ErrorMaxTransferLimitReachedGraceful = fserrors.NoRetryError(ErrorMaxTransferLimitReached)
|
|
|
|
|
2013-01-03 23:50:00 +01:00
|
|
|
// Account limits and accounts for one transfer
type Account struct {
	stats *StatsInfo // aggregate stats this transfer reports into
	// The mutex is to make sure Read() and Close() aren't called
	// concurrently. Unfortunately the persistent connection loop
	// in http transport calls Read() after Do() returns on
	// CancelRequest so this race can happen when it apparently
	// shouldn't.
	mu      sync.Mutex // mutex protects these values
	in      io.Reader
	ctx     context.Context // current context for transfer - may change
	ci      *fs.ConfigInfo
	origIn  io.ReadCloser
	close   io.Closer
	size    int64
	name    string
	closed  bool          // set if the file is closed
	exit    chan struct{} // channel that will be closed when transfer is finished
	withBuf bool          // is using a buffered in

	tokenBucket *rate.Limiter // per file bandwidth limiter (may be nil)

	// values holds the statistics counters; it carries its own mutex
	// so hot-path accounting doesn't contend with Read/Close on mu.
	values accountValues
}
|
|
|
|
|
|
|
|
// accountValues holds statistics for this Account
type accountValues struct {
	mu      sync.Mutex // Mutex for stat values.
	bytes   int64      // Total number of bytes read
	max     int64      // if >=0 the max number of bytes to transfer
	start   time.Time  // Start time of first read
	lpTime  time.Time  // Time of last average measurement
	lpBytes int        // Number of bytes read since last measurement
	avg     float64    // Moving average of last few measurements in bytes/s
}
|
|
|
|
|
2018-06-11 12:28:12 +02:00
|
|
|
const averagePeriod = 16 // period to do exponentially weighted averages over
|
|
|
|
|
2020-05-25 08:05:53 +02:00
|
|
|
// newAccountSizeName makes an Account reader for an io.ReadCloser of
|
2016-11-30 21:18:14 +01:00
|
|
|
// the given size and name
|
2020-06-04 16:09:03 +02:00
|
|
|
func newAccountSizeName(ctx context.Context, stats *StatsInfo, in io.ReadCloser, size int64, name string) *Account {
|
2015-09-15 16:46:06 +02:00
|
|
|
acc := &Account{
|
2019-07-16 13:56:20 +02:00
|
|
|
stats: stats,
|
2015-09-15 16:46:06 +02:00
|
|
|
in: in,
|
2020-06-04 16:09:03 +02:00
|
|
|
ctx: ctx,
|
2020-11-05 12:33:32 +01:00
|
|
|
ci: fs.GetConfig(ctx),
|
2018-02-01 16:41:58 +01:00
|
|
|
close: in,
|
2017-02-17 00:57:58 +01:00
|
|
|
origIn: in,
|
2016-11-30 21:18:14 +01:00
|
|
|
size: size,
|
|
|
|
name: name,
|
2015-09-15 16:46:06 +02:00
|
|
|
exit: make(chan struct{}),
|
2020-05-12 17:16:17 +02:00
|
|
|
values: accountValues{
|
|
|
|
avg: 0,
|
|
|
|
lpTime: time.Now(),
|
|
|
|
max: -1,
|
|
|
|
},
|
2019-10-30 20:23:17 +01:00
|
|
|
}
|
2020-11-05 12:33:32 +01:00
|
|
|
if acc.ci.CutoffMode == fs.CutoffModeHard {
|
|
|
|
acc.values.max = int64((acc.ci.MaxTransfer))
|
2015-09-15 16:46:06 +02:00
|
|
|
}
|
2020-11-05 12:33:32 +01:00
|
|
|
currLimit := acc.ci.BwLimitFile.LimitAt(time.Now())
|
2020-06-20 17:10:02 +02:00
|
|
|
if currLimit.Bandwidth > 0 {
|
|
|
|
fs.Debugf(acc.name, "Limiting file transfer to %v", currLimit.Bandwidth)
|
|
|
|
acc.tokenBucket = newTokenBucket(currLimit.Bandwidth)
|
|
|
|
}
|
|
|
|
|
2015-09-15 16:46:06 +02:00
|
|
|
go acc.averageLoop()
|
2019-07-16 13:56:20 +02:00
|
|
|
stats.inProgress.set(acc.name, acc)
|
2015-09-15 16:46:06 +02:00
|
|
|
return acc
|
|
|
|
}
|
|
|
|
|
2017-02-17 10:15:24 +01:00
|
|
|
// WithBuffer - If the file is above a certain size it adds an Async reader
//
// NOTE(review): this mutates acc.in/acc.close without taking acc.mu -
// presumably it is only called before concurrent use or (as in
// UpdateReader) while the caller already holds acc.mu; locking here
// would deadlock that path. TODO confirm.
func (acc *Account) WithBuffer() *Account {
	// if already have a buffer then just return
	if acc.withBuf {
		return acc
	}
	acc.withBuf = true
	var buffers int
	// Unknown size (-1) or large files get the full --buffer-size worth
	// of asyncreader buffers; smaller files get proportionally fewer.
	if acc.size >= int64(acc.ci.BufferSize) || acc.size == -1 {
		buffers = int(int64(acc.ci.BufferSize) / asyncreader.BufferSize)
	} else {
		buffers = int(acc.size / asyncreader.BufferSize)
	}
	// On big files add a buffer
	if buffers > 0 {
		rc, err := asyncreader.New(acc.ctx, acc.origIn, buffers)
		if err != nil {
			// Best effort: fall back to the unbuffered reader.
			fs.Errorf(acc.name, "Failed to make buffer: %v", err)
		} else {
			acc.in = rc
			acc.close = rc
		}
	}
	return acc
}
|
|
|
|
|
2020-06-26 18:24:16 +02:00
|
|
|
// HasBuffer - returns true if this Account has an AsyncReader with a buffer
|
|
|
|
func (acc *Account) HasBuffer() bool {
|
|
|
|
acc.mu.Lock()
|
|
|
|
defer acc.mu.Unlock()
|
|
|
|
_, ok := acc.in.(*asyncreader.AsyncReader)
|
|
|
|
return ok
|
|
|
|
}
|
|
|
|
|
2018-02-01 16:41:58 +01:00
|
|
|
// GetReader returns the underlying io.ReadCloser under any Buffer
//
// This is the reader originally passed in (or set via UpdateReader),
// before any asyncreader wrapping.
func (acc *Account) GetReader() io.ReadCloser {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	return acc.origIn
}
|
|
|
|
|
2018-08-11 10:18:19 +02:00
|
|
|
// GetAsyncReader returns the current AsyncReader or nil if Account is unbuffered
|
|
|
|
func (acc *Account) GetAsyncReader() *asyncreader.AsyncReader {
|
|
|
|
acc.mu.Lock()
|
|
|
|
defer acc.mu.Unlock()
|
|
|
|
if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
|
|
|
|
return asyncIn
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2017-02-17 00:57:58 +01:00
|
|
|
// StopBuffering stops the async buffer doing any more buffering
//
// NOTE(review): reads acc.in without acc.mu, unlike HasBuffer /
// GetAsyncReader - presumably deliberate so it can be called while a
// Read is blocked holding the lock; verify against callers.
func (acc *Account) StopBuffering() {
	if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
		asyncIn.StopBuffering()
	}
}
|
|
|
|
|
|
|
|
// Abandon stops the async buffer doing any more buffering
//
// Unlike StopBuffering this discards the buffered data. Like
// StopBuffering it does not take acc.mu (see note there).
func (acc *Account) Abandon() {
	if asyncIn, ok := acc.in.(*asyncreader.AsyncReader); ok {
		asyncIn.Abandon()
	}
}
|
|
|
|
|
2018-02-01 16:41:58 +01:00
|
|
|
// UpdateReader updates the underlying io.ReadCloser stopping the
// async buffer (if any) and re-adding it
//
// Also resets the byte counters so a retried transfer doesn't show
// more than 100% progress.
func (acc *Account) UpdateReader(ctx context.Context, in io.ReadCloser) {
	acc.mu.Lock()
	// Remember buffering state so it can be re-applied to the new reader.
	withBuf := acc.withBuf
	if withBuf {
		acc.Abandon()
		acc.withBuf = false
	}
	acc.in = in
	acc.ctx = ctx
	acc.close = in
	acc.origIn = in
	acc.closed = false
	if withBuf {
		// WithBuffer is safe to call here - it does not take acc.mu.
		acc.WithBuffer()
	}
	acc.mu.Unlock()

	// Reset counter to stop percentage going over 100%
	acc.values.mu.Lock()
	acc.values.lpBytes = 0
	acc.values.bytes = 0
	acc.values.mu.Unlock()
}
|
|
|
|
|
2018-02-01 16:41:58 +01:00
|
|
|
// averageLoop calculates averages for the stats in the background
//
// It runs until acc.exit is closed (by Done). Once a second it folds
// the bytes read since the last tick into an exponentially weighted
// moving average with a soft-start over the first averagePeriod ticks.
func (acc *Account) averageLoop() {
	tick := time.NewTicker(time.Second)
	var period float64
	defer tick.Stop()
	for {
		select {
		case now := <-tick.C:
			acc.values.mu.Lock()
			// Add average of last second.
			elapsed := now.Sub(acc.values.lpTime).Seconds()
			avg := float64(acc.values.lpBytes) / elapsed
			// Soft start the moving average
			if period < averagePeriod {
				period++
			}
			acc.values.avg = (avg + (period-1)*acc.values.avg) / period
			acc.values.lpBytes = 0
			acc.values.lpTime = now
			// Unlock stats
			acc.values.mu.Unlock()
		case <-acc.exit:
			return
		}
	}
}
|
|
|
|
|
2020-04-23 11:39:06 +02:00
|
|
|
// Check the read before it has happened is valid returning the number
// of bytes remaining to read.
//
// Returns a context cancellation error, or
// ErrorMaxTransferLimitReachedFatal when the --max-transfer budget
// (tracked globally via acc.stats) is already exhausted. Also records
// the start time of the first read.
func (acc *Account) checkReadBefore() (bytesUntilLimit int64, err error) {
	// Check to see if context is cancelled
	if err = acc.ctx.Err(); err != nil {
		return 0, err
	}
	acc.values.mu.Lock()
	if acc.values.max >= 0 {
		// Budget remaining is measured against the global stats, not
		// just this Account's bytes.
		bytesUntilLimit = acc.values.max - acc.stats.GetBytes()
		if bytesUntilLimit < 0 {
			acc.values.mu.Unlock()
			return bytesUntilLimit, ErrorMaxTransferLimitReachedFatal
		}
	} else {
		// No limit configured - effectively infinite headroom.
		bytesUntilLimit = 1 << 62
	}
	// Set start time.
	if acc.values.start.IsZero() {
		acc.values.start = time.Now()
	}
	acc.values.mu.Unlock()
	return bytesUntilLimit, nil
}
|
|
|
|
|
|
|
|
// Check the read call after the read has happened
|
2020-06-04 16:32:17 +02:00
|
|
|
func (acc *Account) checkReadAfter(bytesUntilLimit int64, n int, err error) (outN int, outErr error) {
|
2020-04-23 11:39:06 +02:00
|
|
|
bytesUntilLimit -= int64(n)
|
|
|
|
if bytesUntilLimit < 0 {
|
|
|
|
// chop the overage off
|
|
|
|
n += int(bytesUntilLimit)
|
|
|
|
if n < 0 {
|
|
|
|
n = 0
|
|
|
|
}
|
|
|
|
err = ErrorMaxTransferLimitReachedFatal
|
|
|
|
}
|
|
|
|
return n, err
|
2019-04-24 18:04:12 +02:00
|
|
|
}
|
2015-09-15 16:46:06 +02:00
|
|
|
|
2020-10-13 23:43:40 +02:00
|
|
|
// ServerSideCopyStart should be called at the start of a server-side copy
|
2019-08-28 18:35:58 +02:00
|
|
|
//
|
|
|
|
// This pretends a transfer has started
|
|
|
|
func (acc *Account) ServerSideCopyStart() {
|
2020-05-12 17:16:17 +02:00
|
|
|
acc.values.mu.Lock()
|
2019-08-28 18:35:58 +02:00
|
|
|
// Set start time.
|
2020-05-12 17:16:17 +02:00
|
|
|
if acc.values.start.IsZero() {
|
|
|
|
acc.values.start = time.Now()
|
2019-08-28 18:35:58 +02:00
|
|
|
}
|
2020-05-12 17:16:17 +02:00
|
|
|
acc.values.mu.Unlock()
|
2019-08-28 18:35:58 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// ServerSideCopyEnd accounts for a read of n bytes in a sever side copy
|
|
|
|
func (acc *Account) ServerSideCopyEnd(n int64) {
|
|
|
|
// Update Stats
|
2020-05-12 17:16:17 +02:00
|
|
|
acc.values.mu.Lock()
|
|
|
|
acc.values.bytes += n
|
|
|
|
acc.values.mu.Unlock()
|
2019-08-28 18:35:58 +02:00
|
|
|
|
|
|
|
acc.stats.Bytes(n)
|
|
|
|
}
|
|
|
|
|
2020-11-05 08:15:42 +01:00
|
|
|
// DryRun accounts for statistics without running the operation
//
// Implemented as a zero-work server-side copy: marks the transfer
// started and credits n bytes immediately.
func (acc *Account) DryRun(n int64) {
	acc.ServerSideCopyStart()
	acc.ServerSideCopyEnd(n)
}
|
|
|
|
|
2020-06-20 17:10:02 +02:00
|
|
|
// Account for n bytes from the current file bandwidth limit (if any)
//
// Blocks until the per-file token bucket permits n bytes. Uses
// context.Background() so a cancelled transfer context does not turn
// rate limiting into an error here.
func (acc *Account) limitPerFileBandwidth(n int) {
	// Snapshot the bucket under the lock, then wait without holding it.
	acc.values.mu.Lock()
	tokenBucket := acc.tokenBucket
	acc.values.mu.Unlock()

	if tokenBucket != nil {
		err := tokenBucket.WaitN(context.Background(), n)
		if err != nil {
			// Can only happen if n exceeds the bucket burst size -
			// log rather than fail the transfer.
			fs.Errorf(nil, "Token bucket error: %v", err)
		}
	}
}
|
|
|
|
|
2019-04-24 18:04:12 +02:00
|
|
|
// Account the read and limit bandwidth
//
// Updates the per-transfer counters, the global stats, then applies
// the global and per-file bandwidth limits (which may block).
func (acc *Account) accountRead(n int) {
	// Update Stats
	acc.values.mu.Lock()
	acc.values.lpBytes += n
	acc.values.bytes += int64(n)
	acc.values.mu.Unlock()

	acc.stats.Bytes(int64(n))

	limitBandwidth(n)
	acc.limitPerFileBandwidth(n)
}
|
|
|
|
|
|
|
|
// read bytes from the io.Reader passed in and account them
//
// Order matters: the limit is checked before the read, the raw count
// is accounted, then the count/error are adjusted if the read crossed
// the --max-transfer limit.
func (acc *Account) read(in io.Reader, p []byte) (n int, err error) {
	bytesUntilLimit, err := acc.checkReadBefore()
	if err == nil {
		n, err = in.Read(p)
		acc.accountRead(n)
		n, err = acc.checkReadAfter(bytesUntilLimit, n, err)
	}
	return n, err
}
|
|
|
|
|
2016-08-22 22:19:38 +02:00
|
|
|
// Read bytes from the object - see io.Reader
//
// Holds acc.mu for the duration of the (possibly blocking) read to
// serialise against Close.
func (acc *Account) Read(p []byte) (n int, err error) {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	return acc.read(acc.in, p)
}
|
|
|
|
|
2020-02-13 17:06:05 +01:00
|
|
|
// Thin wrapper for w
//
// Used by WriteTo so that bytes flowing out through WriterTo fast
// paths are still accounted and limit-checked.
type accountWriteTo struct {
	w   io.Writer
	acc *Account
}
|
|
|
|
|
|
|
|
// Write writes len(p) bytes from p to the underlying data stream. It
// returns the number of bytes written from p (0 <= n <= len(p)) and
// any error encountered that caused the write to stop early. Write
// must return a non-nil error if it returns n < len(p). Write must
// not modify the slice data, even temporarily.
//
// Implementations must not retain p.
//
// NOTE(review): here accountRead runs after checkReadAfter (accounting
// the post-trim count), whereas Account.read accounts the raw count
// before trimming - verify this asymmetry is intended.
func (awt *accountWriteTo) Write(p []byte) (n int, err error) {
	bytesUntilLimit, err := awt.acc.checkReadBefore()
	if err == nil {
		n, err = awt.w.Write(p)
		n, err = awt.acc.checkReadAfter(bytesUntilLimit, n, err)
		awt.acc.accountRead(n)
	}
	return n, err
}
|
|
|
|
|
|
|
|
// WriteTo writes data to w until there's no more data to write or
|
|
|
|
// when an error occurs. The return value n is the number of bytes
|
|
|
|
// written. Any error encountered during the write is also returned.
|
|
|
|
func (acc *Account) WriteTo(w io.Writer) (n int64, err error) {
|
|
|
|
acc.mu.Lock()
|
|
|
|
in := acc.in
|
|
|
|
acc.mu.Unlock()
|
|
|
|
wrappedWriter := accountWriteTo{w: w, acc: acc}
|
|
|
|
if do, ok := in.(io.WriterTo); ok {
|
|
|
|
n, err = do.WriteTo(&wrappedWriter)
|
|
|
|
} else {
|
|
|
|
n, err = io.Copy(&wrappedWriter, in)
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2019-04-24 18:04:12 +02:00
|
|
|
// AccountRead account having read n bytes
//
// For callers that read outside this Account but still want the bytes
// counted and the --max-transfer limit enforced.
func (acc *Account) AccountRead(n int) (err error) {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	bytesUntilLimit, err := acc.checkReadBefore()
	if err == nil {
		// Trim n against the limit first, then account the trimmed count.
		n, err = acc.checkReadAfter(bytesUntilLimit, n, err)
		acc.accountRead(n)
	}
	return err
}
|
|
|
|
|
2018-02-01 16:41:58 +01:00
|
|
|
// Close the object
|
|
|
|
func (acc *Account) Close() error {
|
|
|
|
acc.mu.Lock()
|
|
|
|
defer acc.mu.Unlock()
|
|
|
|
if acc.closed {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
acc.closed = true
|
2019-04-24 18:04:12 +02:00
|
|
|
if acc.close == nil {
|
|
|
|
return nil
|
|
|
|
}
|
2018-02-01 16:41:58 +01:00
|
|
|
return acc.close.Close()
|
|
|
|
}
|
|
|
|
|
2019-09-18 17:54:34 +02:00
|
|
|
// Done with accounting - must be called to free accounting goroutine
//
// NOTE(review): closing acc.exit stops averageLoop; a second Done call
// would panic on the already-closed channel - presumably callers
// guarantee exactly one call. Also removes the account from the
// in-progress set.
func (acc *Account) Done() {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	close(acc.exit)
	acc.stats.inProgress.clear(acc.name)
}
|
|
|
|
|
2018-02-01 16:41:58 +01:00
|
|
|
// progress returns bytes read as well as the size.
|
2015-09-15 16:46:06 +02:00
|
|
|
// Size can be <= 0 if the size is unknown.
|
2018-02-01 16:41:58 +01:00
|
|
|
func (acc *Account) progress() (bytes, size int64) {
|
2016-08-22 22:19:38 +02:00
|
|
|
if acc == nil {
|
2015-09-15 16:46:06 +02:00
|
|
|
return 0, 0
|
|
|
|
}
|
2020-05-12 17:16:17 +02:00
|
|
|
acc.values.mu.Lock()
|
|
|
|
bytes, size = acc.values.bytes, acc.size
|
|
|
|
acc.values.mu.Unlock()
|
2017-06-13 12:22:16 +02:00
|
|
|
return bytes, size
|
2015-09-15 16:46:06 +02:00
|
|
|
}
|
|
|
|
|
2018-02-01 16:41:58 +01:00
|
|
|
// speed returns the speed of the current file transfer
|
2020-05-20 12:39:20 +02:00
|
|
|
// in bytes per second, as well an exponentially weighted moving average
|
2015-09-15 16:46:06 +02:00
|
|
|
// If no read has completed yet, 0 is returned for both values.
|
2018-02-01 16:41:58 +01:00
|
|
|
func (acc *Account) speed() (bps, current float64) {
|
2016-08-22 22:19:38 +02:00
|
|
|
if acc == nil {
|
2015-09-15 16:46:06 +02:00
|
|
|
return 0, 0
|
|
|
|
}
|
2020-05-12 17:16:17 +02:00
|
|
|
acc.values.mu.Lock()
|
|
|
|
defer acc.values.mu.Unlock()
|
|
|
|
if acc.values.bytes == 0 {
|
2015-09-15 16:46:06 +02:00
|
|
|
return 0, 0
|
|
|
|
}
|
|
|
|
// Calculate speed from first read.
|
2020-05-12 17:16:17 +02:00
|
|
|
total := float64(time.Now().Sub(acc.values.start)) / float64(time.Second)
|
|
|
|
bps = float64(acc.values.bytes) / total
|
|
|
|
current = acc.values.avg
|
2015-09-15 16:46:06 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-02-01 16:41:58 +01:00
|
|
|
// eta returns the ETA of the current operation,
// rounded to full seconds.
// If the ETA cannot be determined 'ok' returns false.
//
// Safe to call on a nil *Account. Delegates the arithmetic to the
// package-level eta helper.
func (acc *Account) eta() (etaDuration time.Duration, ok bool) {
	if acc == nil {
		return 0, false
	}
	acc.values.mu.Lock()
	defer acc.values.mu.Unlock()
	return eta(acc.values.bytes, acc.size, acc.values.avg)
}
|
|
|
|
|
2019-01-14 17:12:39 +01:00
|
|
|
// shortenName shortens in to size runes long
|
|
|
|
// If size <= 0 then in is left untouched
|
|
|
|
func shortenName(in string, size int) string {
|
|
|
|
if size <= 0 {
|
|
|
|
return in
|
|
|
|
}
|
|
|
|
if utf8.RuneCountInString(in) <= size {
|
|
|
|
return in
|
|
|
|
}
|
|
|
|
name := []rune(in)
|
Spelling fixes
Fix spelling of: above, already, anonymous, associated,
authentication, bandwidth, because, between, blocks, calculate,
candidates, cautious, changelog, cleaner, clipboard, command,
completely, concurrently, considered, constructs, corrupt, current,
daemon, dependencies, deprecated, directory, dispatcher, download,
eligible, ellipsis, encrypter, endpoint, entrieslist, essentially,
existing writers, existing, expires, filesystem, flushing, frequently,
hierarchy, however, implementation, implements, inaccurate,
individually, insensitive, longer, maximum, metadata, modified,
multipart, namedirfirst, nextcloud, obscured, opened, optional,
owncloud, pacific, passphrase, password, permanently, persimmon,
positive, potato, protocol, quota, receiving, recommends, referring,
requires, revisited, satisfied, satisfies, satisfy, semver,
serialized, session, storage, strategies, stringlist, successful,
supported, surprise, temporarily, temporary, transactions, unneeded,
update, uploads, wrapped
Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
2020-10-09 02:17:24 +02:00
|
|
|
size-- // don't count ellipsis rune
|
2019-01-14 17:12:39 +01:00
|
|
|
suffixLength := size / 2
|
|
|
|
prefixLength := size - suffixLength
|
|
|
|
suffixStart := len(name) - suffixLength
|
|
|
|
name = append(append(name[:prefixLength], '…'), name[suffixStart:]...)
|
|
|
|
return string(name)
|
|
|
|
}
|
|
|
|
|
2015-09-15 16:46:06 +02:00
|
|
|
// String produces stats for this file
//
// Format: "<name>:<pct>% /<size>, <speed>/s, <eta>" with the name
// shortened to --stats-file-name-length runes.
func (acc *Account) String() string {
	a, b := acc.progress()
	_, cur := acc.speed()
	eta, etaok := acc.eta()
	// "-" when the ETA is unknown.
	etas := "-"
	if etaok {
		if eta > 0 {
			etas = fmt.Sprintf("%v", eta)
		} else {
			etas = "0s"
		}
	}

	// Honour --stats-unit bits.
	if acc.ci.DataRateUnit == "bits" {
		cur = cur * 8
	}

	// Percentage is only meaningful when the size is known.
	percentageDone := 0
	if b > 0 {
		percentageDone = int(100 * float64(a) / float64(b))
	}

	return fmt.Sprintf("%*s:%3d%% /%s, %s/s, %s",
		acc.ci.StatsFileNameLength,
		shortenName(acc.name, acc.ci.StatsFileNameLength),
		percentageDone,
		fs.SizeSuffix(b),
		fs.SizeSuffix(cur),
		etas,
	)
}
|
|
|
|
|
2020-08-05 17:59:44 +02:00
|
|
|
// rcStats produces remote control stats for this file
|
|
|
|
func (acc *Account) rcStats() (out rc.Params) {
|
2019-07-26 09:51:51 +02:00
|
|
|
out = make(rc.Params)
|
2018-08-07 21:56:40 +02:00
|
|
|
a, b := acc.progress()
|
|
|
|
out["bytes"] = a
|
|
|
|
out["size"] = b
|
|
|
|
spd, cur := acc.speed()
|
|
|
|
out["speed"] = spd
|
|
|
|
out["speedAvg"] = cur
|
|
|
|
|
|
|
|
eta, etaok := acc.eta()
|
|
|
|
out["eta"] = nil
|
|
|
|
if etaok {
|
|
|
|
if eta > 0 {
|
|
|
|
out["eta"] = eta.Seconds()
|
|
|
|
} else {
|
|
|
|
out["eta"] = 0
|
|
|
|
}
|
|
|
|
}
|
|
|
|
out["name"] = acc.name
|
|
|
|
|
|
|
|
percentageDone := 0
|
|
|
|
if b > 0 {
|
|
|
|
percentageDone = int(100 * float64(a) / float64(b))
|
|
|
|
}
|
|
|
|
out["percentage"] = percentageDone
|
2019-10-29 11:13:21 +01:00
|
|
|
out["group"] = acc.stats.group
|
2018-08-07 21:56:40 +02:00
|
|
|
|
|
|
|
return out
|
2015-09-15 16:46:06 +02:00
|
|
|
}
|
|
|
|
|
2018-02-01 16:41:58 +01:00
|
|
|
// OldStream returns the top io.Reader
//
// Part of the Accounter interface - see UnWrap.
func (acc *Account) OldStream() io.Reader {
	acc.mu.Lock()
	defer acc.mu.Unlock()
	return acc.in
}
|
|
|
|
|
|
|
|
// SetStream updates the top io.Reader
//
// Part of the Accounter interface - see UnWrap.
func (acc *Account) SetStream(in io.Reader) {
	acc.mu.Lock()
	acc.in = in
	acc.mu.Unlock()
}
|
|
|
|
|
|
|
|
// WrapStream wraps an io Reader so it will be accounted in the same
// way as account
func (acc *Account) WrapStream(in io.Reader) io.Reader {
	return &accountStream{
		acc: acc,
		in:  in,
	}
}
|
|
|
|
|
|
|
|
// accountStream accounts a single io.Reader into a parent *Account
//
// NOTE(review): unlike Account, field access here is unsynchronised -
// presumably each accountStream is used from a single goroutine.
type accountStream struct {
	acc *Account
	in  io.Reader
}
|
|
|
|
|
2018-02-01 16:41:58 +01:00
|
|
|
// OldStream return the underlying stream
func (a *accountStream) OldStream() io.Reader {
	return a.in
}
|
|
|
|
|
|
|
|
// SetStream set the underlying stream
func (a *accountStream) SetStream(in io.Reader) {
	a.in = in
}
|
|
|
|
|
|
|
|
// WrapStream wraps in in an accounter, delegating to the parent Account
func (a *accountStream) WrapStream(in io.Reader) io.Reader {
	return a.acc.WrapStream(in)
}
|
|
|
|
|
|
|
|
// Read bytes from the object - see io.Reader
//
// Accounts the bytes into the parent Account. Note this does not take
// the parent's mu, unlike Account.Read.
func (a *accountStream) Read(p []byte) (n int, err error) {
	return a.acc.read(a.in, p)
}
|
|
|
|
|
2018-02-01 16:41:58 +01:00
|
|
|
// Accounter accounts a stream allowing the accounting to be removed and re-added
//
// Implemented by *Account and *accountStream; consumed by UnWrap.
type Accounter interface {
	io.Reader
	OldStream() io.Reader
	SetStream(io.Reader)
	WrapStream(io.Reader) io.Reader
}
|
|
|
|
|
2018-02-01 16:41:58 +01:00
|
|
|
// WrapFn wraps an io.Reader (for accounting purposes usually)
type WrapFn func(io.Reader) io.Reader
|
|
|
|
|
|
|
|
// UnWrap unwraps a reader returning unwrapped and wrap, a function to
// wrap it back up again. If `in` is an Accounter then this function
// will take the accounting unwrapped and wrap will put it back on
// again the new Reader passed in.
//
// This allows functions which wrap io.Readers to move the accounting
// to the end of the wrapped chain of readers. This is very important
// if buffering is being introduced and if the Reader might be wrapped
// again.
func UnWrap(in io.Reader) (unwrapped io.Reader, wrap WrapFn) {
	acc, ok := in.(Accounter)
	if !ok {
		// Not accounted - return in unchanged with an identity wrapper.
		return in, func(r io.Reader) io.Reader { return r }
	}
	return acc.OldStream(), acc.WrapStream
}
|