2018-02-01 14:13:24 +01:00
|
|
|
package accounting
|
|
|
|
|
|
|
|
import (
|
|
|
|
"bytes"
|
|
|
|
"fmt"
|
2019-07-16 13:56:20 +02:00
|
|
|
"sort"
|
2018-02-01 14:13:24 +01:00
|
|
|
"strings"
|
|
|
|
"sync"
|
|
|
|
"time"
|
|
|
|
|
2019-07-28 19:47:38 +02:00
|
|
|
"github.com/rclone/rclone/fs"
|
|
|
|
"github.com/rclone/rclone/fs/fserrors"
|
|
|
|
"github.com/rclone/rclone/fs/rc"
|
2018-02-01 14:13:24 +01:00
|
|
|
)
|
|
|
|
|
2019-11-14 14:14:40 +01:00
|
|
|
// MaxCompletedTransfers specifies maximum number of completed transfers in startedTransfers list
//
// Set to a negative value to disable pruning of completed transfers
// (see PruneTransfers).
var MaxCompletedTransfers = 100
|
2019-10-16 21:11:11 +02:00
|
|
|
|
2018-02-01 14:13:24 +01:00
|
|
|
// StatsInfo accounts all transfers
type StatsInfo struct {
	mu sync.RWMutex // protects the fields below (checking/transferring have their own locks)
	bytes int64 // total bytes transferred so far
	errors int64 // count of errors recorded via Error
	lastError error // most recent error recorded via Error
	fatalError bool // set when a fatal error has been seen
	retryError bool // set when a retryable error has been seen
	retryAfter time.Time // time to retry after, if set - see RetryAfter
	checks int64 // number of completed checks - see DoneChecking
	checking *transferMap // checks in progress
	checkQueue int // number of queued checks - see SetCheckQueue
	checkQueueSize int64 // total size in bytes of queued checks
	transfers int64 // number of completed transfers - see DoneTransferring
	transferring *transferMap // transfers in progress
	transferQueue int // number of queued transfers - see SetTransferQueue
	transferQueueSize int64 // total size in bytes of queued transfers
	renames int64 // number of completed renames - see Renames
	renameQueue int // number of queued renames - see SetRenameQueue
	renameQueueSize int64 // total size in bytes of queued renames
	deletes int64 // number of completed deletes - see Deletes
	inProgress *inProgress // accounting for the data of in-progress transfers
	startedTransfers []*Transfer // currently active transfers
	oldTimeRanges timeRanges // a merged list of time ranges for the transfers
	oldDuration time.Duration // duration of transfers we have culled
	group string // name of the stats group these stats belong to
}
|
|
|
|
|
2019-04-30 14:06:24 +02:00
|
|
|
// NewStats creates an initialised StatsInfo
func NewStats() *StatsInfo {
	return &StatsInfo{
		// Pre-size the in-progress maps from the configured concurrency.
		checking: newTransferMap(fs.Config.Checkers, "checking"),
		transferring: newTransferMap(fs.Config.Transfers, "transferring"),
		inProgress: newInProgress(),
	}
}
|
|
|
|
|
2018-08-07 21:56:40 +02:00
|
|
|
// RemoteStats returns stats for rc
|
2019-07-18 12:13:54 +02:00
|
|
|
func (s *StatsInfo) RemoteStats() (out rc.Params, err error) {
|
2018-08-07 21:56:40 +02:00
|
|
|
out = make(rc.Params)
|
|
|
|
s.mu.RLock()
|
2020-02-26 09:34:32 +01:00
|
|
|
out["speed"] = s.Speed()
|
2018-08-07 21:56:40 +02:00
|
|
|
out["bytes"] = s.bytes
|
|
|
|
out["errors"] = s.errors
|
2018-08-30 18:26:07 +02:00
|
|
|
out["fatalError"] = s.fatalError
|
|
|
|
out["retryError"] = s.retryError
|
2018-08-07 21:56:40 +02:00
|
|
|
out["checks"] = s.checks
|
|
|
|
out["transfers"] = s.transfers
|
|
|
|
out["deletes"] = s.deletes
|
2020-03-30 19:12:32 +02:00
|
|
|
out["renames"] = s.renames
|
2020-02-26 09:34:32 +01:00
|
|
|
out["elapsedTime"] = s.totalDuration().Seconds()
|
2018-08-07 21:56:40 +02:00
|
|
|
s.mu.RUnlock()
|
|
|
|
if !s.checking.empty() {
|
|
|
|
var c []string
|
|
|
|
s.checking.mu.RLock()
|
|
|
|
defer s.checking.mu.RUnlock()
|
2020-06-16 17:18:32 +02:00
|
|
|
for _, tr := range s.checking.sortedSlice() {
|
|
|
|
c = append(c, tr.remote)
|
2018-08-07 21:56:40 +02:00
|
|
|
}
|
|
|
|
out["checking"] = c
|
|
|
|
}
|
|
|
|
if !s.transferring.empty() {
|
|
|
|
s.transferring.mu.RLock()
|
2019-07-26 09:51:51 +02:00
|
|
|
|
|
|
|
var t []rc.Params
|
2020-06-16 17:18:32 +02:00
|
|
|
for _, tr := range s.transferring.sortedSlice() {
|
|
|
|
if acc := s.inProgress.get(tr.remote); acc != nil {
|
2018-08-07 21:56:40 +02:00
|
|
|
t = append(t, acc.RemoteStats())
|
|
|
|
} else {
|
2020-06-13 20:37:42 +02:00
|
|
|
t = append(t, s.transferRemoteStats(tr))
|
2018-08-07 21:56:40 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
out["transferring"] = t
|
2019-07-26 09:51:51 +02:00
|
|
|
s.transferring.mu.RUnlock()
|
2018-08-07 21:56:40 +02:00
|
|
|
}
|
|
|
|
if s.errors > 0 {
|
2019-07-26 09:51:51 +02:00
|
|
|
out["lastError"] = s.lastError.Error()
|
2018-08-07 21:56:40 +02:00
|
|
|
}
|
|
|
|
return out, nil
|
|
|
|
}
|
|
|
|
|
2020-02-26 09:34:32 +01:00
|
|
|
// Speed returns the average speed of the transfer in bytes/second
|
|
|
|
func (s *StatsInfo) Speed() float64 {
|
|
|
|
dt := s.totalDuration()
|
|
|
|
dtSeconds := dt.Seconds()
|
|
|
|
speed := 0.0
|
|
|
|
if dt > 0 {
|
|
|
|
speed = float64(s.bytes) / dtSeconds
|
|
|
|
}
|
|
|
|
return speed
|
|
|
|
}
|
|
|
|
|
2020-06-13 20:37:42 +02:00
|
|
|
func (s *StatsInfo) transferRemoteStats(tr *Transfer) rc.Params {
|
|
|
|
return rc.Params{
|
|
|
|
"name": tr.remote,
|
|
|
|
"size": tr.size,
|
2019-07-26 09:51:51 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-10-16 17:05:26 +02:00
|
|
|
// timeRange is a start and end time of a transfer
type timeRange struct {
	start time.Time // when the transfer started
	end time.Time // when the transfer ended
}
|
|
|
|
|
2019-10-16 17:05:26 +02:00
|
|
|
// timeRanges is a list of non-overlapping start and end times for
// transfers
//
// It is kept non-overlapping by calling merge after appending to it.
type timeRanges []timeRange
|
|
|
|
|
|
|
|
// merge all the overlapping time ranges
func (trs *timeRanges) merge() {
	Trs := *trs

	// Sort by the starting time.
	sort.Slice(Trs, func(i, j int) bool {
		return Trs[i].start.Before(Trs[j].start)
	})

	// Merge overlaps and add distinctive ranges together
	//
	// i indexes the range currently being extended, j the next
	// candidate to absorb. newTrs reuses Trs's backing array.
	var (
		newTrs = Trs[:0]
		i, j = 0, 1
	)
	for i < len(Trs) {
		if j < len(Trs) {
			// Ranges i and j overlap (or touch) if j starts no
			// later than i ends - extend i to cover j.
			if !Trs[i].end.Before(Trs[j].start) {
				if Trs[i].end.Before(Trs[j].end) {
					Trs[i].end = Trs[j].end
				}
				j++
				continue
			}
		}
		// No overlap (or no candidates left) - emit range i and
		// start again from j.
		newTrs = append(newTrs, Trs[i])
		i = j
		j++
	}

	*trs = newTrs
}
|
|
|
|
|
2019-10-17 12:43:32 +02:00
|
|
|
// cull remove any ranges whose start and end are before cutoff
|
|
|
|
// returning their duration sum
|
|
|
|
func (trs *timeRanges) cull(cutoff time.Time) (d time.Duration) {
|
|
|
|
var newTrs = (*trs)[:0]
|
|
|
|
for _, tr := range *trs {
|
|
|
|
if cutoff.Before(tr.start) || cutoff.Before(tr.end) {
|
|
|
|
newTrs = append(newTrs, tr)
|
|
|
|
} else {
|
|
|
|
d += tr.end.Sub(tr.start)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
*trs = newTrs
|
|
|
|
return d
|
|
|
|
}
|
|
|
|
|
2019-10-16 17:05:26 +02:00
|
|
|
// total the time out of the time ranges
|
|
|
|
func (trs timeRanges) total() (total time.Duration) {
|
|
|
|
for _, tr := range trs {
|
|
|
|
total += tr.end.Sub(tr.start)
|
|
|
|
}
|
2019-07-16 13:56:20 +02:00
|
|
|
return total
|
|
|
|
}
|
|
|
|
|
2019-10-16 17:05:26 +02:00
|
|
|
// Total duration is union of durations of all transfers belonging to this
|
|
|
|
// object.
|
|
|
|
// Needs to be protected by mutex.
|
|
|
|
func (s *StatsInfo) totalDuration() time.Duration {
|
|
|
|
// copy of s.oldTimeRanges with extra room for the current transfers
|
|
|
|
timeRanges := make(timeRanges, len(s.oldTimeRanges), len(s.oldTimeRanges)+len(s.startedTransfers))
|
|
|
|
copy(timeRanges, s.oldTimeRanges)
|
|
|
|
|
|
|
|
// Extract time ranges of all transfers.
|
|
|
|
now := time.Now()
|
|
|
|
for i := range s.startedTransfers {
|
|
|
|
start, end := s.startedTransfers[i].TimeRange()
|
|
|
|
if end.IsZero() {
|
|
|
|
end = now
|
|
|
|
}
|
|
|
|
timeRanges = append(timeRanges, timeRange{start, end})
|
|
|
|
}
|
|
|
|
|
|
|
|
timeRanges.merge()
|
2019-10-17 12:43:32 +02:00
|
|
|
return s.oldDuration + timeRanges.total()
|
2019-10-16 17:05:26 +02:00
|
|
|
}
|
|
|
|
|
2018-08-28 12:17:05 +02:00
|
|
|
// eta returns the ETA of the current operation,
// rounded to full seconds.
// If the ETA cannot be determined 'ok' returns false.
func eta(size, total int64, rate float64) (eta time.Duration, ok bool) {
	// Reject unknown totals, negative progress and non-positive rates.
	if total <= 0 || size < 0 || rate <= 0 {
		return 0, false
	}
	left := total - size
	if left < 0 {
		// Transferred more than the total - can't estimate.
		return 0, false
	}
	// Truncate to whole seconds.
	return time.Duration(float64(left)/rate) * time.Second, true
}
|
|
|
|
|
|
|
|
// etaString returns the ETA of the current operation,
|
|
|
|
// rounded to full seconds.
|
|
|
|
// If the ETA cannot be determined it returns "-"
|
|
|
|
func etaString(done, total int64, rate float64) string {
|
|
|
|
d, ok := eta(done, total, rate)
|
|
|
|
if !ok {
|
|
|
|
return "-"
|
|
|
|
}
|
2019-07-01 13:09:19 +02:00
|
|
|
return fs.Duration(d).ReadableString()
|
2018-08-28 12:17:05 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// percent returns a/b as a percentage rounded to the nearest integer
// as a string
//
// if the percentage is invalid it returns "-"
func percent(a int64, b int64) string {
	if a < 0 || b <= 0 {
		// Negative progress or unknown/zero total - no sensible percentage.
		return "-"
	}
	// +0.5 rounds to nearest rather than truncating.
	pct := int(float64(a)*100/float64(b) + 0.5)
	return fmt.Sprintf("%d%%", pct)
}
|
|
|
|
|
2018-02-01 14:13:24 +01:00
|
|
|
// String convert the StatsInfo to a string for printing
func (s *StatsInfo) String() string {
	// checking and transferring have their own locking so read
	// here before lock to prevent deadlock on GetBytes
	transferring, checking := s.transferring.count(), s.checking.count()
	transferringBytesDone, transferringBytesTotal := s.transferring.progress(s)

	s.mu.RLock()

	// Elapsed time and average speed over it
	dt := s.totalDuration()
	dtSeconds := dt.Seconds()
	// fractional-seconds part of dt for display (0.1s precision)
	dtSecondsOnly := dt.Truncate(time.Second/10) % time.Minute
	speed := 0.0
	if dt > 0 {
		speed = float64(s.bytes) / dtSeconds
	}

	// Scale to bits/s for display if so configured
	displaySpeed := speed
	if fs.Config.DataRateUnit == "bits" {
		displaySpeed *= 8
	}

	var (
		totalChecks = int64(s.checkQueue) + s.checks + int64(checking)
		totalTransfer = int64(s.transferQueue) + s.transfers + int64(transferring)
		// note that s.bytes already includes transferringBytesDone so
		// we take it off here to avoid double counting
		totalSize = s.transferQueueSize + s.bytes + transferringBytesTotal - transferringBytesDone
		currentSize = s.bytes
		buf = &bytes.Buffer{}
		xfrchkString = ""
		dateString = ""
	)

	if !fs.Config.StatsOneLine {
		_, _ = fmt.Fprintf(buf, "\nTransferred: ")
	} else {
		// One-line stats: build the compact "(xfr#m/n, chk#m/n)" suffix
		xfrchk := []string{}
		if totalTransfer > 0 && s.transferQueue > 0 {
			xfrchk = append(xfrchk, fmt.Sprintf("xfr#%d/%d", s.transfers, totalTransfer))
		}
		if totalChecks > 0 && s.checkQueue > 0 {
			xfrchk = append(xfrchk, fmt.Sprintf("chk#%d/%d", s.checks, totalChecks))
		}
		if len(xfrchk) > 0 {
			xfrchkString = fmt.Sprintf(" (%s)", strings.Join(xfrchk, ", "))
		}
		if fs.Config.StatsOneLineDate {
			t := time.Now()
			dateString = t.Format(fs.Config.StatsOneLineDateFormat) // Including the separator so people can customize it
		}
	}

	_, _ = fmt.Fprintf(buf, "%s%10s / %s, %s, %s, ETA %s%s\n",
		dateString,
		fs.SizeSuffix(s.bytes),
		fs.SizeSuffix(totalSize).Unit("Bytes"),
		percent(s.bytes, totalSize),
		fs.SizeSuffix(displaySpeed).Unit(strings.Title(fs.Config.DataRateUnit)+"/s"),
		etaString(currentSize, totalSize, speed),
		xfrchkString,
	)

	if !fs.Config.StatsOneLine {
		// Classify the error state for the "Errors:" line
		errorDetails := ""
		switch {
		case s.fatalError:
			errorDetails = " (fatal error encountered)"
		case s.retryError:
			errorDetails = " (retrying may help)"
		case s.errors != 0:
			errorDetails = " (no need to retry)"
		}

		// Add only non zero stats
		if s.errors != 0 {
			_, _ = fmt.Fprintf(buf, "Errors: %10d%s\n",
				s.errors, errorDetails)
		}
		if s.checks != 0 || totalChecks != 0 {
			_, _ = fmt.Fprintf(buf, "Checks: %10d / %d, %s\n",
				s.checks, totalChecks, percent(s.checks, totalChecks))
		}
		if s.deletes != 0 {
			_, _ = fmt.Fprintf(buf, "Deleted: %10d\n", s.deletes)
		}
		if s.renames != 0 {
			_, _ = fmt.Fprintf(buf, "Renamed: %10d\n", s.renames)
		}
		if s.transfers != 0 || totalTransfer != 0 {
			_, _ = fmt.Fprintf(buf, "Transferred: %10d / %d, %s\n",
				s.transfers, totalTransfer, percent(s.transfers, totalTransfer))
		}
		// Whole minutes (trailing zeros/"s" trimmed) plus fractional seconds
		_, _ = fmt.Fprintf(buf, "Elapsed time: %10ss\n", strings.TrimRight(dt.Truncate(time.Minute).String(), "0s")+fmt.Sprintf("%.1f", dtSecondsOnly.Seconds()))
	}

	// checking and transferring have their own locking so unlock
	// here to prevent deadlock on GetBytes
	s.mu.RUnlock()

	// Add per transfer stats if required
	if !fs.Config.StatsOneLine {
		if !s.checking.empty() {
			_, _ = fmt.Fprintf(buf, "Checking:\n%s\n", s.checking.String(s.inProgress, s.transferring))
		}
		if !s.transferring.empty() {
			_, _ = fmt.Fprintf(buf, "Transferring:\n%s\n", s.transferring.String(s.inProgress, nil))
		}
	}

	return buf.String()
}
|
|
|
|
|
2019-07-22 21:11:46 +02:00
|
|
|
// Transferred returns list of all completed transfers including checked and
|
|
|
|
// failed ones.
|
|
|
|
func (s *StatsInfo) Transferred() []TransferSnapshot {
|
|
|
|
ts := make([]TransferSnapshot, 0, len(s.startedTransfers))
|
|
|
|
|
|
|
|
for _, tr := range s.startedTransfers {
|
|
|
|
if tr.IsDone() {
|
|
|
|
ts = append(ts, tr.Snapshot())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return ts
|
|
|
|
}
|
|
|
|
|
2018-02-01 14:13:24 +01:00
|
|
|
// Log outputs the StatsInfo to the log
|
|
|
|
func (s *StatsInfo) Log() {
|
2020-02-15 22:21:01 +01:00
|
|
|
if fs.Config.UseJSONLog {
|
|
|
|
out, _ := s.RemoteStats()
|
2020-04-11 19:02:50 +02:00
|
|
|
fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "%v%v\n", s, fs.LogValue("stats", out))
|
2020-02-15 22:21:01 +01:00
|
|
|
} else {
|
|
|
|
fs.LogLevelPrintf(fs.Config.StatsLogLevel, nil, "%v\n", s)
|
|
|
|
}
|
|
|
|
|
2018-02-01 14:13:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Bytes updates the stats for bytes bytes
|
|
|
|
func (s *StatsInfo) Bytes(bytes int64) {
|
2018-05-02 18:01:39 +02:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2018-02-01 14:13:24 +01:00
|
|
|
s.bytes += bytes
|
|
|
|
}
|
|
|
|
|
2018-04-21 23:03:27 +02:00
|
|
|
// GetBytes returns the number of bytes transferred so far
|
|
|
|
func (s *StatsInfo) GetBytes() int64 {
|
2018-07-22 11:33:19 +02:00
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
2018-04-21 23:03:27 +02:00
|
|
|
return s.bytes
|
|
|
|
}
|
|
|
|
|
2019-10-30 20:23:17 +01:00
|
|
|
// GetBytesWithPending returns the number of bytes transferred and remaining transfers
|
|
|
|
func (s *StatsInfo) GetBytesWithPending() int64 {
|
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
|
|
|
pending := int64(0)
|
|
|
|
for _, tr := range s.startedTransfers {
|
|
|
|
if tr.acc != nil {
|
|
|
|
bytes, size := tr.acc.progress()
|
|
|
|
if bytes < size {
|
|
|
|
pending += size - bytes
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return s.bytes + pending
|
|
|
|
}
|
|
|
|
|
2018-02-01 14:13:24 +01:00
|
|
|
// Errors updates the stats for errors
|
|
|
|
func (s *StatsInfo) Errors(errors int64) {
|
2018-05-02 18:01:39 +02:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2018-02-01 14:13:24 +01:00
|
|
|
s.errors += errors
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetErrors reads the number of errors
|
|
|
|
func (s *StatsInfo) GetErrors() int64 {
|
2018-05-02 18:01:39 +02:00
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
2018-02-01 14:13:24 +01:00
|
|
|
return s.errors
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetLastError returns the lastError
|
|
|
|
func (s *StatsInfo) GetLastError() error {
|
2018-05-02 18:01:39 +02:00
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
2018-02-01 14:13:24 +01:00
|
|
|
return s.lastError
|
|
|
|
}
|
|
|
|
|
2018-12-31 12:58:55 +01:00
|
|
|
// GetChecks returns the number of checks
|
|
|
|
func (s *StatsInfo) GetChecks() int64 {
|
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
|
|
|
return s.checks
|
|
|
|
}
|
|
|
|
|
2018-08-30 18:26:07 +02:00
|
|
|
// FatalError sets the fatalError flag
|
|
|
|
func (s *StatsInfo) FatalError() {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
s.fatalError = true
|
|
|
|
}
|
|
|
|
|
|
|
|
// HadFatalError returns whether there has been at least one FatalError
|
|
|
|
func (s *StatsInfo) HadFatalError() bool {
|
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
|
|
|
return s.fatalError
|
|
|
|
}
|
|
|
|
|
|
|
|
// RetryError sets the retryError flag
|
|
|
|
func (s *StatsInfo) RetryError() {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
s.retryError = true
|
|
|
|
}
|
|
|
|
|
|
|
|
// HadRetryError returns whether there has been at least one non-NoRetryError
|
|
|
|
func (s *StatsInfo) HadRetryError() bool {
|
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
|
|
|
return s.retryError
|
|
|
|
}
|
|
|
|
|
2018-01-22 19:53:18 +01:00
|
|
|
// Deletes updates the stats for deletes
|
|
|
|
func (s *StatsInfo) Deletes(deletes int64) int64 {
|
2018-05-02 18:01:39 +02:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2018-01-22 19:53:18 +01:00
|
|
|
s.deletes += deletes
|
|
|
|
return s.deletes
|
|
|
|
}
|
|
|
|
|
2020-03-30 19:12:32 +02:00
|
|
|
// Renames updates the stats for renames
|
|
|
|
func (s *StatsInfo) Renames(renames int64) int64 {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
s.renames += renames
|
|
|
|
return s.renames
|
|
|
|
}
|
|
|
|
|
|
|
|
// ResetCounters sets the counters (bytes, checks, errors, transfers, deletes, renames) to 0 and resets lastError, fatalError and retryError
|
2018-02-01 14:13:24 +01:00
|
|
|
func (s *StatsInfo) ResetCounters() {
|
2018-07-22 11:33:19 +02:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2018-02-01 14:13:24 +01:00
|
|
|
s.bytes = 0
|
|
|
|
s.errors = 0
|
2018-08-30 18:26:07 +02:00
|
|
|
s.lastError = nil
|
|
|
|
s.fatalError = false
|
|
|
|
s.retryError = false
|
2019-03-21 12:24:13 +01:00
|
|
|
s.retryAfter = time.Time{}
|
2018-02-01 14:13:24 +01:00
|
|
|
s.checks = 0
|
|
|
|
s.transfers = 0
|
2018-01-22 19:53:18 +01:00
|
|
|
s.deletes = 0
|
2020-03-30 19:12:32 +02:00
|
|
|
s.renames = 0
|
2019-07-22 21:11:46 +02:00
|
|
|
s.startedTransfers = nil
|
2019-10-17 12:43:32 +02:00
|
|
|
s.oldDuration = 0
|
2018-02-01 14:13:24 +01:00
|
|
|
}
|
|
|
|
|
2018-08-30 18:26:07 +02:00
|
|
|
// ResetErrors sets the errors count to 0 and resets lastError, fatalError and retryError
|
2018-02-01 14:13:24 +01:00
|
|
|
func (s *StatsInfo) ResetErrors() {
|
2018-07-22 11:33:19 +02:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2018-02-01 14:13:24 +01:00
|
|
|
s.errors = 0
|
2018-08-30 18:26:07 +02:00
|
|
|
s.lastError = nil
|
|
|
|
s.fatalError = false
|
|
|
|
s.retryError = false
|
2019-03-21 12:24:13 +01:00
|
|
|
s.retryAfter = time.Time{}
|
2018-02-01 14:13:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// Errored returns whether there have been any errors
|
|
|
|
func (s *StatsInfo) Errored() bool {
|
2018-05-02 18:01:39 +02:00
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
2018-02-01 14:13:24 +01:00
|
|
|
return s.errors != 0
|
|
|
|
}
|
|
|
|
|
2018-08-30 18:26:07 +02:00
|
|
|
// Error adds a single error into the stats, assigns lastError and eventually sets fatalError or retryError
func (s *StatsInfo) Error(err error) error {
	// Ignore nil errors and errors already counted into the stats.
	if err == nil || fserrors.IsCounted(err) {
		return err
	}
	s.mu.Lock()
	defer s.mu.Unlock()
	s.errors++
	s.lastError = err
	// Wrap as an FsError and mark it counted so the IsCounted guard
	// above stops it being double counted if it passes through again.
	err = fserrors.FsError(err)
	fserrors.Count(err)
	// Classify the error - order matters: fatal beats retry-after
	// beats plain retryable.
	switch {
	case fserrors.IsFatalError(err):
		s.fatalError = true
	case fserrors.IsRetryAfterError(err):
		// Keep the latest retry-after time seen so far.
		retryAfter := fserrors.RetryAfterErrorTime(err)
		if s.retryAfter.IsZero() || retryAfter.Sub(s.retryAfter) > 0 {
			s.retryAfter = retryAfter
		}
		s.retryError = true
	case !fserrors.IsNoRetryError(err):
		s.retryError = true
	}
	return err
}
|
|
|
|
|
2019-03-21 12:24:13 +01:00
|
|
|
// RetryAfter returns the time to retry after if it is set. It will
|
|
|
|
// be Zero if it isn't set.
|
|
|
|
func (s *StatsInfo) RetryAfter() time.Time {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
return s.retryAfter
|
|
|
|
}
|
|
|
|
|
2019-07-22 21:11:46 +02:00
|
|
|
// NewCheckingTransfer adds a checking transfer to the stats, from the object.
|
|
|
|
func (s *StatsInfo) NewCheckingTransfer(obj fs.Object) *Transfer {
|
2020-06-13 20:37:42 +02:00
|
|
|
tr := newCheckingTransfer(s, obj)
|
|
|
|
s.checking.add(tr)
|
|
|
|
return tr
|
2018-02-01 14:13:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// DoneChecking removes a check from the stats
|
|
|
|
func (s *StatsInfo) DoneChecking(remote string) {
|
2018-05-02 18:01:39 +02:00
|
|
|
s.checking.del(remote)
|
|
|
|
s.mu.Lock()
|
2018-02-01 14:13:24 +01:00
|
|
|
s.checks++
|
2018-05-02 18:01:39 +02:00
|
|
|
s.mu.Unlock()
|
2018-02-01 14:13:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// GetTransfers reads the number of transfers
|
|
|
|
func (s *StatsInfo) GetTransfers() int64 {
|
2018-05-02 18:01:39 +02:00
|
|
|
s.mu.RLock()
|
|
|
|
defer s.mu.RUnlock()
|
2018-02-01 14:13:24 +01:00
|
|
|
return s.transfers
|
|
|
|
}
|
|
|
|
|
2019-07-16 13:56:20 +02:00
|
|
|
// NewTransfer adds a transfer to the stats from the object.
|
|
|
|
func (s *StatsInfo) NewTransfer(obj fs.Object) *Transfer {
|
2020-06-13 20:37:42 +02:00
|
|
|
tr := newTransfer(s, obj)
|
|
|
|
s.transferring.add(tr)
|
|
|
|
return tr
|
2019-07-16 13:56:20 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
// NewTransferRemoteSize adds a transfer to the stats based on remote and size.
|
|
|
|
func (s *StatsInfo) NewTransferRemoteSize(remote string, size int64) *Transfer {
|
2020-06-13 20:37:42 +02:00
|
|
|
tr := newTransferRemoteSize(s, remote, size, false)
|
|
|
|
s.transferring.add(tr)
|
|
|
|
return tr
|
2018-02-01 14:13:24 +01:00
|
|
|
}
|
|
|
|
|
|
|
|
// DoneTransferring removes a transfer from the stats
|
|
|
|
//
|
|
|
|
// if ok is true then it increments the transfers count
|
|
|
|
func (s *StatsInfo) DoneTransferring(remote string, ok bool) {
|
2018-05-02 18:01:39 +02:00
|
|
|
s.transferring.del(remote)
|
2018-02-01 14:13:24 +01:00
|
|
|
if ok {
|
2018-05-02 18:01:39 +02:00
|
|
|
s.mu.Lock()
|
2018-02-01 14:13:24 +01:00
|
|
|
s.transfers++
|
2018-05-02 18:01:39 +02:00
|
|
|
s.mu.Unlock()
|
2018-02-01 14:13:24 +01:00
|
|
|
}
|
|
|
|
}
|
2018-07-19 23:41:34 +02:00
|
|
|
|
|
|
|
// SetCheckQueue sets the number of queued checks
|
|
|
|
func (s *StatsInfo) SetCheckQueue(n int, size int64) {
|
|
|
|
s.mu.Lock()
|
|
|
|
s.checkQueue = n
|
|
|
|
s.checkQueueSize = size
|
|
|
|
s.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetTransferQueue sets the number of queued transfers
|
|
|
|
func (s *StatsInfo) SetTransferQueue(n int, size int64) {
|
|
|
|
s.mu.Lock()
|
|
|
|
s.transferQueue = n
|
|
|
|
s.transferQueueSize = size
|
|
|
|
s.mu.Unlock()
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetRenameQueue sets the number of queued renames
func (s *StatsInfo) SetRenameQueue(n int, size int64) {
	s.mu.Lock()
	s.renameQueue = n
	s.renameQueueSize = size
	s.mu.Unlock()
}
|
2019-07-16 13:56:20 +02:00
|
|
|
|
|
|
|
// AddTransfer adds reference to the started transfer.
|
|
|
|
func (s *StatsInfo) AddTransfer(transfer *Transfer) {
|
|
|
|
s.mu.Lock()
|
|
|
|
s.startedTransfers = append(s.startedTransfers, transfer)
|
|
|
|
s.mu.Unlock()
|
|
|
|
}
|
2019-10-15 17:33:09 +02:00
|
|
|
|
2019-10-16 21:11:11 +02:00
|
|
|
// removeTransfer removes a reference to the started transfer in
// position i.
//
// Must be called with the lock held
func (s *StatsInfo) removeTransfer(transfer *Transfer, i int) {
	now := time.Now()

	// add finished transfer onto old time ranges
	start, end := transfer.TimeRange()
	if end.IsZero() {
		// transfer has no end time yet - treat it as ending now
		end = now
	}
	s.oldTimeRanges = append(s.oldTimeRanges, timeRange{start, end})
	s.oldTimeRanges.merge()

	// remove the found entry
	s.startedTransfers = append(s.startedTransfers[:i], s.startedTransfers[i+1:]...)

	// Find the earliest start time among the remaining active
	// transfers (i.e. the oldest one)
	oldestStart := now
	for i := range s.startedTransfers {
		start, _ := s.startedTransfers[i].TimeRange()
		if start.Before(oldestStart) {
			oldestStart = start
		}
	}

	// remove old entries older than that
	s.oldDuration += s.oldTimeRanges.cull(oldestStart)
}
|
|
|
|
|
|
|
|
// RemoveTransfer removes a reference to the started transfer.
|
|
|
|
func (s *StatsInfo) RemoveTransfer(transfer *Transfer) {
|
|
|
|
s.mu.Lock()
|
2019-10-15 17:33:09 +02:00
|
|
|
for i, tr := range s.startedTransfers {
|
|
|
|
if tr == transfer {
|
2019-10-16 21:11:11 +02:00
|
|
|
s.removeTransfer(tr, i)
|
2019-10-15 17:33:09 +02:00
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s.mu.Unlock()
|
|
|
|
}
|
2019-10-16 21:11:11 +02:00
|
|
|
|
2019-11-14 14:14:40 +01:00
|
|
|
// PruneTransfers makes sure there aren't too many old transfers by removing
|
|
|
|
// single finished transfer.
|
2019-10-16 21:11:11 +02:00
|
|
|
func (s *StatsInfo) PruneTransfers() {
|
2019-11-14 14:14:40 +01:00
|
|
|
if MaxCompletedTransfers < 0 {
|
|
|
|
return
|
|
|
|
}
|
2019-10-16 21:11:11 +02:00
|
|
|
s.mu.Lock()
|
|
|
|
// remove a transfer from the start if we are over quota
|
2019-11-14 14:14:40 +01:00
|
|
|
if len(s.startedTransfers) > MaxCompletedTransfers+fs.Config.Transfers {
|
2019-10-16 21:11:11 +02:00
|
|
|
for i, tr := range s.startedTransfers {
|
|
|
|
if tr.IsDone() {
|
|
|
|
s.removeTransfer(tr, i)
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s.mu.Unlock()
|
|
|
|
}
|