// Package replication implements replication of filesystems with existing
// versions (snapshots) from a sender to a receiver.
package replication

import (
	"context"
	"errors"
	"fmt"
	"math/bits"
	"net"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/zrepl/zrepl/daemon/job/wakeup"
	"github.com/zrepl/zrepl/replication/fsrep"
	. "github.com/zrepl/zrepl/replication/internal/diff"
	. "github.com/zrepl/zrepl/replication/internal/queue"
	"github.com/zrepl/zrepl/replication/pdu"
	"github.com/zrepl/zrepl/util/envconst"
	"github.com/zrepl/zrepl/util/watchdog"
)

//go:generate enumer -type=State
type State uint

const (
	Planning State = 1 << iota
	PlanningError
	Working
	WorkingWait
	Completed
	PermanentError
)

func (s State) rsf() state {
	idx := bits.TrailingZeros(uint(s))
	if idx == bits.UintSize {
		panic(s) // invalid value
	}
	m := []state{
		statePlanning,
		statePlanningError,
		stateWorking,
		stateWorkingWait,
		nil,
		nil,
	}
	return m[idx]
}
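
// For illustration, the power-of-two state encoding maps each state to its
// handler function via the index of its set bit:
//
//	Working == 1<<2                            // third declared state
//	bits.TrailingZeros(uint(Working)) == 2
//	Working.rsf() == stateWorking              // m[2]
//	Completed.rsf() == nil                     // terminal, see IsTerminal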

func (s State) IsTerminal() bool {
	return s.rsf() == nil
}

// Replication implements the replication of multiple file systems from a Sender to a Receiver.
//
// It is a state machine that is driven by the Drive method
// and provides asynchronous reporting via the Report method
// (which is safe to call from another goroutine).
type Replication struct {
	// not protected by lock
	promSecsPerState    *prometheus.HistogramVec // labels: state
	promBytesReplicated *prometheus.CounterVec   // labels: filesystem

	Progress watchdog.KeepAlive

	// lock protects all fields of this struct (but not the fields behind pointers!)
	lock sync.Mutex

	state State

	// Working, WorkingWait, Completed, PermanentError
	queue     *ReplicationQueue
	completed []*fsrep.Replication
	active    *ReplicationQueueItemHandle

	// for PlanningError, WorkingWait, PermanentError and Completed
	err error

	// PlanningError, WorkingWait
	sleepUntil time.Time
}

type Report struct {
	Status     string
	Problem    string
	SleepUntil time.Time
	Completed  []*fsrep.Report
	Pending    []*fsrep.Report
	Active     *fsrep.Report
}

func NewReplication(secsPerState *prometheus.HistogramVec, bytesReplicated *prometheus.CounterVec) *Replication {
	r := Replication{
		promSecsPerState:    secsPerState,
		promBytesReplicated: bytesReplicated,
		state:               Planning,
	}
	return &r
}
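
// A minimal construction sketch. The metric names below are illustrative
// only; the caller is responsible for registering the vectors, and the label
// names must match the struct comments above ("state" and "filesystem"):
//
//	secsPerState := prometheus.NewHistogramVec(prometheus.HistogramOpts{
//		Namespace: "zrepl", Subsystem: "replication",
//		Name:      "state_time", Help: "seconds spent in each state",
//	}, []string{"state"})
//	bytesReplicated := prometheus.NewCounterVec(prometheus.CounterOpts{
//		Namespace: "zrepl", Subsystem: "replication",
//		Name:      "bytes_replicated", Help: "bytes replicated per filesystem",
//	}, []string{"filesystem"})
//	prometheus.MustRegister(secsPerState, bytesReplicated)
//	rep := NewReplication(secsPerState, bytesReplicated)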

// Endpoint represents one side of the replication.
//
// An endpoint is either in Sender or Receiver mode, represented by the correspondingly
// named interfaces defined in this package.
type Endpoint interface {
	// Does not include placeholder filesystems
	ListFilesystems(ctx context.Context) ([]*pdu.Filesystem, error)
	// FIXME document FilteredError handling
	ListFilesystemVersions(ctx context.Context, fs string) ([]*pdu.FilesystemVersion, error)
	DestroySnapshots(ctx context.Context, req *pdu.DestroySnapshotsReq) (*pdu.DestroySnapshotsRes, error)
}

type Sender interface {
	Endpoint
	fsrep.Sender
}

type Receiver interface {
	Endpoint
	fsrep.Receiver
}

type FilteredError struct{ fs string }

func NewFilteredError(fs string) *FilteredError {
	return &FilteredError{fs}
}

func (f FilteredError) Error() string { return "endpoint does not allow access to filesystem " + f.fs }

type updater func(func(*Replication)) (newState State)
type state func(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, receiver Receiver, u updater) state
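
// State handlers below mutate the shared Replication through the updater,
// which takes the lock, applies the closure, and returns the resulting
// State; rsf() then maps that State to the next handler. A typical
// transition inside a handler therefore looks like:
//
//	return u(func(r *Replication) {
//		r.err = err
//		r.state = PlanningError
//	}).rsf()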

// Drive starts the state machine and returns only after replication has finished (with or without errors).
// The Logger in ctx is used for both debug and error logging, but is not guaranteed to be stable
// or end-user friendly.
// User-facing replication progress reports can be obtained using the Report method,
// whose output will not change after Drive returns.
//
// FIXME: Drive may only be called once per instance of Replication
func (r *Replication) Drive(ctx context.Context, sender Sender, receiver Receiver) {

	var u updater = func(f func(*Replication)) State {
		r.lock.Lock()
		defer r.lock.Unlock()
		if f != nil {
			f(r)
		}
		return r.state
	}

	var s state = statePlanning
	var pre, post State
	for s != nil {
		preTime := time.Now()
		pre = u(nil)
		s = s(ctx, &r.Progress, sender, receiver, u)
		delta := time.Since(preTime)
		r.promSecsPerState.WithLabelValues(pre.String()).Observe(delta.Seconds())
		post = u(nil)
		getLogger(ctx).
			WithField("transition", fmt.Sprintf("%s => %s", pre, post)).
			WithField("duration", delta).
			Debug("main state transition")
		if post == Working && pre != post {
			getLogger(ctx).Info("start working")
		}
	}

	getLogger(ctx).
		WithField("final_state", post).
		Debug("main final state")
}
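
// A minimal sketch of driving a replication while observing it from another
// goroutine (mySender and myReceiver are hypothetical implementations of the
// Sender and Receiver interfaces; metrics as sketched above NewReplication):
//
//	rep := NewReplication(secsPerState, bytesReplicated)
//	go func() {
//		for !rep.State().IsTerminal() {
//			fmt.Printf("%+v\n", rep.Report())
//			time.Sleep(time.Second)
//		}
//	}()
//	rep.Drive(ctx, mySender, myReceiver) // returns once replication is done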

func resolveConflict(conflict error) (path []*pdu.FilesystemVersion, msg string) {
	if noCommonAncestor, ok := conflict.(*ConflictNoCommonAncestor); ok {
		if len(noCommonAncestor.SortedReceiverVersions) == 0 {
			// TODO this is hard-coded replication policy: most recent snapshot as source
			var mostRecentSnap *pdu.FilesystemVersion
			for n := len(noCommonAncestor.SortedSenderVersions) - 1; n >= 0; n-- {
				if noCommonAncestor.SortedSenderVersions[n].Type == pdu.FilesystemVersion_Snapshot {
					mostRecentSnap = noCommonAncestor.SortedSenderVersions[n]
					break
				}
			}
			if mostRecentSnap == nil {
				return nil, "no snapshots available on sender side"
			}
			return []*pdu.FilesystemVersion{mostRecentSnap}, fmt.Sprintf("start replication at most recent snapshot %s", mostRecentSnap.RelName())
		}
	}
	return nil, "no automated way to handle conflict type"
}
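
// For illustration: if the receiver has no versions at all and the sender's
// sorted versions are [@a, @b, #c] (oldest to newest, @ = snapshot,
// # = bookmark), the loop above skips the bookmark and picks @b, so
// replication starts at the most recent snapshot; every other conflict type
// is reported as unresolvable.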

var RetryInterval = envconst.Duration("ZREPL_REPLICATION_RETRY_INTERVAL", 4 * time.Second)

func isPermanent(err error) bool {
	switch err {
	case context.Canceled: return true
	case context.DeadlineExceeded: return true
	}
	if operr, ok := err.(net.Error); ok {
		return !operr.Temporary()
	}
	return false
}
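
// For illustration: context.Canceled and context.DeadlineExceeded are treated
// as permanent; a net.Error is permanent exactly when it does not report
// itself as Temporary(), e.g. a refused connection as opposed to a transient
// timeout. All other errors are retried.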

func statePlanning(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, receiver Receiver, u updater) state {

	log := getLogger(ctx)

	log.Info("start planning")

	handlePlanningError := func(err error) state {
		return u(func(r *Replication) {
			r.err = err
			if isPermanent(err) {
				r.state = PermanentError
			} else {
				r.sleepUntil = time.Now().Add(RetryInterval)
				r.state = PlanningError
			}
		}).rsf()
	}

	sfss, err := sender.ListFilesystems(ctx)
	if err != nil {
		log.WithError(err).Error("error listing sender filesystems")
		return handlePlanningError(err)
	}

	rfss, err := receiver.ListFilesystems(ctx)
	if err != nil {
		log.WithError(err).Error("error listing receiver filesystems")
		return handlePlanningError(err)
	}

	q := NewReplicationQueue()
	mainlog := log
	for _, fs := range sfss {

		log := mainlog.WithField("filesystem", fs.Path)

		log.Debug("assessing filesystem")

		sfsvs, err := sender.ListFilesystemVersions(ctx, fs.Path)
		if err != nil {
			log.WithError(err).Error("cannot get remote filesystem versions")
			return handlePlanningError(err)
		}

		if len(sfsvs) < 1 {
			err := errors.New("sender does not have any versions")
			log.Error(err.Error())
			q.Add(fsrep.NewReplicationWithPermanentError(fs.Path, err))
			continue
		}

		receiverFSExists := false
		for _, rfs := range rfss {
			if rfs.Path == fs.Path {
				receiverFSExists = true
			}
		}

		var rfsvs []*pdu.FilesystemVersion
		if receiverFSExists {
			rfsvs, err = receiver.ListFilesystemVersions(ctx, fs.Path)
			if err != nil {
				if _, ok := err.(*FilteredError); ok {
					log.Info("receiver ignores filesystem")
					continue
				}
				log.WithError(err).Error("receiver error")
				return handlePlanningError(err)
			}
		} else {
			rfsvs = []*pdu.FilesystemVersion{}
		}

		path, conflict := IncrementalPath(rfsvs, sfsvs)
		if conflict != nil {
			var msg string
			path, msg = resolveConflict(conflict) // no shadowing allowed!
			if path != nil {
				log.WithField("conflict", conflict).Info("conflict")
				log.WithField("resolution", msg).Info("automatically resolved")
			} else {
				log.WithField("conflict", conflict).Error("conflict")
				log.WithField("problem", msg).Error("cannot resolve conflict")
			}
		}
		if path == nil {
			q.Add(fsrep.NewReplicationWithPermanentError(fs.Path, conflict))
			continue
		}

		var promBytesReplicated *prometheus.CounterVec
		u(func(replication *Replication) { // FIXME args struct like in pruner (also use for sender and receiver)
			promBytesReplicated = replication.promBytesReplicated
		})
		fsrfsm := fsrep.BuildReplication(fs.Path, promBytesReplicated.WithLabelValues(fs.Path))
		if len(path) == 1 {
			fsrfsm.AddStep(nil, path[0])
		} else {
			for i := 0; i < len(path)-1; i++ {
				fsrfsm.AddStep(path[i], path[i+1])
			}
		}
		qitem := fsrfsm.Done()

		log.Debug("compute send size estimate")
		if err = qitem.UpdateSizeEsitmate(ctx, sender); err != nil {
			log.WithError(err).Error("error computing size estimate")
			return handlePlanningError(err)
		}

		q.Add(qitem)
	}

	ka.MadeProgress()

	return u(func(r *Replication) {
		r.completed = nil
		r.queue = q
		r.err = nil
		r.state = Working
	}).rsf()
}
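
// For illustration: an incremental path [@a, @b, @c] from the planning loop
// above yields the steps @a=>@b and @b=>@c, whereas a single-element path
// produces one step with a nil from-version, i.e. (by this package's
// convention) an initial full send of that snapshot.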

func statePlanningError(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, receiver Receiver, u updater) state {
	var sleepUntil time.Time
	u(func(r *Replication) {
		sleepUntil = r.sleepUntil
	})
	t := time.NewTimer(time.Until(sleepUntil))
	getLogger(ctx).WithField("until", sleepUntil).Info("retry wait after planning error")
	defer t.Stop()
	select {
	case <-ctx.Done():
		return u(func(r *Replication) {
			r.state = PermanentError
			r.err = ctx.Err()
		}).rsf()
	case <-t.C:
	case <-wakeup.Wait(ctx):
	}
	return u(func(r *Replication) {
		r.state = Planning
	}).rsf()
}

func stateWorking(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, receiver Receiver, u updater) state {

	var active *ReplicationQueueItemHandle
	rsfNext := u(func(r *Replication) {
		done, next := r.queue.GetNext()
		r.completed = append(r.completed, done...)
		if next == nil {
			r.state = Completed
		}
		r.active = next
		active = next
	}).rsf()

	if active == nil {
		return rsfNext
	}

	state, nextStepDate := active.GetFSReplication().TakeStep(ctx, ka, sender, receiver)
	u(func(r *Replication) {
		active.Update(state, nextStepDate)
		r.active = nil
	}).rsf()

	select {
	case <-ctx.Done():
		return u(func(r *Replication) {
			r.err = ctx.Err()
			r.state = PermanentError
		}).rsf()
	default:
	}

	if err := active.GetFSReplication().Err(); err != nil {
		return u(func(r *Replication) {
			r.err = err
			if isPermanent(err) {
				r.state = PermanentError
			} else {
				r.sleepUntil = time.Now().Add(RetryInterval)
				r.state = WorkingWait
			}
		}).rsf()
	}

	return u(nil).rsf()
}

func stateWorkingWait(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, receiver Receiver, u updater) state {
	var sleepUntil time.Time
	u(func(r *Replication) {
		sleepUntil = r.sleepUntil
	})
	t := time.NewTimer(time.Until(sleepUntil))
	getLogger(ctx).WithField("until", sleepUntil).Info("retry wait after replication step error")
	defer t.Stop()
	select {
	case <-ctx.Done():
		return u(func(r *Replication) {
			r.state = PermanentError
			r.err = ctx.Err()
		}).rsf()

	case <-t.C:
	case <-wakeup.Wait(ctx):
	}
	return u(func(r *Replication) {
		r.state = Working
	}).rsf()
}

// Report provides a summary of the progress of the Replication,
// i.e., a condensed dump of the internal state machine.
// Report is safe to be called asynchronously while Drive is running.
func (r *Replication) Report() *Report {
	r.lock.Lock()
	defer r.lock.Unlock()

	rep := Report{
		Status:     r.state.String(),
		SleepUntil: r.sleepUntil,
	}

	if r.state&(Planning|PlanningError|PermanentError) != 0 {
		if r.err != nil {
			rep.Problem = r.err.Error()
		}
		return &rep
	}

	rep.Pending = make([]*fsrep.Report, 0, r.queue.Len())
	rep.Completed = make([]*fsrep.Report, 0, len(r.completed))

	var active *fsrep.Replication
	if r.active != nil {
		active = r.active.GetFSReplication()
		rep.Active = active.Report()
	}
	r.queue.Foreach(func(h *ReplicationQueueItemHandle) {
		fsr := h.GetFSReplication()
		if active != fsr {
			rep.Pending = append(rep.Pending, fsr.Report())
		}
	})
	for _, fsr := range r.completed {
		rep.Completed = append(rep.Completed, fsr.Report())
	}

	return &rep
}

func (r *Replication) State() State {
	r.lock.Lock()
	defer r.lock.Unlock()
	return r.state
}