replication + pruner: progress markers during planning

Christian Schwarz 2018-10-21 13:37:30 +02:00
parent 5ec7a5c078
commit fffda09f67
2 changed files with 16 additions and 1 deletion
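The change threads a watchdog.KeepAlive through the planning code paths of both the replication state machine and the pruner: each time planning completes a round trip to the remote side, it calls MadeProgress(), so a watchdog can tell a slow-but-advancing planning phase apart from a hung one. The diff only shows the call sites; as a minimal sketch of the assumed contract (inferred from those call sites, not zrepl's actual watchdog implementation):

    // Minimal sketch of the assumed watchdog.KeepAlive contract, inferred
    // from the call sites below (a Progress field on the Pruner,
    // MadeProgress() as the marker). Not zrepl's actual implementation.
    package watchdog

    import (
        "sync"
        "time"
    )

    type KeepAlive struct {
        mtx          sync.Mutex
        lastProgress time.Time
    }

    // MadeProgress records that the owning state machine advanced.
    func (k *KeepAlive) MadeProgress() {
        k.mtx.Lock()
        defer k.mtx.Unlock()
        k.lastProgress = time.Now()
    }

    // TimedOut reports whether no progress was recorded within timeout;
    // a watchdog goroutine would poll this and abort or restart planning.
    func (k *KeepAlive) TimedOut(timeout time.Duration) bool {
        k.mtx.Lock()
        defer k.mtx.Unlock()
        return time.Since(k.lastProgress) > timeout
    }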


@@ -362,6 +362,10 @@ func onErr(u updater, e error) state {
func statePlan(a *args, u updater) state {
    ctx, target, receiver := a.ctx, a.target, a.receiver
    var ka *watchdog.KeepAlive
    u(func(pruner *Pruner) {
        ka = &pruner.Progress
    })
    tfss, err := target.ListFilesystems(ctx)
    if err != nil {
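Note how the KeepAlive is obtained: statePlan never reads pruner.Progress directly, but goes through the updater callback u, which, judging by its call sites here and at the end of this file's diff, runs the closure under the Pruner's lock and returns the resulting state. A sketch of that pattern (the mtx field, the state placeholder, and makeUpdater are assumed names for illustration, building on the KeepAlive sketch above):

    // Sketch of the updater pattern used above, inferred from its call
    // sites: u runs f while holding the Pruner's lock and returns the
    // resulting state, so taking the address of pruner.Progress inside
    // the closure is race-free.
    type state int // placeholder; the real state type is not shown in the diff

    type Pruner struct {
        mtx      sync.Mutex
        state    state
        Progress watchdog.KeepAlive
    }

    type updater func(f func(pruner *Pruner)) state

    func makeUpdater(p *Pruner) updater {
        return func(f func(pruner *Pruner)) state {
            p.mtx.Lock()
            defer p.mtx.Unlock()
            f(p)
            return p.state
        }
    }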
@@ -385,6 +389,8 @@ func statePlan(a *args, u updater) state {
        l.WithError(err).Error("cannot list filesystem versions")
        return onErr(u, err)
    }
    // no progress here since we could run into a live-lock (must have used target AND receiver before progress)
    pfs.snaps = make([]pruning.Snapshot, 0, len(tfsvs))
    rcReq := &pdu.ReplicationCursorReq{
@@ -402,6 +408,7 @@ func statePlan(a *args, u updater) state {
        l.WithField("reqErr", rc.GetError()).Error("cannot get replication cursor")
        return onErr(u, err)
    }
    ka.MadeProgress()
    // scan from older to newer, all snapshots older than cursor are interpreted as replicated
@@ -448,7 +455,7 @@ func statePlan(a *args, u updater) state {
        // Apply prune rules
        pfs.destroyList = pruning.PruneSnapshots(pfs.snaps, a.rules)
        ka.MadeProgress()
    }
    return u(func(pruner *Pruner) {


@@ -229,6 +229,7 @@ func statePlanning(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, r
        log.WithError(err).Error("error listing sender filesystems")
        return handlePlanningError(err)
    }
    // no progress here since we could run into a live-lock on connectivity issues
    rfss, err := receiver.ListFilesystems(ctx)
    if err != nil {
@@ -236,6 +237,8 @@ func statePlanning(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, r
        return handlePlanningError(err)
    }
    ka.MadeProgress() // for both sender and receiver
    q := NewReplicationQueue()
    mainlog := log
    for _, fs := range sfss {
@@ -249,6 +252,7 @@ func statePlanning(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, r
            log.WithError(err).Error("cannot get remote filesystem versions")
            return handlePlanningError(err)
        }
        ka.MadeProgress()
        if len(sfsvs) < 1 {
            err := errors.New("sender does not have any versions")
@@ -278,6 +282,7 @@ func statePlanning(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, r
        } else {
            rfsvs = []*pdu.FilesystemVersion{}
        }
        ka.MadeProgress()
        path, conflict := IncrementalPath(rfsvs, sfsvs)
        if conflict != nil {
@@ -291,6 +296,7 @@ func statePlanning(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, r
                log.WithField("problem", msg).Error("cannot resolve conflict")
            }
        }
        ka.MadeProgress()
        if path == nil {
            q.Add(fsrep.NewReplicationWithPermanentError(fs.Path, conflict))
            continue
@@ -309,12 +315,14 @@ func statePlanning(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, r
            }
        }
        qitem := fsrfsm.Done()
        ka.MadeProgress()
        log.Debug("compute send size estimate")
        if err = qitem.UpdateSizeEsitmate(ctx, sender); err != nil {
            log.WithError(err).Error("error computing size estimate")
            return handlePlanningError(err)
        }
        ka.MadeProgress()
        q.Add(qitem)
    }
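The placement of the markers encodes the live-lock concern from the two comments: progress is recorded only after both endpoints have answered (sender and receiver here, target and receiver in the pruner). If MadeProgress() ran after the first ListFilesystems alone, a connection that reliably reaches one side but always fails on the other would reset the watchdog on every retry, and a no-progress timeout would never fire. A self-contained toy illustration of that failure mode (hypothetical code, not zrepl's):

    // Toy illustration of the live-lock the comments warn about.
    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    type keepAlive struct{ last time.Time }

    func (k *keepAlive) MadeProgress() { k.last = time.Now() }

    func listSender() error   { return nil }                       // always reachable
    func listReceiver() error { return errors.New("unreachable") } // never reachable

    func main() {
        var ka keepAlive
        ka.MadeProgress()
        for i := 0; i < 3; i++ { // stand-in for an endless retry loop
            if err := listSender(); err != nil {
                continue
            }
            // WRONG placement: marking progress here would reset the
            // watchdog on every retry although the receiver never answers,
            // so a no-progress timeout could never fire (the live-lock).
            // ka.MadeProgress()
            if err := listReceiver(); err != nil {
                continue
            }
            ka.MadeProgress() // correct: only after BOTH endpoints responded
        }
        fmt.Println("time since last progress:", time.Since(ka.last))
    }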