mirror of https://github.com/zrepl/zrepl.git

commit fffda09f67 (parent 5ec7a5c078)

    replication + pruner: progress markers during planning
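The commit threads a *watchdog.KeepAlive through the planning code of both the pruner and the replication state machine and calls MadeProgress() after every step that required a round trip to the other endpoint, so a supervising watchdog can tell a slow but alive planning pass from a stuck one. The watchdog package itself is not part of this diff; the sketch below is only an assumption of how such a keep-alive could look and be polled (the type, StalledFor, and the timeout are made up for illustration).

    package main

    // A minimal sketch, assuming a keep-alive that records the time of the
    // last MadeProgress() call; zrepl's real watchdog.KeepAlive is not shown
    // in this diff, so this type and StalledFor are illustrative only.

    import (
    	"fmt"
    	"sync/atomic"
    	"time"
    )

    type KeepAlive struct {
    	lastProgress atomic.Int64 // UnixNano of the last MadeProgress call
    }

    // MadeProgress is what the planning code calls after each completed step.
    func (k *KeepAlive) MadeProgress() {
    	k.lastProgress.Store(time.Now().UnixNano())
    }

    // StalledFor reports whether no progress has been made within timeout.
    func (k *KeepAlive) StalledFor(timeout time.Duration) bool {
    	last := time.Unix(0, k.lastProgress.Load())
    	return time.Since(last) > timeout
    }

    func main() {
    	var ka KeepAlive
    	ka.MadeProgress() // e.g. after both endpoints answered a planning request

    	// A supervising goroutine (the "watchdog") would periodically check:
    	if ka.StalledFor(10 * time.Second) {
    		fmt.Println("planning appears stuck, cancel and retry")
    	} else {
    		fmt.Println("planning is making progress")
    	}
    }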
@@ -362,6 +362,10 @@ func onErr(u updater, e error) state {
 func statePlan(a *args, u updater) state {
 
 	ctx, target, receiver := a.ctx, a.target, a.receiver
+	var ka *watchdog.KeepAlive
+	u(func(pruner *Pruner) {
+		ka = &pruner.Progress
+	})
 
 	tfss, err := target.ListFilesystems(ctx)
 	if err != nil {
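In the pruner, statePlan only reaches the shared Pruner struct through the updater callback u, so the hunk above captures a pointer to pruner.Progress inside that callback and keeps it in the local ka for the rest of the planning pass. A minimal sketch of that pattern, assuming the updater simply runs the callback under the Pruner's lock (the lock and the simplified types below are assumptions, not zrepl's actual definitions):

    package main

    import (
    	"fmt"
    	"sync"
    )

    // Simplified stand-ins; the real Pruner and watchdog.KeepAlive live
    // elsewhere in zrepl and are more involved than this.
    type KeepAlive struct{ marks int }

    func (k *KeepAlive) MadeProgress() { k.marks++ }

    type Pruner struct {
    	mtx      sync.Mutex
    	Progress KeepAlive
    }

    // update mimics the updater u: run f with the Pruner locked.
    func (p *Pruner) update(f func(*Pruner)) {
    	p.mtx.Lock()
    	defer p.mtx.Unlock()
    	f(p)
    }

    func main() {
    	p := &Pruner{}

    	// As in the hunk: grab &pruner.Progress under the lock once,
    	// then mark progress from the planning code without re-locking.
    	var ka *KeepAlive
    	p.update(func(pruner *Pruner) {
    		ka = &pruner.Progress
    	})

    	ka.MadeProgress() // after listing target filesystems and versions
    	ka.MadeProgress() // after fetching the replication cursor
    	fmt.Println("progress marks during planning:", p.Progress.marks)
    }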
@@ -385,6 +389,8 @@ func statePlan(a *args, u updater) state {
 			l.WithError(err).Error("cannot list filesystem versions")
 			return onErr(u, err)
 		}
+		// no progress here since we could run in a live-lock (must have used target AND receiver before progress)
+
 		pfs.snaps = make([]pruning.Snapshot, 0, len(tfsvs))
 
 		rcReq := &pdu.ReplicationCursorReq{
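The added comment is the key constraint: progress must not be reported after merely listing the target's versions, because an endpoint that answers the first request and then hangs would reset the watchdog on every retry, and the job would never be declared stuck. Only after both target and receiver have answered is MadeProgress() called. A contrived sketch of that failure mode, with made-up names:

    package main

    import (
    	"errors"
    	"fmt"
    )

    // planOneFilesystem simulates a planning step against two endpoints.
    // If markProgress is called after the first round trip only, a receiver
    // that never answers keeps the watchdog happy while nothing advances:
    // that is the live-lock the comment warns about.
    func planOneFilesystem(receiverAnswers, markEarly bool, markProgress func()) error {
    	// target.ListFilesystemVersions succeeds
    	if markEarly {
    		markProgress() // too early: watchdog is reset although we may be stuck
    	}
    	// receiver.ReplicationCursor may never answer
    	if !receiverAnswers {
    		return errors.New("receiver did not answer")
    	}
    	markProgress() // safe: both endpoints have been used
    	return nil
    }

    func main() {
    	resets := 0
    	// Retry loop with a hanging receiver and premature progress marks:
    	for attempt := 0; attempt < 3; attempt++ {
    		_ = planOneFilesystem(false, true, func() { resets++ })
    	}
    	fmt.Printf("watchdog was reset %d times although planning never advanced\n", resets)
    }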
@@ -402,6 +408,7 @@ func statePlan(a *args, u updater) state {
 			l.WithField("reqErr", rc.GetError()).Error("cannot get replication cursor")
 			return onErr(u, err)
 		}
+		ka.MadeProgress()
 
 
 		// scan from older to newer, all snapshots older than cursor are interpreted as replicated
@@ -448,7 +455,7 @@ func statePlan(a *args, u updater) state {
 
 		// Apply prune rules
 		pfs.destroyList = pruning.PruneSnapshots(pfs.snaps, a.rules)
+		ka.MadeProgress()
 	}
 
 	return u(func(pruner *Pruner) {
@@ -229,6 +229,7 @@ func statePlanning(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, r
 		log.WithError(err).Error("error listing sender filesystems")
 		return handlePlanningError(err)
 	}
+	// no progress here since we could run in a live-lock on connectivity issues
 
 	rfss, err := receiver.ListFilesystems(ctx)
 	if err != nil {
@@ -236,6 +237,8 @@ func statePlanning(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, r
 		return handlePlanningError(err)
 	}
 
+	ka.MadeProgress() // for both sender and receiver
+
 	q := NewReplicationQueue()
 	mainlog := log
 	for _, fs := range sfss {
@@ -249,6 +252,7 @@ func statePlanning(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, r
 			log.WithError(err).Error("cannot get remote filesystem versions")
 			return handlePlanningError(err)
 		}
+		ka.MadeProgress()
 
 		if len(sfsvs) < 1 {
 			err := errors.New("sender does not have any versions")
@@ -278,6 +282,7 @@ func statePlanning(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, r
 		} else {
 			rfsvs = []*pdu.FilesystemVersion{}
 		}
+		ka.MadeProgress()
 
 		path, conflict := IncrementalPath(rfsvs, sfsvs)
 		if conflict != nil {
@@ -291,6 +296,7 @@ func statePlanning(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, r
 				log.WithField("problem", msg).Error("cannot resolve conflict")
 			}
 		}
+		ka.MadeProgress()
 		if path == nil {
 			q.Add(fsrep.NewReplicationWithPermanentError(fs.Path, conflict))
 			continue
@@ -309,12 +315,14 @@ func statePlanning(ctx context.Context, ka *watchdog.KeepAlive, sender Sender, r
 			}
 		}
 		qitem := fsrfsm.Done()
+		ka.MadeProgress()
 
 		log.Debug("compute send size estimate")
 		if err = qitem.UpdateSizeEsitmate(ctx, sender); err != nil {
 			log.WithError(err).Error("error computing size estimate")
 			return handlePlanningError(err)
 		}
+		ka.MadeProgress()
 
 		q.Add(qitem)
 	}