remove JobStatus, Task abstraction and 'control status' subcommand

The 'control status' subcommand will be replaced by job-specific status output at some point.

The Task abstraction was not useful anymore with the state machine.
Something similar may be reintroduced at a later point, but consider
alternatives first:

- opentracing.io
- embedding everything in ctx (see the sketch below)
	- an activity stack would work easily
	- log entries via a proxy logger.Logger object
- progress reporting should be part of the status reports of individual jobs
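
For reference, a minimal sketch of the embed-in-ctx approach (the contextKey
type and the WithLogger name are illustrative; getLogger matches its use in
the diff below, and Logger stands in for the package's logger interface):

package main

import (
	"context"
	"log"
	"os"
)

// Logger is a stand-in for the package's logger interface.
type Logger = *log.Logger

// contextKey is unexported so no other package can collide with our key.
type contextKey int

const contextKeyLog contextKey = 0

// WithLogger returns a child context that carries l. (Illustrative name.)
func WithLogger(ctx context.Context, l Logger) context.Context {
	return context.WithValue(ctx, contextKeyLog, l)
}

// getLogger retrieves the logger stored by WithLogger; as used in the
// diff below, callers assume a logger was always stored.
func getLogger(ctx context.Context) Logger {
	return ctx.Value(contextKeyLog).(Logger) // panics if nothing was stored
}

func main() {
	ctx := WithLogger(context.Background(), log.New(os.Stderr, "autosnap: ", log.LstdFlags))
	getLogger(ctx).Print("cannot list datasets") // same call shape as getLogger(ctx) in the diff
}

An activity stack could ride on the same mechanism: push/pop a []string
stored under a second context key, alongside or instead of the logger.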
Author: Christian Schwarz
Date:   2018-08-26 16:49:40 +02:00
parent 7ff72fb6d9
commit a0f72b585b
11 changed files with 100 additions and 672 deletions


@@ -9,29 +9,24 @@ import (
 )
 
 type IntervalAutosnap struct {
-	task             *Task
 	DatasetFilter    zfs.DatasetFilter
 	Prefix           string
 	SnapshotInterval time.Duration
 }
 
-func (a *IntervalAutosnap) filterFilesystems() (fss []*zfs.DatasetPath, stop bool) {
-	a.task.Enter("filter_filesystems")
-	defer a.task.Finish()
+func (a *IntervalAutosnap) filterFilesystems(ctx context.Context) (fss []*zfs.DatasetPath, stop bool) {
 	fss, err := zfs.ZFSListMapping(a.DatasetFilter)
 	stop = err != nil
 	if err != nil {
-		a.task.Log().WithError(err).Error("cannot list datasets")
+		getLogger(ctx).WithError(err).Error("cannot list datasets")
 	}
 	if len(fss) == 0 {
-		a.task.Log().Warn("no filesystem matching filesystem filter")
+		getLogger(ctx).Warn("no filesystem matching filesystem filter")
 	}
 	return fss, stop
 }
 
-func (a *IntervalAutosnap) findSyncPoint(fss []*zfs.DatasetPath) (syncPoint time.Time, err error) {
-	a.task.Enter("find_sync_point")
-	defer a.task.Finish()
+func (a *IntervalAutosnap) findSyncPoint(log Logger, fss []*zfs.DatasetPath) (syncPoint time.Time, err error) {
 	type snapTime struct {
 		ds   *zfs.DatasetPath
 		time time.Time
@@ -45,10 +40,10 @@ func (a *IntervalAutosnap) findSyncPoint(fss []*zfs.DatasetPath) (syncPoint time.Time, err error) {
 
 	now := time.Now()
 
-	a.task.Log().Debug("examine filesystem state")
+	log.Debug("examine filesystem state")
 	for _, d := range fss {
 
-		l := a.task.Log().WithField("fs", d.ToString())
+		l := log.WithField("fs", d.ToString())
 
 		fsvs, err := zfs.ZFSListFilesystemVersions(d, NewPrefixFilter(a.Prefix))
 		if err != nil {
@@ -96,76 +91,71 @@ func (a *IntervalAutosnap) findSyncPoint(fss []*zfs.DatasetPath) (syncPoint time.Time, err error) {
 }
 
 func (a *IntervalAutosnap) waitForSyncPoint(ctx context.Context, syncPoint time.Time) {
-	a.task.Enter("wait_sync_point")
-	defer a.task.Finish()
 
 	const LOG_TIME_FMT string = time.ANSIC
 
-	a.task.Log().WithField("sync_point", syncPoint.Format(LOG_TIME_FMT)).
+	getLogger(ctx).
+		WithField("sync_point", syncPoint.Format(LOG_TIME_FMT)).
 		Info("wait for sync point")
 
 	select {
 	case <-ctx.Done():
-		a.task.Log().WithError(ctx.Err()).Info("context done")
+		getLogger(ctx).WithError(ctx.Err()).Info("context done")
 		return
 	case <-time.After(syncPoint.Sub(time.Now())):
 	}
 }
 
 func (a *IntervalAutosnap) syncUpRun(ctx context.Context, didSnaps chan struct{}) (stop bool) {
-	a.task.Enter("sync_up")
-	defer a.task.Finish()
-	fss, stop := a.filterFilesystems()
+	fss, stop := a.filterFilesystems(ctx)
	if stop {
 		return true
 	}
 
-	syncPoint, err := a.findSyncPoint(fss)
+	syncPoint, err := a.findSyncPoint(getLogger(ctx), fss)
 	if err != nil {
 		return true
 	}
 
 	a.waitForSyncPoint(ctx, syncPoint)
 
-	a.task.Log().Debug("snapshot all filesystems to enable further snaps in lockstep")
-	a.doSnapshots(didSnaps)
+	getLogger(ctx).Debug("snapshot all filesystems to enable further snaps in lockstep")
+	a.doSnapshots(ctx, didSnaps)
 	return false
 }
 
 func (a *IntervalAutosnap) Run(ctx context.Context, didSnaps chan struct{}) {
+	log := getLogger(ctx)
 
 	if a.syncUpRun(ctx, didSnaps) {
-		a.task.Log().Error("stoppping autosnap after error in sync up")
+		log.Error("stoppping autosnap after error in sync up")
 		return
 	}
 
-	// task drops back to idle here
-
-	a.task.Log().Debug("setting up ticker in SnapshotInterval")
+	log.Debug("setting up ticker in SnapshotInterval")
 	ticker := time.NewTicker(a.SnapshotInterval)
 	for {
 		select {
 		case <-ctx.Done():
 			ticker.Stop()
-			a.task.Log().WithError(ctx.Err()).Info("context done")
+			log.WithError(ctx.Err()).Info("context done")
 			return
 
 		case <-ticker.C:
-			a.doSnapshots(didSnaps)
+			a.doSnapshots(ctx, didSnaps)
 		}
 	}
 }
 
-func (a *IntervalAutosnap) doSnapshots(didSnaps chan struct{}) {
-	a.task.Enter("do_snapshots")
-	defer a.task.Finish()
+func (a *IntervalAutosnap) doSnapshots(ctx context.Context, didSnaps chan struct{}) {
+	log := getLogger(ctx)
 
 	// don't cache the result from previous run in case the user added
 	// a new dataset in the meantime
-	ds, stop := a.filterFilesystems()
+	ds, stop := a.filterFilesystems(ctx)
 	if stop {
 		return
 	}
@@ -175,20 +165,20 @@ func (a *IntervalAutosnap) doSnapshots(didSnaps chan struct{}) {
 		suffix := time.Now().In(time.UTC).Format("20060102_150405_000")
 		snapname := fmt.Sprintf("%s%s", a.Prefix, suffix)
 
-		l := a.task.Log().
+		l := log.
 			WithField("fs", d.ToString()).
 			WithField("snapname", snapname)
 
 		l.Info("create snapshot")
 		err := zfs.ZFSSnapshot(d, snapname, false)
 		if err != nil {
-			a.task.Log().WithError(err).Error("cannot create snapshot")
+			l.WithError(err).Error("cannot create snapshot")
 		}
 
 		l.Info("create corresponding bookmark")
 		err = zfs.ZFSBookmark(d, snapname, snapname)
 		if err != nil {
-			a.task.Log().WithError(err).Error("cannot create bookmark")
+			l.WithError(err).Error("cannot create bookmark")
 		}
 	}
@@ -196,7 +186,7 @@ func (a *IntervalAutosnap) doSnapshots(didSnaps chan struct{}) {
 
 	select {
 	case didSnaps <- struct{}{}:
 	default:
-		a.task.Log().Error("warning: callback channel is full, discarding")
+		log.Error("warning: callback channel is full, discarding")
 	}
 }
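
A side note on the didSnaps handoff at the end of doSnapshots: the send uses
select with a default branch, so a slow or absent consumer can at worst cost
one dropped notification, never a blocked snapshot loop. A self-contained
sketch of that pattern (channel capacity and names are illustrative):

package main

import (
	"fmt"
	"time"
)

func main() {
	didSnaps := make(chan struct{}, 1) // capacity 1: at most one pending wakeup

	// producer: non-blocking notify, mirroring the tail of doSnapshots
	notify := func() {
		select {
		case didSnaps <- struct{}{}:
		default:
			fmt.Println("warning: callback channel is full, discarding")
		}
	}

	notify()
	notify() // second notify hits the full buffer and is discarded

	// consumer drains at its own pace
	select {
	case <-didSnaps:
		fmt.Println("consumer woken up")
	case <-time.After(time.Second):
	}
}

Dropping a notification is presumably acceptable here because the consumer
only needs to learn that some snapshots happened, not how many.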