package cmd

import (
	"context"
	"fmt"
	"sort"
	"time"

	"github.com/zrepl/zrepl/zfs"
)
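
// IntervalAutosnap takes a snapshot of every filesystem matched by
// DatasetFilter once per SnapshotInterval, naming each snapshot
// Prefix followed by a UTC timestamp.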
type IntervalAutosnap struct {
	DatasetFilter    zfs.DatasetFilter
	Prefix           string
	SnapshotInterval time.Duration
}
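
// filterFilesystems lists the filesystems matching the configured
// DatasetFilter; stop is true if listing failed and the caller should abort.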
func (a *IntervalAutosnap) filterFilesystems(ctx context.Context) (fss []*zfs.DatasetPath, stop bool) {
	fss, err := zfs.ZFSListMapping(a.DatasetFilter)
	stop = err != nil
	if err != nil {
		getLogger(ctx).WithError(err).Error("cannot list datasets")
	}
	if len(fss) == 0 {
		getLogger(ctx).Warn("no filesystem matching filesystem filter")
	}
	return fss, stop
}
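
// findSyncPoint computes the point in time at which all filesystems should be
// snapshotted in lockstep: for each filesystem, the creation time of its
// latest prefixed snapshot plus SnapshotInterval (or now, if that snapshot is
// older than the interval); the earliest of these per-filesystem times wins.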
func (a *IntervalAutosnap) findSyncPoint(log Logger, fss []*zfs.DatasetPath) (syncPoint time.Time, err error) {
	type snapTime struct {
		ds   *zfs.DatasetPath
		time time.Time
	}

	if len(fss) == 0 {
		return time.Now(), nil
	}

	snaptimes := make([]snapTime, 0, len(fss))

	now := time.Now()

	log.Debug("examine filesystem state")
	for _, d := range fss {

		l := log.WithField("fs", d.ToString())

		fsvs, err := zfs.ZFSListFilesystemVersions(d, NewPrefixFilter(a.Prefix))
		if err != nil {
			l.WithError(err).Error("cannot list filesystem versions")
			continue
		}
		if len(fsvs) <= 0 {
			l.WithField("prefix", a.Prefix).Info("no filesystem versions with prefix")
			continue
		}

		// Sort versions by creation
		sort.SliceStable(fsvs, func(i, j int) bool {
			return fsvs[i].CreateTXG < fsvs[j].CreateTXG
		})

		latest := fsvs[len(fsvs)-1]
		l.WithField("creation", latest.Creation).
			Debug("found latest snapshot")

		since := now.Sub(latest.Creation)
		if since < 0 {
			l.WithField("snapshot", latest.Name).
				WithField("creation", latest.Creation).
				Error("snapshot is from the future")
			continue
		}

		// If the latest snapshot is younger than the interval, the next one is
		// due one interval after it; otherwise this filesystem is overdue and
		// should be snapshotted now.
		next := now
		if since < a.SnapshotInterval {
			next = latest.Creation.Add(a.SnapshotInterval)
		}
		snaptimes = append(snaptimes, snapTime{d, next})
	}

	if len(snaptimes) == 0 {
		snaptimes = append(snaptimes, snapTime{nil, now})
	}

	sort.Slice(snaptimes, func(i, j int) bool {
		return snaptimes[i].time.Before(snaptimes[j].time)
	})

	return snaptimes[0].time, nil
}
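
// waitForSyncPoint blocks until syncPoint is reached or ctx is cancelled.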
func (a *IntervalAutosnap) waitForSyncPoint(ctx context.Context, syncPoint time.Time) {

	const LOG_TIME_FMT string = time.ANSIC

	getLogger(ctx).
		WithField("sync_point", syncPoint.Format(LOG_TIME_FMT)).
		Info("wait for sync point")

	select {
	case <-ctx.Done():
		getLogger(ctx).WithError(ctx.Err()).Info("context done")
		return
	case <-time.After(syncPoint.Sub(time.Now())):
	}
}
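
// syncUpRun performs the initial sync-up: it waits until the computed sync
// point and then snapshots all matched filesystems once, so that subsequent
// ticker-driven snapshots happen in lockstep. stop is true on error.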
func (a *IntervalAutosnap) syncUpRun(ctx context.Context, didSnaps chan struct{}) (stop bool) {
	fss, stop := a.filterFilesystems(ctx)
	if stop {
		return true
	}

	syncPoint, err := a.findSyncPoint(getLogger(ctx), fss)
	if err != nil {
		return true
	}

	a.waitForSyncPoint(ctx, syncPoint)

	getLogger(ctx).Debug("snapshot all filesystems to enable further snaps in lockstep")
	a.doSnapshots(ctx, didSnaps)
	return false
}
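
// Run is the entry point of the autosnap task: it performs the initial
// sync-up run and then snapshots all matched filesystems every
// SnapshotInterval until ctx is done.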
func (a *IntervalAutosnap) Run(ctx context.Context, didSnaps chan struct{}) {

	log := getLogger(ctx)

	if a.syncUpRun(ctx, didSnaps) {
		log.Error("stopping autosnap after error in sync up")
		return
	}

	// task drops back to idle here

	log.Debug("setting up ticker in SnapshotInterval")
	ticker := time.NewTicker(a.SnapshotInterval)
	for {
		select {
		case <-ctx.Done():
			ticker.Stop()
			log.WithError(ctx.Err()).Info("context done")
			return
		case <-ticker.C:
			a.doSnapshots(ctx, didSnaps)
		}
	}
}
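
// doSnapshots snapshots every matched filesystem with a Prefix + UTC
// timestamp name, creates a bookmark of the same name, and then signals
// completion on didSnaps without blocking.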
func (a *IntervalAutosnap) doSnapshots(ctx context.Context, didSnaps chan struct{}) {
	log := getLogger(ctx)

	// don't cache the result from previous run in case the user added
	// a new dataset in the meantime
	ds, stop := a.filterFilesystems(ctx)
	if stop {
		return
	}

	// TODO channel programs -> allow a little jitter?
	for _, d := range ds {
		suffix := time.Now().In(time.UTC).Format("20060102_150405_000")
		snapname := fmt.Sprintf("%s%s", a.Prefix, suffix)

		l := log.
			WithField("fs", d.ToString()).
			WithField("snapname", snapname)

		l.Info("create snapshot")
		err := zfs.ZFSSnapshot(d, snapname, false)
		if err != nil {
			l.WithError(err).Error("cannot create snapshot")
		}

		l.Info("create corresponding bookmark")
		err = zfs.ZFSBookmark(d, snapname, snapname)
		if err != nil {
			l.WithError(err).Error("cannot create bookmark")
		}
	}

	select {
	case didSnaps <- struct{}{}:
	default:
		log.Error("warning: callback channel is full, discarding")
	}
}