implement automatic snapshotting feature

Christian Schwarz 2017-07-01 20:28:46 +02:00
parent 8c8a6ee905
commit 655b3ab55f
5 changed files with 211 additions and 6 deletions
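The diff below adds a new "autosnap" subcommand and the config plumbing for periodic snapshot jobs. As a hedged usage sketch (the job name "hfbak" comes from the sample config further down; flags other than --job are not shown in this diff and depend on the rest of the CLI):

zrepl autosnap              # schedule all autosnap jobs defined in the config
zrepl autosnap --job hfbak  # only schedule the job named "hfbak"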

cmd/autosnap.go (new file)

@@ -0,0 +1,42 @@
package main

import (
	"fmt"
	"github.com/zrepl/zrepl/zfs"
	"time"
)

type AutosnapContext struct {
	Autosnap Autosnap
}

func doAutosnap(ctx AutosnapContext, log Logger) (err error) {

	snap := ctx.Autosnap

	filesystems, err := zfs.ZFSListMapping(snap.DatasetFilter)
	if err != nil {
		return fmt.Errorf("cannot filter datasets: %s", err)
	}

	suffix := time.Now().In(time.UTC).Format("20060102_150405_000")
	snapname := fmt.Sprintf("%s%s", snap.Prefix, suffix)

	hadError := false

	for _, fs := range filesystems { // optimization: use recursive snapshots / channel programs here
		log.Printf("snapshotting filesystem %s@%s", fs, snapname)
		err := zfs.ZFSSnapshot(fs, snapname, false)
		if err != nil {
			log.Printf("error snapshotting %s: %s", fs, err)
			hadError = true
		}
	}

	if hadError {
		err = fmt.Errorf("errors occurred during autosnap, check logs for details")
	}

	return
}
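For illustration, a minimal standalone sketch of the snapshot name that the layout above produces (the filesystem, prefix, and timestamp are made-up examples):

package main

import (
	"fmt"
	"time"
)

func main() {
	// same layout as doAutosnap, applied to a fixed UTC timestamp;
	// note that "_000" is copied into the output verbatim (it is not a fractional-seconds directive)
	suffix := time.Date(2017, 7, 1, 18, 28, 46, 0, time.UTC).Format("20060102_150405_000")
	fmt.Printf("pool1/var/db@zrepl_hfbak_%s\n", suffix)
	// prints: pool1/var/db@zrepl_hfbak_20170701_182846_000
}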


@@ -67,13 +67,21 @@ type Prune struct {
	RetentionPolicy *RetentionGrid // TODO abstract interface to support future policies?
}

type Autosnap struct {
	Name          string
	Prefix        string
	Interval      jobrun.RepeatStrategy
	DatasetFilter zfs.DatasetMapping
}

type Config struct {
	Pools     []Pool
	Pushs     []Push
	Pulls     []Pull
	Sinks     []ClientMapping
	PullACLs  []ClientMapping
	Prunes    []Prune
	Autosnaps []Autosnap
}

func ParseConfig(path string) (config Config, err error) {
@@ -122,6 +130,9 @@ func parseMain(root map[string]interface{}) (c Config, err error) {
	if c.Prunes, err = parsePrunes(root["prune"]); err != nil {
		return
	}
	if c.Autosnaps, err = parseAutosnaps(root["autosnap"]); err != nil {
		return
	}
	return
}
@@ -624,3 +635,63 @@ func parseSnapshotFilter(fm map[string]string) (snapFilter zfs.FilesystemVersion
	snapFilter = prefixSnapshotFilter{prefix}
	return
}

func parseAutosnaps(m interface{}) (snaps []Autosnap, err error) {

	asList := make([]map[string]interface{}, 0)
	if err = mapstructure.Decode(m, &asList); err != nil {
		return
	}

	snaps = make([]Autosnap, len(asList))
	for i, e := range asList {
		if snaps[i], err = parseAutosnap(e); err != nil {
			err = fmt.Errorf("cannot parse autosnap job #%d: %s", i+1, err)
			return
		}
	}

	return
}

func parseAutosnap(m map[string]interface{}) (a Autosnap, err error) {

	var i struct {
		Name          string
		Prefix        string
		Interval      string
		DatasetFilter map[string]string `mapstructure:"dataset_filter"`
	}

	if err = mapstructure.Decode(m, &i); err != nil {
		err = fmt.Errorf("structure unfit: %s", err)
		return
	}

	a.Name = i.Name

	if len(i.Prefix) < 1 {
		err = fmt.Errorf("prefix must not be empty")
		return
	}
	a.Prefix = i.Prefix

	var interval time.Duration
	if interval, err = time.ParseDuration(i.Interval); err != nil {
		err = fmt.Errorf("cannot parse interval: %s", err)
		return
	}
	a.Interval = &jobrun.PeriodicRepeatStrategy{interval}

	if len(i.DatasetFilter) == 0 {
		err = fmt.Errorf("dataset_filter not specified")
		return
	}
	if a.DatasetFilter, err = parseComboMapping(i.DatasetFilter); err != nil {
		err = fmt.Errorf("cannot parse dataset filter: %s", err)
	}

	return
}
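Since the interval value goes through Go's time.ParseDuration, duration strings such as "1s", "30s", "10m", or "1h30m" are accepted in the config, while a plain number without a unit (e.g. "60") is rejected; the sample config further down shows the corresponding autosnap section.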


@@ -119,6 +119,13 @@ func main() {
				cli.BoolFlag{Name: "n", Usage: "simulation (dry run)"},
			},
		},
		{
			Name:   "autosnap",
			Action: cmdAutosnap,
			Flags: []cli.Flag{
				cli.StringFlag{Name: "job"},
			},
		},
	}

	app.Run(os.Args)
@@ -306,6 +313,7 @@ func cmdPrune(c *cli.Context) error {
				log.Printf("Prune job failed with error: %s", err)
			}
			log.Printf("\n")
		}
	}
@@ -313,6 +321,45 @@ func cmdPrune(c *cli.Context) error {
	if jobFailed {
		return cli.NewExitError("At least one job failed with an error. Check log for details.", 1)
	}
	return nil
}

func cmdAutosnap(c *cli.Context) error {

	log := defaultLog

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		runner.Start()
	}()

	log.Printf("autosnap...")

	for i := range conf.Autosnaps {
		snap := conf.Autosnaps[i]

		if !c.IsSet("job") || (c.IsSet("job") && c.String("job") == snap.Name) {
			job := jobrun.Job{
				Name:           fmt.Sprintf("autosnap.%s", snap.Name),
				RepeatStrategy: snap.Interval,
				RunFunc: func(log jobrun.Logger) error {
					log.Printf("doing autosnap: %v", snap)
					ctx := AutosnapContext{snap}
					return doAutosnap(ctx, log)
				},
			}
			runner.AddJob(job)
		}
	}

	wg.Wait()
	return nil
}
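When the --job flag is omitted, cmdAutosnap schedules every entry from the config's autosnap section; with --job, only the entry whose name matches is added to the runner, which is then expected to repeat it according to its configured interval.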


@@ -97,3 +97,25 @@ prune:
  snapshot_filter: {
    prefix: zrepl_
  }

- name: hfbak_prune # cleans up after hfbak autosnap job
  policy: grid
  grid: 1x1min(keep=all)
  dataset_filter: {
    "pool1*": ok
  }
  snapshot_filter: {
    prefix: zrepl_hfbak_
  }

autosnap:

- name: hfbak
  prefix: zrepl_hfbak_
  interval: 1s
  dataset_filter: {
    "pool1*": ok
  }
  # prune: hfbak_prune
  # future versions may inline the retention policy here, but for now,
  # pruning has to be triggered manually (it's safe to run autosnap + prune in parallel)


@@ -204,3 +204,26 @@ func ZFSDestroy(dataset string) (err error) {
	return
}

func ZFSSnapshot(fs DatasetPath, name string, recursive bool) (err error) {

	// note: the recursive flag is not yet acted upon; no -r is passed to zfs snapshot
	snapname := fmt.Sprintf("%s@%s", fs.ToString(), name)
	cmd := exec.Command(ZFS_BINARY, "snapshot", snapname)

	stderr := bytes.NewBuffer(make([]byte, 0, 1024))
	cmd.Stderr = stderr

	if err = cmd.Start(); err != nil {
		return err
	}

	if err = cmd.Wait(); err != nil {
		err = ZFSError{
			Stderr:  stderr.Bytes(),
			WaitErr: err,
		}
	}

	return
}
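For reference, a hedged sketch of how the currently unused recursive flag could be honored in a later revision (not part of this commit; "zfs snapshot -r" atomically snapshots the dataset and all of its descendants):

	// build the argument list conditionally instead of hard-coding a single snapshot
	args := []string{"snapshot"}
	if recursive {
		args = append(args, "-r")
	}
	cmd := exec.Command(ZFS_BINARY, append(args, snapname)...)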