package cmd

import (
	"context"
	"sync"
	"time"

	"github.com/mitchellh/mapstructure"
	"github.com/pkg/errors"

	"github.com/zrepl/zrepl/cmd/endpoint"
	"github.com/zrepl/zrepl/replication"
	"github.com/zrepl/zrepl/zfs"
)
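
// LocalJob replicates datasets between two locations on the same machine:
// it periodically snapshots the datasets on the left-hand side of Mapping,
// replicates them to the right-hand side, and prunes snapshots on both
// sides according to PruneLHS and PruneRHS.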
type LocalJob struct {
	Name           string
	Mapping        *DatasetMapFilter
	SnapshotPrefix string
	Interval       time.Duration
	PruneLHS       PrunePolicy
	PruneRHS       PrunePolicy
	Debug          JobDebugSettings
}
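
// parseLocalJob decodes the raw job section i (already parsed from the
// config file) into a LocalJob. The accepted keys follow the mapstructure
// tags of asMap below; an illustrative job section (field names taken from
// those tags, values made up for the example) might look like:
//
//	mapping: {"zroot/var/db<": "storage/backups/zroot/var/db"}
//	snapshot_prefix: zrepl_
//	interval: 10m
//	prune_lhs: {...}
//	prune_rhs: {...}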
func parseLocalJob(c JobParsingContext, name string, i map[string]interface{}) (j *LocalJob, err error) {

	var asMap struct {
		Mapping           map[string]string
		SnapshotPrefix    string                 `mapstructure:"snapshot_prefix"`
		Interval          string
		InitialReplPolicy string                 `mapstructure:"initial_repl_policy"`
		PruneLHS          map[string]interface{} `mapstructure:"prune_lhs"`
		PruneRHS          map[string]interface{} `mapstructure:"prune_rhs"`
		Debug             map[string]interface{}
	}

	if err = mapstructure.Decode(i, &asMap); err != nil {
		err = errors.Wrap(err, "mapstructure error")
		return nil, err
	}

	j = &LocalJob{Name: name}

	if j.Mapping, err = parseDatasetMapFilter(asMap.Mapping, false); err != nil {
		return
	}

	if j.SnapshotPrefix, err = parseSnapshotPrefix(asMap.SnapshotPrefix); err != nil {
		return
	}

	if j.Interval, err = parsePostitiveDuration(asMap.Interval); err != nil {
		err = errors.Wrap(err, "cannot parse interval")
		return
	}

	if j.PruneLHS, err = parsePrunePolicy(asMap.PruneLHS, true); err != nil {
		err = errors.Wrap(err, "cannot parse 'prune_lhs'")
		return
	}
	if j.PruneRHS, err = parsePrunePolicy(asMap.PruneRHS, false); err != nil {
		err = errors.Wrap(err, "cannot parse 'prune_rhs'")
		return
	}

	if err = mapstructure.Decode(asMap.Debug, &j.Debug); err != nil {
		err = errors.Wrap(err, "cannot parse 'debug'")
		return
	}

	return
}
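
// JobName returns the job's name as given in the config file.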
func (j *LocalJob) JobName() string {
	return j.Name
}
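
// JobType identifies this job as a local replication job.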
func (j *LocalJob) JobType() JobType { return JobTypeLocal }
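
// JobStart runs the job until ctx is cancelled: it starts the interval
// snapshotter and, after every snapshotting round, performs one local
// replication pass followed by concurrent pruning of both sides.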
func (j *LocalJob) JobStart(ctx context.Context) {

	log := getLogger(ctx)

	// Allow access to any dataset since we control what mapping
	// is passed to the pull routine.
	// All local datasets will be passed to its Map() function,
	// but only those for which a mapping exists will actually be pulled.
	// We can pay this small performance penalty for now.
	wildcardMapFilter := NewDatasetMapFilter(1, false)
	wildcardMapFilter.Add("<", "<")
	sender := endpoint.NewSender(wildcardMapFilter, NewPrefixFilter(j.SnapshotPrefix))

	receiver, err := endpoint.NewReceiver(j.Mapping, NewPrefixFilter(j.SnapshotPrefix))
	if err != nil {
		log.WithError(err).Error("unexpected error setting up local handler")
		return
	}

	snapper := IntervalAutosnap{
		DatasetFilter:    j.Mapping.AsFilter(),
		Prefix:           j.SnapshotPrefix,
		SnapshotInterval: j.Interval,
	}

	plhs, err := j.Pruner(PrunePolicySideLeft, false)
	if err != nil {
		log.WithError(err).Error("error creating lhs pruner")
		return
	}
	prhs, err := j.Pruner(PrunePolicySideRight, false)
	if err != nil {
		log.WithError(err).Error("error creating rhs pruner")
		return
	}

	didSnaps := make(chan struct{})
	go snapper.Run(WithLogger(ctx, log.WithField(logSubsysField, "snap")), didSnaps)
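
	// Main loop: wait for the snapshotter (or cancellation), then run one
	// replication pass and prune both sides concurrently.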
outer:
	for {

		select {
		case <-ctx.Done():
			log.WithError(ctx.Err()).Info("context")
			break outer
		case <-didSnaps:
			log.Debug("finished taking snapshots")
			log.Info("starting replication procedure")
		}

		{
			ctx := WithLogger(ctx, log.WithField(logSubsysField, "replication"))
			rep := replication.NewReplication()
			rep.Drive(ctx, sender, receiver)
		}

		var wg sync.WaitGroup

		wg.Add(1)
		go func() {
			plhs.Run(WithLogger(ctx, log.WithField(logSubsysField, "prune_lhs")))
			wg.Done()
		}()

		wg.Add(1)
		go func() {
			prhs.Run(WithLogger(ctx, log.WithField(logSubsysField, "prune_rhs")))
			wg.Done()
		}()

		wg.Wait()
	}
}
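
// Pruner builds a Pruner for one side of the mapping: the left-hand side
// covers the source datasets (Mapping.AsFilter()), the right-hand side the
// replication targets (the inverted mapping).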
func (j *LocalJob) Pruner(side PrunePolicySide, dryRun bool) (p Pruner, err error) {

	var dsfilter zfs.DatasetFilter
	var pp PrunePolicy
	switch side {
	case PrunePolicySideLeft:
		pp = j.PruneLHS
		dsfilter = j.Mapping.AsFilter()
	case PrunePolicySideRight:
		pp = j.PruneRHS
		dsfilter, err = j.Mapping.InvertedFilter()
		if err != nil {
			err = errors.Wrap(err, "cannot invert mapping for prune_rhs")
			return
		}
	default:
		err = errors.Errorf("must be either left or right side")
		return
	}

	p = Pruner{
		time.Now(),
		dryRun,
		dsfilter,
		j.SnapshotPrefix,
		pp,
	}

	return
}