package cmd

import (
	"fmt"
	"io"
	"os"
	"sync"
	"time"

	"github.com/spf13/cobra"

	"github.com/zrepl/zrepl/jobrun"
	"github.com/zrepl/zrepl/rpc"
	"github.com/zrepl/zrepl/util"
	"github.com/zrepl/zrepl/zfs"
)

var runArgs struct {
	job  string
	once bool
}

var RunCmd = &cobra.Command{
	Use:   "run",
	Short: "run push & pull replication",
	Run:   cmdRun,
}

var PushCmd = &cobra.Command{
	Use:   "push",
	Short: "run push job (first positional argument)",
	Run:   cmdPush,
}

var PullCmd = &cobra.Command{
	Use:   "pull",
	Short: "run pull job (first positional argument)",
	Run:   cmdPull,
}

func init() {
	RootCmd.AddCommand(RunCmd)
	RunCmd.Flags().BoolVar(&runArgs.once, "once", false, "run jobs only once, regardless of configured repeat behavior")
	RunCmd.Flags().StringVar(&runArgs.job, "job", "", "run only the given job")

	RootCmd.AddCommand(PushCmd)
	RootCmd.AddCommand(PullCmd)
}
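
// cmdPush runs the push job named by the first positional argument and exits non-zero on error.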
func cmdPush(cmd *cobra.Command, args []string) {

	if len(args) != 1 {
		log.Printf("must specify exactly one job as positional argument")
		os.Exit(1)
	}
	job, ok := conf.Pushs[args[0]]
	if !ok {
		log.Printf("could not find push job %s", args[0])
		os.Exit(1)
	}
	if err := jobPush(job, log); err != nil {
		log.Printf("error doing push: %s", err)
		os.Exit(1)
	}

}
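
// cmdPull runs the pull job named by the first positional argument and exits non-zero on error.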
func cmdPull(cmd *cobra.Command, args []string) {

	if len(args) != 1 {
		log.Printf("must specify exactly one job as positional argument")
		os.Exit(1)
	}
	job, ok := conf.Pulls[args[0]]
	if !ok {
		log.Printf("could not find pull job %s", args[0])
		os.Exit(1)
	}
	if err := jobPull(job, log); err != nil {
		log.Printf("error doing pull: %s", err)
		os.Exit(1)
	}

}
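
// cmdRun starts the job runner and schedules all configured pull and push jobs,
// optionally restricted to a single job or a single run via the --job and --once flags.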
func cmdRun(cmd *cobra.Command, args []string) {

	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		runner.Start()
	}()

	jobs := make([]jobrun.Job, len(conf.Pulls)+len(conf.Pushs))
	i := 0
	for _, pull := range conf.Pulls {
		pull := pull // capture loop variable for the closure below
		jobs[i] = jobrun.Job{
			Name:           fmt.Sprintf("pull.%d", i),
			RepeatStrategy: pull.RepeatStrategy,
			RunFunc: func(log jobrun.Logger) error {
				log.Printf("doing pull: %v", pull)
				return jobPull(pull, log)
			},
		}
		i++
	}
	for _, push := range conf.Pushs {
		push := push // capture loop variable for the closure below
		jobs[i] = jobrun.Job{
			Name:           fmt.Sprintf("push.%d", i),
			RepeatStrategy: push.RepeatStrategy,
			RunFunc: func(log jobrun.Logger) error {
				log.Printf("doing push: %v", push)
				return jobPush(push, log)
			},
		}
		i++
	}

	for _, j := range jobs {
		if runArgs.once {
			j.RepeatStrategy = jobrun.NoRepeatStrategy{}
		}
		if runArgs.job != "" {
			if runArgs.job == j.Name {
				runner.AddJob(j)
				break
			}
			continue
		}
		runner.AddJob(j)
	}

	for {
		select {
		case job := <-runner.NotificationChan():
			log.Printf("job %s reported error: %v\n", job.Name, job.LastError)
		}
	}

	wg.Wait()

}
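
// localPullACL passes every dataset; it is used for local transports, where the
// mapping given to the pull routine already restricts what gets pulled.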
type localPullACL struct{}

func (a localPullACL) Filter(p zfs.DatasetPath) (pass bool, err error) {
	return true, nil
}
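
// jobPull connects to the pull job's source and replicates the mapped datasets
// into the local pool according to the job's initial replication policy.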
func jobPull(pull *Pull, log jobrun.Logger) (err error) {

	if lt, ok := pull.From.Transport.(LocalTransport); ok {
		lt.SetHandler(Handler{
			Logger: log,
			// Allow access to any dataset since we control what mapping
			// is passed to the pull routine.
			// All local datasets will be passed to its Map() function,
			// but only those for which a mapping exists will actually be pulled.
			// We can pay this small performance penalty for now.
			PullACL: localPullACL{},
		})
		pull.From.Transport = lt
		log.Printf("fixing up local transport: %#v", pull.From.Transport)
	}

	var remote rpc.RPCRequester

	if remote, err = pull.From.Transport.Connect(log); err != nil {
		return
	}

	defer closeRPCWithTimeout(log, remote, time.Second*10, "")

	return doPull(PullContext{remote, log, pull.Mapping, pull.InitialReplPolicy})
}
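
// jobPush connects to the push job's target and asks it to pull from us via a
// PullMeRequest, using the job's filter as the PullACL.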
func jobPush(push *Push, log jobrun.Logger) (err error) {

	if _, ok := push.To.Transport.(LocalTransport); ok {
		panic("no support for local pushes")
	}

	var remote rpc.RPCRequester
	if remote, err = push.To.Transport.Connect(log); err != nil {
		return err
	}

	defer closeRPCWithTimeout(log, remote, time.Second*10, "")

	log.Printf("building handler for PullMeRequest")
	handler := Handler{
		Logger:          log,
		PullACL:         push.Filter,
		SinkMappingFunc: nil, // no need for that in the handler for PullMe
	}
	log.Printf("handler: %#v", handler)

	r := rpc.PullMeRequest{
		InitialReplPolicy: push.InitialReplPolicy,
	}
	log.Printf("doing PullMeRequest: %#v", r)

	if err = remote.PullMeRequest(r, handler); err != nil {
		log.Printf("PullMeRequest failed: %s", err)
		return
	}

	log.Printf("push job finished")
	return

}
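
// closeRPCWithTimeout sends a CloseRequest to remote and falls back to
// ForceClose if the request errors or does not complete within timeout.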
func closeRPCWithTimeout(log Logger, remote rpc.RPCRequester, timeout time.Duration, goodbye string) {
	log.Printf("closing rpc connection")

	ch := make(chan error)
	go func() {
		ch <- remote.CloseRequest(rpc.CloseRequest{goodbye})
	}()

	var err error
	select {
	case <-time.After(timeout):
		err = fmt.Errorf("timeout exceeded (%s)", timeout)
	case closeRequestErr := <-ch:
		err = closeRequestErr
	}

	if err != nil {
		log.Printf("error closing connection: %s", err)
		err = remote.ForceClose()
		if err != nil {
			log.Printf("error force-closing connection: %s", err)
		}
	}
	return
}
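
// PullContext bundles the parameters for a single doPull invocation.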
type PullContext struct {
	Remote            rpc.RPCRequester
	Log               Logger
	Mapping           DatasetMapping
	InitialReplPolicy rpc.InitialReplPolicy
}
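
// doPull maps the remote filesystems to local ones, creates placeholder
// filesystems where needed, and replicates each mapped filesystem according to
// the diff between local and remote versions.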
func doPull(pull PullContext) (err error) {

	remote := pull.Remote
	log := pull.Log

	fsr := rpc.FilesystemRequest{}
	var remoteFilesystems []zfs.DatasetPath
	if remoteFilesystems, err = remote.FilesystemRequest(fsr); err != nil {
		return
	}

	// build mapping (local->RemoteLocalMapping) + traversal datastructure
	type RemoteLocalMapping struct {
		Remote zfs.DatasetPath
		Local  zfs.DatasetPath
	}
	replMapping := make(map[string]RemoteLocalMapping, len(remoteFilesystems))
	localTraversal := zfs.NewDatasetPathForest()
	{
		log.Printf("mapping using %#v\n", pull.Mapping)
		for fs := range remoteFilesystems {
			var err error
			var localFs zfs.DatasetPath
			localFs, err = pull.Mapping.Map(remoteFilesystems[fs])
			if err != nil {
				if err != NoMatchError {
					log.Printf("error mapping %s: %#v\n", remoteFilesystems[fs], err)
					return err
				}
				continue
			}
			m := RemoteLocalMapping{remoteFilesystems[fs], localFs}
			replMapping[m.Local.ToString()] = m
			localTraversal.Add(m.Local)
		}
	}

	// get info about local filesystems
	localFilesystemState, err := zfs.ZFSListFilesystemState()
	if err != nil {
		log.Printf("cannot get local filesystems map: %s", err)
		return err
	}

	log.Printf("remoteFilesystems: %#v\nreplMapping: %#v\n", remoteFilesystems, replMapping)

	// per fs sync, assume sorted in top-down order TODO

	localTraversal.WalkTopDown(func(v zfs.DatasetPathVisit) bool {

		if v.FilledIn {
			if _, exists := localFilesystemState[v.Path.ToString()]; exists {
				// No need to verify if this is a placeholder or not. It is sufficient
				// to know we can add child filesystems to it
				return true
			}
			log.Printf("creating placeholder filesystem %s", v.Path)
			err = zfs.ZFSCreatePlaceholderFilesystem(v.Path)
			if err != nil {
				err = fmt.Errorf("aborting, cannot create placeholder filesystem %s: %s", v.Path, err)
				return false
			}
			return true
		}

		m, ok := replMapping[v.Path.ToString()]
		if !ok {
			panic("internal inconsistency: replMapping should contain mapping for any path that was not filled in by WalkTopDown()")
		}

		log := func(format string, args ...interface{}) {
			log.Printf("[%s => %s]: %s", m.Remote.ToString(), m.Local.ToString(), fmt.Sprintf(format, args...))
		}

		log("mapping: %#v\n", m)

		localState, localExists := localFilesystemState[m.Local.ToString()]

		var versions []zfs.FilesystemVersion
		switch {
		case !localExists:
			log("local filesystem does not exist")
		case localState.Placeholder:
			log("local filesystem is marked as placeholder")
		default:
			log("local filesystem exists, retrieving versions for diff")
			if versions, err = zfs.ZFSListFilesystemVersions(m.Local, nil); err != nil {
				log("cannot get filesystem versions for %s, stopping...: %v", m.Local.ToString(), err)
				return false
			}
		}

		var theirVersions []zfs.FilesystemVersion
		theirVersions, err = remote.FilesystemVersionsRequest(rpc.FilesystemVersionsRequest{
			Filesystem: m.Remote,
		})
		if err != nil {
			log("cannot fetch remote filesystem versions, stopping: %s", err)
			return false
		}

		diff := zfs.MakeFilesystemDiff(versions, theirVersions)
		log("diff: %#v\n", diff)

		if localState.Placeholder && diff.Conflict != zfs.ConflictAllRight {
			panic("internal inconsistency: local placeholder implies ConflictAllRight")
		}

		switch diff.Conflict {
		case zfs.ConflictAllRight:

			log("performing initial sync, following policy: %#v", pull.InitialReplPolicy)

			if pull.InitialReplPolicy != rpc.InitialReplPolicyMostRecent {
				panic(fmt.Sprintf("policy %#v not implemented", pull.InitialReplPolicy))
			}

			snapsOnly := make([]zfs.FilesystemVersion, 0, len(diff.MRCAPathRight))
			for s := range diff.MRCAPathRight {
				if diff.MRCAPathRight[s].Type == zfs.Snapshot {
					snapsOnly = append(snapsOnly, diff.MRCAPathRight[s])
				}
			}

			if len(snapsOnly) < 1 {
				log("cannot perform initial sync: no remote snapshots. stopping...")
				return false
			}

			r := rpc.InitialTransferRequest{
				Filesystem:        m.Remote,
				FilesystemVersion: snapsOnly[len(snapsOnly)-1],
			}

			log("requesting initial transfer")

			var stream io.Reader
			if stream, err = remote.InitialTransferRequest(r); err != nil {
				log("error requesting initial transfer, stopping...: %s", err)
				return false
			}

			log("received initial transfer request response. zfs recv...")

			watcher := util.IOProgressWatcher{Reader: stream}
			watcher.KickOff(1*time.Second, func(p util.IOProgress) {
				log("progress on receive operation: %v bytes received", p.TotalRX)
			})

			recvArgs := []string{"-u"}
			if localState.Placeholder {
				log("receive with forced rollback to replace placeholder filesystem")
				recvArgs = append(recvArgs, "-F")
			}

			if err = zfs.ZFSRecv(m.Local, &watcher, recvArgs...); err != nil {
				log("error receiving stream, stopping...: %s", err)
				return false
			}
			log("received stream, %v bytes total", watcher.Progress().TotalRX)

			log("configuring properties of received filesystem")

			if err = zfs.ZFSSet(m.Local, "readonly", "on"); err != nil {
				log("cannot set readonly property: %s", err)
			}

			log("finished initial transfer")
			return true

		case zfs.ConflictIncremental:

			if len(diff.IncrementalPath) < 2 {
				log("remote and local are in sync")
				return true
			}

			log("incremental transfers using path: %#v", diff.IncrementalPath)
			var pathRx uint64

			for i := 0; i < len(diff.IncrementalPath)-1; i++ {

				from, to := diff.IncrementalPath[i], diff.IncrementalPath[i+1]

				log := func(format string, args ...interface{}) {
					log("[%s => %s]: %s", from.Name, to.Name, fmt.Sprintf(format, args...))
				}

				r := rpc.IncrementalTransferRequest{
					Filesystem: m.Remote,
					From:       from,
					To:         to,
				}
				log("requesting incremental transfer: %#v", r)

				var stream io.Reader
				if stream, err = remote.IncrementalTransferRequest(r); err != nil {
					log("error requesting incremental transfer, stopping...: %s", err.Error())
					return false
				}

				log("receiving incremental transfer")

				watcher := util.IOProgressWatcher{Reader: stream}
				watcher.KickOff(1*time.Second, func(p util.IOProgress) {
					log("progress on receive operation: %v bytes received", p.TotalRX)
				})

				if err = zfs.ZFSRecv(m.Local, &watcher); err != nil {
					log("error receiving stream, stopping...: %s", err)
					return false
				}

				totalRx := watcher.Progress().TotalRX
				pathRx += totalRx
				log("finished incremental transfer, %v bytes total", totalRx)

			}

			log("finished incremental transfer path, %v bytes total", pathRx)
			return true

		case zfs.ConflictNoCommonAncestor:

			log("sender and receiver filesystem have snapshots, but no common one")
			log("perform manual replication to establish a common snapshot history")
			log("sender snapshot list: %#v", diff.MRCAPathRight)
			log("receiver snapshot list: %#v", diff.MRCAPathLeft)
			return false

		case zfs.ConflictDiverged:

			log("sender and receiver filesystem share a history but have diverged")
			log("perform manual replication or delete snapshots on the receiving " +
				"side to establish an incremental replication path")
			log("sender-only snapshots: %#v", diff.MRCAPathRight)
			log("receiver-only snapshots: %#v", diff.MRCAPathLeft)
			return false

		}

		panic("implementation error: this should not be reached")

	})

	return

}