mirror of
https://github.com/zrepl/zrepl.git
replication + endpoint: replication guarantees: guarantee_{resumability,incremental,nothing}
This commit
- adds a configuration in which no step holds, replication cursors, etc. are created
- removes the send.step_holds.disable_incremental setting
- creates a new config option `replication` for active-side jobs
- adds the replication.protection.{initial,incremental} settings, each of which can have values
  - `guarantee_resumability`
  - `guarantee_incremental`
  - `guarantee_nothing`
  (refer to docs/configuration/replication.rst for semantics)

The `replication` config from an active side is sent to both endpoint.Sender and endpoint.Receiver for each replication step. Sender and Receiver then act accordingly.

For `guarantee_incremental`, we add the new `tentative-replication-cursor` abstraction. The necessity for that abstraction is outlined in https://github.com/zrepl/zrepl/issues/340.

fixes https://github.com/zrepl/zrepl/issues/340
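As a rough orientation for the three protection values named above, here is a minimal Go sketch of how the configuration strings could be mapped to an enum-like kind. The type and function names are hypothetical and are not zrepl's API; the commit implements the real mapping inside the endpoint package.

package main

import "fmt"

// Hypothetical names, for illustration only.
type GuaranteeKind int

const (
	GuaranteeResumability GuaranteeKind = iota // keep step holds so an interrupted step can resume
	GuaranteeIncremental                       // keep bookmarks/cursors so a new incremental step is always possible
	GuaranteeNothing                           // create no holds or bookmarks at all
)

func ParseGuarantee(s string) (GuaranteeKind, error) {
	switch s {
	case "guarantee_resumability":
		return GuaranteeResumability, nil
	case "guarantee_incremental":
		return GuaranteeIncremental, nil
	case "guarantee_nothing":
		return GuaranteeNothing, nil
	default:
		return 0, fmt.Errorf("invalid replication guarantee %q", s)
	}
}

func main() {
	k, err := ParseGuarantee("guarantee_incremental")
	fmt.Println(k, err) // 1 <nil>
}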
This commit is contained in:
parent
27673a23e9
commit
30cdc1430e
@@ -52,11 +52,12 @@ func (j JobEnum) Name() string {
 }
 
 type ActiveJob struct {
 	Type    string                `yaml:"type"`
 	Name    string                `yaml:"name"`
 	Connect ConnectEnum           `yaml:"connect"`
 	Pruning PruningSenderReceiver `yaml:"pruning"`
 	Debug   JobDebugSettings      `yaml:"debug,optional"`
+	Replication *Replication      `yaml:"replication,optional,fromdefaults"`
 }
 
 type PassiveJob struct {
@@ -76,18 +77,7 @@ type SnapJob struct {
 }
 
 type SendOptions struct {
-	Encrypted bool                 `yaml:"encrypted"`
-	StepHolds SendOptionsStepHolds `yaml:"step_holds,optional"`
-}
-
-type SendOptionsStepHolds struct {
-	DisableIncremental bool `yaml:"disable_incremental,optional"`
-}
-
-var _ yaml.Defaulter = (*SendOptions)(nil)
-
-func (l *SendOptions) SetDefault() {
-	*l = SendOptions{Encrypted: false}
+	Encrypted bool `yaml:"encrypted,optional,default=false"`
 }
 
 type RecvOptions struct {
@@ -98,10 +88,13 @@ type RecvOptions struct {
 	// Reencrypt bool `yaml:"reencrypt"`
 }
 
-var _ yaml.Defaulter = (*RecvOptions)(nil)
+type Replication struct {
+	Protection *ReplicationOptionsProtection `yaml:"protection,optional,fromdefaults"`
+}
 
-func (l *RecvOptions) SetDefault() {
-	*l = RecvOptions{}
+type ReplicationOptionsProtection struct {
+	Initial     string `yaml:"initial,optional,default=guarantee_resumability"`
+	Incremental string `yaml:"incremental,optional,default=guarantee_resumability"`
 }
 
 type PushJob struct {
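The new `Replication` and `ReplicationOptionsProtection` structs above only declare the YAML fields and their defaults. As a hedged sketch (the helper name is made up and not part of this commit), validating the two string fields could look like this:

package config

import "fmt"

// Hypothetical helper, shown only to illustrate the accepted values of the
// ReplicationOptionsProtection struct declared in the hunk above.
func validateProtection(p *ReplicationOptionsProtection) error {
	valid := map[string]bool{
		"guarantee_resumability": true,
		"guarantee_incremental":  true,
		"guarantee_nothing":      true,
	}
	for _, v := range []string{p.Initial, p.Incremental} {
		if !valid[v] {
			return fmt.Errorf("invalid protection value %q", v)
		}
	}
	return nil
}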
@@ -111,6 +104,9 @@ type PushJob struct {
 	Send *SendOptions `yaml:"send,fromdefaults,optional"`
 }
 
+func (j *PushJob) GetFilesystems() FilesystemsFilter { return j.Filesystems }
+func (j *PushJob) GetSendOptions() *SendOptions { return j.Send }
+
 type PullJob struct {
 	ActiveJob `yaml:",inline"`
 	RootFS string `yaml:"root_fs"`
@@ -118,6 +114,10 @@ type PullJob struct {
 	Recv *RecvOptions `yaml:"recv,fromdefaults,optional"`
 }
 
+func (j *PullJob) GetRootFS() string { return j.RootFS }
+func (j *PullJob) GetAppendClientIdentity() bool { return false }
+func (j *PullJob) GetRecvOptions() *RecvOptions { return j.Recv }
+
 type PositiveDurationOrManual struct {
 	Interval time.Duration
 	Manual bool
@@ -155,6 +155,10 @@ type SinkJob struct {
 	Recv *RecvOptions `yaml:"recv,optional,fromdefaults"`
 }
 
+func (j *SinkJob) GetRootFS() string { return j.RootFS }
+func (j *SinkJob) GetAppendClientIdentity() bool { return true }
+func (j *SinkJob) GetRecvOptions() *RecvOptions { return j.Recv }
+
 type SourceJob struct {
 	PassiveJob `yaml:",inline"`
 	Snapshotting SnapshottingEnum `yaml:"snapshotting"`
@@ -162,6 +166,9 @@ type SourceJob struct {
 	Send *SendOptions `yaml:"send,optional,fromdefaults"`
 }
 
+func (j *SourceJob) GetFilesystems() FilesystemsFilter { return j.Filesystems }
+func (j *SourceJob) GetSendOptions() *SendOptions { return j.Send }
+
 type FilesystemsFilter map[string]bool
 
 type SnapshottingEnum struct {
@@ -60,9 +60,9 @@ jobs:
 	})
 
 	t.Run("encrypted_unspecified", func(t *testing.T) {
-		c, err := testConfig(t, fill(encrypted_unspecified))
-		assert.Error(t, err)
-		assert.Nil(t, c)
+		c = testValidConfig(t, fill(encrypted_unspecified))
+		encrypted := c.Jobs[0].Ret.(*PushJob).Send.Encrypted
+		assert.Equal(t, false, encrypted)
 	})
 
 	t.Run("send_not_specified", func(t *testing.T) {
@@ -52,12 +52,15 @@ jobs:
     }
     send:
       encrypted: true
-    # disable incremental step holds so that
-    # - we can yank out the backup drive during replication
-    # - thereby sacrificing resumability
-    # - in exchange for the replicating snapshot not sticking around until we reconnect the backup drive
-    step_holds:
-      disable_incremental: true
+    replication:
+      protection:
+        initial: guarantee_resumability
+        # Downgrade protection to guarantee_incremental which uses zfs bookmarks instead of zfs holds.
+        # Thus, when we yank out the backup drive during replication
+        # - we might not be able to resume the interrupted replication step because the partially received `to` snapshot of a `from`->`to` step may be pruned any time
+        # - but in exchange we get back the disk space allocated by `to` when we prune it
+        # - and because we still have the bookmarks created by `guarantee_incremental`, we can still do incremental replication of `from`->`to2` in the future
+        incremental: guarantee_incremental
     snapshotting:
       type: manual
     pruning:
@@ -12,7 +12,6 @@ import (
 	"github.com/zrepl/zrepl/daemon/logging/trace"
 
 	"github.com/zrepl/zrepl/config"
-	"github.com/zrepl/zrepl/daemon/filters"
 	"github.com/zrepl/zrepl/daemon/job/reset"
 	"github.com/zrepl/zrepl/daemon/job/wakeup"
 	"github.com/zrepl/zrepl/daemon/pruner"
@@ -145,23 +144,24 @@ func (m *modePush) ResetConnectBackoff() {
 
 func modePushFromConfig(g *config.Global, in *config.PushJob, jobID endpoint.JobID) (*modePush, error) {
 	m := &modePush{}
-	fsf, err := filters.DatasetMapFilterFromConfig(in.Filesystems)
+	var err error
+
+	m.senderConfig, err = buildSenderConfig(in, jobID)
 	if err != nil {
-		return nil, errors.Wrap(err, "cannot build filesystem filter")
+		return nil, errors.Wrap(err, "sender config")
 	}
 
-	m.senderConfig = &endpoint.SenderConfig{
-		FSF:                         fsf,
-		Encrypt:                     &zfs.NilBool{B: in.Send.Encrypted},
-		DisableIncrementalStepHolds: in.Send.StepHolds.DisableIncremental,
-		JobID:                       jobID,
+	replicationConfig, err := logic.ReplicationConfigFromConfig(in.Replication)
+	if err != nil {
+		return nil, errors.Wrap(err, "field `replication`")
 	}
 
 	m.plannerPolicy = &logic.PlannerPolicy{
 		EncryptedSend: logic.TriFromBool(in.Send.Encrypted),
+		ReplicationConfig: *replicationConfig,
 	}
 
-	if m.snapper, err = snapper.FromConfig(g, fsf, in.Snapshotting); err != nil {
+	if m.snapper, err = snapper.FromConfig(g, m.senderConfig.FSF, in.Snapshotting); err != nil {
 		return nil, errors.Wrap(err, "cannot build snapper")
 	}
 
@@ -173,7 +173,6 @@ type modePull struct {
 	receiver       *endpoint.Receiver
 	receiverConfig endpoint.ReceiverConfig
 	sender         *rpc.Client
-	rootFS         *zfs.DatasetPath
 	plannerPolicy  *logic.PlannerPolicy
 	interval       config.PositiveDurationOrManual
 }
@@ -247,26 +246,19 @@ func modePullFromConfig(g *config.Global, in *config.PullJob, jobID endpoint.Job
 	m = &modePull{}
 	m.interval = in.Interval
 
-	m.rootFS, err = zfs.NewDatasetPath(in.RootFS)
+	replicationConfig, err := logic.ReplicationConfigFromConfig(in.Replication)
 	if err != nil {
-		return nil, errors.New("RootFS is not a valid zfs filesystem path")
-	}
-	if m.rootFS.Length() <= 0 {
-		return nil, errors.New("RootFS must not be empty") // duplicates error check of receiver
+		return nil, errors.Wrap(err, "field `replication`")
 	}
 
 	m.plannerPolicy = &logic.PlannerPolicy{
 		EncryptedSend: logic.DontCare,
+		ReplicationConfig: *replicationConfig,
 	}
 
-	m.receiverConfig = endpoint.ReceiverConfig{
-		JobID:                      jobID,
-		RootWithoutClientComponent: m.rootFS,
-		AppendClientIdentity:       false, // !
-		UpdateLastReceivedHold:     true,
-	}
-	if err := m.receiverConfig.Validate(); err != nil {
-		return nil, errors.Wrap(err, "cannot build receiver config")
+	m.receiverConfig, err = buildReceiverConfig(in, jobID)
+	if err != nil {
+		return nil, err
 	}
 
 	return m, nil
@@ -365,7 +357,7 @@ func (j *ActiveSide) OwnedDatasetSubtreeRoot() (rfs *zfs.DatasetPath, ok bool) {
 		_ = j.mode.(*modePush) // make sure we didn't introduce a new job type
 		return nil, false
 	}
-	return pull.rootFS.Copy(), true
+	return pull.receiverConfig.RootWithoutClientComponent.Copy(), true
 }
 
 func (j *ActiveSide) SenderConfig() *endpoint.SenderConfig {
daemon/job/build_jobs_sendrecvoptions.go (new file, 55 lines)
@@ -0,0 +1,55 @@
+package job
+
+import (
+	"github.com/pkg/errors"
+	"github.com/zrepl/zrepl/config"
+	"github.com/zrepl/zrepl/daemon/filters"
+	"github.com/zrepl/zrepl/endpoint"
+	"github.com/zrepl/zrepl/zfs"
+)
+
+type SendingJobConfig interface {
+	GetFilesystems() config.FilesystemsFilter
+	GetSendOptions() *config.SendOptions // must not be nil
+}
+
+func buildSenderConfig(in SendingJobConfig, jobID endpoint.JobID) (*endpoint.SenderConfig, error) {
+
+	fsf, err := filters.DatasetMapFilterFromConfig(in.GetFilesystems())
+	if err != nil {
+		return nil, errors.Wrap(err, "cannot build filesystem filter")
+	}
+
+	return &endpoint.SenderConfig{
+		FSF:     fsf,
+		Encrypt: &zfs.NilBool{B: in.GetSendOptions().Encrypted},
+		JobID:   jobID,
+	}, nil
+}
+
+type ReceivingJobConfig interface {
+	GetRootFS() string
+	GetAppendClientIdentity() bool
+	GetRecvOptions() *config.RecvOptions
+}
+
+func buildReceiverConfig(in ReceivingJobConfig, jobID endpoint.JobID) (rc endpoint.ReceiverConfig, err error) {
+	rootFs, err := zfs.NewDatasetPath(in.GetRootFS())
+	if err != nil {
+		return rc, errors.New("root_fs is not a valid zfs filesystem path")
+	}
+	if rootFs.Length() <= 0 {
+		return rc, errors.New("root_fs must not be empty") // duplicates error check of receiver
+	}
+
+	rc = endpoint.ReceiverConfig{
+		JobID:                      jobID,
+		RootWithoutClientComponent: rootFs,
+		AppendClientIdentity:       in.GetAppendClientIdentity(),
+	}
+	if err := rc.Validate(); err != nil {
+		return rc, errors.Wrap(err, "cannot build receiver config")
+	}
+
+	return rc, nil
+}
@@ -9,7 +9,6 @@ import (
 	"github.com/zrepl/zrepl/daemon/logging/trace"
 
 	"github.com/zrepl/zrepl/config"
-	"github.com/zrepl/zrepl/daemon/filters"
 	"github.com/zrepl/zrepl/daemon/logging"
 	"github.com/zrepl/zrepl/daemon/snapper"
 	"github.com/zrepl/zrepl/endpoint"
@@ -48,19 +47,9 @@ func (m *modeSink) SnapperReport() *snapper.Report { return nil }
 func modeSinkFromConfig(g *config.Global, in *config.SinkJob, jobID endpoint.JobID) (m *modeSink, err error) {
 	m = &modeSink{}
 
-	rootDataset, err := zfs.NewDatasetPath(in.RootFS)
+	m.receiverConfig, err = buildReceiverConfig(in, jobID)
 	if err != nil {
-		return nil, errors.New("root dataset is not a valid zfs filesystem path")
-	}
-
-	m.receiverConfig = endpoint.ReceiverConfig{
-		JobID:                      jobID,
-		RootWithoutClientComponent: rootDataset,
-		AppendClientIdentity:       true, // !
-		UpdateLastReceivedHold:     true,
-	}
-	if err := m.receiverConfig.Validate(); err != nil {
-		return nil, errors.Wrap(err, "cannot build receiver config")
+		return nil, err
 	}
 
 	return m, nil
@@ -74,18 +63,13 @@ type modeSource struct {
 func modeSourceFromConfig(g *config.Global, in *config.SourceJob, jobID endpoint.JobID) (m *modeSource, err error) {
 	// FIXME exact dedup of modePush
 	m = &modeSource{}
-	fsf, err := filters.DatasetMapFilterFromConfig(in.Filesystems)
+
+	m.senderConfig, err = buildSenderConfig(in, jobID)
 	if err != nil {
-		return nil, errors.Wrap(err, "cannot build filesystem filter")
-	}
-	m.senderConfig = &endpoint.SenderConfig{
-		FSF:                         fsf,
-		Encrypt:                     &zfs.NilBool{B: in.Send.Encrypted},
-		DisableIncrementalStepHolds: in.Send.StepHolds.DisableIncremental,
-		JobID:                       jobID,
+		return nil, errors.Wrap(err, "send options")
 	}
 
-	if m.snapper, err = snapper.FromConfig(g, fsf, in.Snapshotting); err != nil {
+	if m.snapper, err = snapper.FromConfig(g, m.senderConfig.FSF, in.Snapshotting); err != nil {
 		return nil, errors.Wrap(err, "cannot build snapper")
 	}
 
@@ -175,8 +175,6 @@ func (j *SnapJob) doPrune(ctx context.Context) {
 		FSF: j.fsfilter,
 		// FIXME encryption setting is irrelevant for SnapJob because the endpoint is only used as pruner.Target
 		Encrypt: &zfs.NilBool{B: true},
-		// FIXME DisableIncrementalStepHolds setting is irrelevant for SnapJob because the endpoint is only used as pruner.Target
-		DisableIncrementalStepHolds: false,
 	})
 	j.pruner = j.prunerFactory.BuildLocalPruner(ctx, sender, alwaysUpToDateReplicationCursorHistory{sender})
 	log.Info("start pruning")
@@ -11,7 +11,6 @@ import (
 	"github.com/zrepl/zrepl/daemon/logging/trace"
 
 	"github.com/zrepl/zrepl/config"
-	"github.com/zrepl/zrepl/daemon/filters"
 	"github.com/zrepl/zrepl/daemon/hooks"
 	"github.com/zrepl/zrepl/daemon/logging"
 	"github.com/zrepl/zrepl/logger"
@@ -49,7 +48,7 @@ type args struct {
 	ctx            context.Context
 	prefix         string
 	interval       time.Duration
-	fsf            *filters.DatasetMapFilter
+	fsf            zfs.DatasetFilter
 	snapshotsTaken chan<- struct{}
 	hooks          *hooks.List
 	dryRun         bool
@@ -109,7 +108,7 @@ func getLogger(ctx context.Context) Logger {
 	return logging.GetLogger(ctx, logging.SubsysSnapshot)
 }
 
-func PeriodicFromConfig(g *config.Global, fsf *filters.DatasetMapFilter, in *config.SnapshottingPeriodic) (*Snapper, error) {
+func PeriodicFromConfig(g *config.Global, fsf zfs.DatasetFilter, in *config.SnapshottingPeriodic) (*Snapper, error) {
 	if in.Prefix == "" {
 		return nil, errors.New("prefix must not be empty")
 	}
@@ -383,7 +382,7 @@ func wait(a args, u updater) state {
 	}
 }
 
-func listFSes(ctx context.Context, mf *filters.DatasetMapFilter) (fss []*zfs.DatasetPath, err error) {
+func listFSes(ctx context.Context, mf zfs.DatasetFilter) (fss []*zfs.DatasetPath, err error) {
 	return zfs.ZFSListMapping(ctx, mf)
 }
 
@@ -5,7 +5,7 @@ import (
 	"fmt"
 
 	"github.com/zrepl/zrepl/config"
-	"github.com/zrepl/zrepl/daemon/filters"
+	"github.com/zrepl/zrepl/zfs"
 )
 
 // FIXME: properly abstract snapshotting:
@@ -32,7 +32,7 @@ func (s *PeriodicOrManual) Report() *Report {
 	return nil
 }
 
-func FromConfig(g *config.Global, fsf *filters.DatasetMapFilter, in config.SnapshottingEnum) (*PeriodicOrManual, error) {
+func FromConfig(g *config.Global, fsf zfs.DatasetFilter, in config.SnapshottingEnum) (*PeriodicOrManual, error) {
 	switch v := in.Ret.(type) {
 	case *config.SnapshottingPeriodic:
 		snapper, err := PeriodicFromConfig(g, fsf, v)
@@ -34,27 +34,30 @@ This is a big one! Headlining features:
 
 * **Resumable Send & Recv Support**
   No knobs required, automatically used where supported.
-* **Hold-Protected Send & Recv**
-  Automatic ZFS holds to ensure that we can always use resumable send&recv for a replication step.
 * **Encrypted Send & Recv Support** for OpenZFS native encryption,
   :ref:`configurable <job-send-options>` at the job level, i.e., for all filesystems a job is responsible for.
-* **Receive-side hold on last received dataset**
-  The counterpart to the replication cursor bookmark on the send-side.
-  Ensures that incremental replication will always be possible between a sender and receiver.
+* **Replication Guarantees**
+  Automatic use of ZFS holds and bookmarks to protect a replicated filesystem from losing synchronization between sender and receiver.
+  By default, zrepl guarantees that incremental replication will always be possible and interrupted steps will always be resumable.
 
 .. TIP::
 
-   We highly recommend studying the :ref:`overview section of the configuration chapter <overview-how-replication-works>` to understand how replication works.
+   We highly recommend studying the updated :ref:`overview section of the configuration chapter <overview-how-replication-works>` to understand how replication works.
+
+Quick-start guides:
+
+* We have added :ref:`another quick-start guide for a typical workstation use case for zrepl <quickstart-backup-to-external-disk>`.
+  Check it out to learn how you can use zrepl to back up your workstation's OpenZFS natively-encrypted root filesystem to an external disk.
 
 Additional changelog:
 
 * |break| |break_config| **more restrictive job names than in prior zrepl versions**
-  Starting with this version, job names are going to be embedded into ZFS holds and bookmark names (see :ref:`here<replication-cursor-and-last-received-hold>` and :ref:`here<step-holds-and-bookmarks>`).
+  Starting with this version, job names are going to be embedded into ZFS holds and bookmark names (see :ref:`this section for details <zrepl-zfs-abstractions>`).
   Therefore you might need to adjust your job names.
   **Note that jobs** cannot be renamed easily **once you start using zrepl 0.3.**
 * |break| |mig| replication cursor representation changed
 
-  * zrepl now manages the :ref:`replication cursor bookmark <replication-cursor-and-last-received-hold>` per job-filesystem tuple instead of a single replication cursor per filesystem.
+  * zrepl now manages the :ref:`replication cursor bookmark <zrepl-zfs-abstractions>` per job-filesystem tuple instead of a single replication cursor per filesystem.
     In the future, this will permit multiple sending jobs to send from the same filesystems.
   * ZFS does not allow bookmark renaming, thus we cannot migrate the old replication cursors.
   * zrepl 0.3 will automatically create cursors in the new format for new replications, and warn if it still finds ones in the old format.
@@ -12,6 +12,7 @@ Configuration
    configuration/transports
    configuration/filter_syntax
    configuration/sendrecvoptions
+   configuration/replication
    configuration/snapshotting
    configuration/prune
    configuration/logging
@@ -132,23 +132,10 @@ The following high-level steps take place during replication and can be monitore
 
 The idea behind the execution order of replication steps is that if the sender snapshots all filesystems simultaneously at fixed intervals, the receiver will have all filesystems snapshotted at time ``T1`` before the first snapshot at ``T2 = T1 + $interval`` is replicated.
 
-Placeholder Filesystems
-^^^^^^^^^^^^^^^^^^^^^^^
-.. _replication-placeholder-property:
-
-**Placeholder filesystems** on the receiving side are regular ZFS filesystems with the placeholder property ``zrepl:placeholder=on``.
-Placeholders allow the receiving side to mirror the sender's ZFS dataset hierarchy without replicating every filesystem at every intermediary dataset path component.
-Consider the following example: ``S/H/J`` shall be replicated to ``R/sink/job/S/H/J``, but neither ``S/H`` nor ``S`` shall be replicated.
-ZFS requires the existence of ``R/sink/job/S`` and ``R/sink/job/S/H`` in order to receive into ``R/sink/job/S/H/J``.
-Thus, zrepl creates the parent filesystems as placeholders on the receiving side.
-If at some point ``S/H`` and ``S`` shall be replicated, the receiving side invalidates the placeholder flag automatically.
-The ``zrepl test placeholder`` command can be used to check whether a filesystem is a placeholder.
-
 ZFS Background Knowledge
 ^^^^^^^^^^^^^^^^^^^^^^^^
-This section gives some background knowledge about ZFS features that zrepl uses to guarantee that
-**incremental replication is always possible and that started replication steps can always be resumed if they are interrupted.**
+This section gives some background knowledge about ZFS features that zrepl uses to provide guarantees for a replication filesystem.
+Specifically, zrepl guarantees by default that **incremental replication is always possible and that started replication steps can always be resumed if they are interrupted.**
 
 **ZFS Send Modes & Bookmarks**
 ZFS supports full sends (``zfs send fs@to``) and incremental sends (``zfs send -i @from fs@to``).
@@ -166,41 +153,56 @@ An incremental send can only be resumed if ``@to`` still exists *and* either ``@
 
 **ZFS Holds**
 ZFS holds prevent a snapshot from being deleted through ``zfs destroy``, letting the destroy fail with a ``datset is busy`` error.
-Holds are created and referred to by a user-defined *tag*. They can be thought of as a named, persistent lock on the snapshot.
+Holds are created and referred to by a *tag*. They can be thought of as a named, persistent lock on the snapshot.
 
 
+.. _zrepl-zfs-abstractions:
+
+ZFS Abstractions Managed By zrepl
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+With the background knowledge from the previous paragraph, we now summarize the different on-disk ZFS objects that zrepl manages to provide its functionality.
+
+.. _replication-placeholder-property:
+
+**Placeholder filesystems** on the receiving side are regular ZFS filesystems with the placeholder property ``zrepl:placeholder=on``.
+Placeholders allow the receiving side to mirror the sender's ZFS dataset hierarchy without replicating every filesystem at every intermediary dataset path component.
+Consider the following example: ``S/H/J`` shall be replicated to ``R/sink/job/S/H/J``, but neither ``S/H`` nor ``S`` shall be replicated.
+ZFS requires the existence of ``R/sink/job/S`` and ``R/sink/job/S/H`` in order to receive into ``R/sink/job/S/H/J``.
+Thus, zrepl creates the parent filesystems as placeholders on the receiving side.
+If at some point ``S/H`` and ``S`` shall be replicated, the receiving side invalidates the placeholder flag automatically.
+The ``zrepl test placeholder`` command can be used to check whether a filesystem is a placeholder.
+
 .. _replication-cursor-and-last-received-hold:
 
-Guaranteeing That Incremental Sends Are Always Possible
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-**Replication cursor** bookmark and **last-received-hold** are managed by zrepl to ensure that future replications can always be done incrementally.
+The **replication cursor** bookmark and **last-received-hold** are managed by zrepl to ensure that future replications can always be done incrementally.
 The replication cursor is a send-side bookmark of the most recent successfully replicated snapshot,
 and the last-received-hold is a hold of that snapshot on the receiving side.
-Both are moved aomically after the receiving side has confirmed that a replication step is complete.
+Both are moved atomically after the receiving side has confirmed that a replication step is complete.
 
 The replication cursor has the format ``#zrepl_CUSOR_G_<GUID>_J_<JOBNAME>``.
 The last-received-hold tag has the format ``zrepl_last_received_J_<JOBNAME>``.
 Encoding the job name in the names ensures that multiple sending jobs can replicate the same filesystem to different receivers without interference.
 
+.. _tentative-replication-cursor-bookmarks:
+
+**Tentative replication cursor bookmarks** are short-lived boomkarks that protect the atomic moving-forward of the replication cursor and last-received-hold (see :issue:`this issue <340>`).
+They are only necessary if step holds are not used as per the :ref:`replication.protection <replication-option-protection>` setting.
+The tentative replication cursor has the format ``#zrepl_CUSORTENTATIVE_G_<GUID>_J_<JOBNAME>``.
 The ``zrepl zfs-abstraction list`` command provides a listing of all bookmarks and holds managed by zrepl.
 
-.. _step-holds-and-bookmarks:
+.. _step-holds:
 
-Guaranteeing That Sends Are Always Resumable
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
 **Step holds** are zfs holds managed by zrepl to ensure that a replication step can always be resumed if it is interrupted, e.g., due to network outage.
 zrepl creates step holds before it attempts a replication step and releases them after the receiver confirms that the replication step is complete.
 For an initial replication ``full @initial_snap``, zrepl puts a zfs hold on ``@initial_snap``.
 For an incremental send ``@from -> @to``, zrepl puts a zfs hold on both ``@from`` and ``@to``.
 Note that ``@from`` is not strictly necessary for resumability -- a bookmark on the sending side would be sufficient --, but size-estimation in currently used OpenZFS versions only works if ``@from`` is a snapshot.
 
 The hold tag has the format ``zrepl_STEP_J_<JOBNAME>``.
 A job only ever has one active send per filesystem.
 Thus, there are never more than two step holds for a given pair of ``(job,filesystem)``.
 
 **Step bookmarks** are zrepl's equivalent for holds on bookmarks (ZFS does not support putting holds on bookmarks).
-They are intended for a situation where a replication step uses a bookmark ``#bm`` as incremental ``from`` that is not managed by zrepl.
+They are intended for a situation where a replication step uses a bookmark ``#bm`` as incremental ``from`` where ``#bm`` is not managed by zrepl.
 To ensure resumability, zrepl copies ``#bm`` to step bookmark ``#zrepl_STEP_G_<GUID>_J_<JOBNAME>``.
 If the replication is interrupted and ``#bm`` is deleted by the user, the step bookmark remains as an incremental source for the resumable send.
 Note that zrepl does not yet support creating step bookmarks because the `corresponding ZFS feature for copying bookmarks <https://github.com/openzfs/zfs/pull/9571>`_ is not yet widely available .
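The bookmark and hold name formats documented above are fixed strings with the GUID and job name interpolated. A small illustrative Go sketch; the helper names are made up, the name templates are quoted from the documentation above, and the exact GUID encoding is an assumption of this sketch:

package main

import "fmt"

// Helper names are hypothetical; formats follow the docs hunk above.
// The %016x GUID encoding is an assumption for illustration.
func replicationCursorName(guid uint64, job string) string {
	return fmt.Sprintf("zrepl_CUSOR_G_%016x_J_%s", guid, job)
}

func tentativeCursorName(guid uint64, job string) string {
	return fmt.Sprintf("zrepl_CUSORTENTATIVE_G_%016x_J_%s", guid, job)
}

func stepHoldTag(job string) string {
	return fmt.Sprintf("zrepl_STEP_J_%s", job)
}

func main() {
	fmt.Println(replicationCursorName(0xdeadbeef, "prod-to-backups"))
	fmt.Println(tentativeCursorName(0xdeadbeef, "prod-to-backups"))
	fmt.Println(stepHoldTag("prod-to-backups"))
}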
docs/configuration/replication.rst (new file, 47 lines)
@@ -0,0 +1,47 @@
+.. include:: ../global.rst.inc
+
+
+Replication Options
+===================
+
+
+::
+
+   jobs:
+   - type: push
+     filesystems: ...
+     replication:
+       protection:
+         initial:     guarantee_resumability # guarantee_{resumability,incremental,nothing}
+         incremental: guarantee_resumability # guarantee_{resumability,incremental,nothing}
+     ...
+
+.. _replication-option-protection:
+
+``protection`` option
+--------------------------
+
+The ``protection`` variable controls the degree to which a replicated filesystem is protected from getting out of sync through a zrepl pruner or external tools that destroy snapshots.
+zrepl can guarantee :ref:`resumability <step-holds>` or just :ref:`incremental replication <replication-cursor-and-last-received-hold>`.
+
+``guarantee_resumability`` is the **default** value and guarantees that a replication step is always resumable and that incremental replication will always be possible.
+The implementation uses replication cursors, last-received-hold and step holds.
+
+``guarantee_incremental`` only guarantees that incremental replication will always be possible.
+If a step ``from -> to`` is interrupted and its `to` snapshot is destroyed, zrepl will remove the half-received ``to``'s resume state and start a new step ``from -> to2``.
+The implementation uses replication cursors, tentative replication cursors and last-received-hold.
+
+``guarantee_nothing`` does not make any guarantees with regards to keeping sending and receiving side in sync.
+No bookmarks or holds are created to protect sender and receiver from diverging.
+
+**Tradeoffs**
+
+Using ``guarantee_incremental`` instead of ``guarantee_resumability`` obviously removes the resumability guarantee.
+This means that replication progress is no longer monotonic which might lead to a replication setup that never makes progress if mid-step interruptions are too frequent (e.g. frequent network outages).
+However, the advantage and :issue:`reason for existence <288>` of the ``incremental`` mode is that it allows the pruner to delete snapshots of interrupted replication steps
+which is useful if replication happens so rarely (or fails so frequently) that the amount of disk space exclusively referenced by the step's snapshots becomes intolerable.
+
+.. NOTE::
+
+   When changing this flag, obsoleted zrepl-managed bookmarks and holds will be destroyed on the next replication step that is attempted for each filesystem.
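Because ``protection`` has separate ``initial`` and ``incremental`` fields, the sender has to pick one of the two settings per step, depending on whether the step has an incremental ``from``. A hedged Go sketch of that selection; the names are illustrative only, the commit implements this as `Strategy(from != nil)` in the endpoint package:

// Sketch: choose the configured guarantee for a step.
// ProtectionConfig mirrors the initial/incremental fields shown above.
type ProtectionConfig struct {
	Initial     string
	Incremental string
}

func (p ProtectionConfig) ForStep(isIncremental bool) string {
	if isIncremental {
		return p.Incremental // step has a `from` version
	}
	return p.Initial // full send of the first snapshot
}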
@@ -16,8 +16,6 @@ Send Options
      filesystems: ...
      send:
        encrypted: true
-       step_holds:
-         disable_incremental: false
      ...
 
 :ref:`Source<job-source>` and :ref:`push<job-push>` jobs have an optional ``send`` configuration section.
@@ -36,24 +34,6 @@ Filesystems matched by ``filesystems`` that are not encrypted are not sent and w
 
 If ``encryption=false``, zrepl expects that filesystems matching ``filesystems`` are not encrypted or have loaded encryption keys.
 
-.. _job-send-option-step-holds-disable-incremental:
-
-``step_holds.disable_incremental`` option
------------------------------------------
-
-The ``step_holds.disable_incremental`` variable controls whether the creation of :ref:`step holds <step-holds-and-bookmarks>` should be disabled for incremental replication.
-The default value is ``false``.
-
-Disabling step holds has the disadvantage that steps :ref:`might not be resumable <step-holds-and-bookmarks>` if interrupted.
-Non-resumability means that replication progress is no longer monotonic which might result in a replication setup that never makes progress if mid-step interruptions are too frequent (e.g. frequent network outages).
-
-However, the advantage and :issue:`reason for existence <288>` of this flag is that it allows the pruner to delete snapshots of interrupted replication steps
-which is useful if replication happens so rarely (or fails so frequently) that the amount of disk space exclusively referenced by the step's snapshots becomes intolerable.
-
-.. NOTE::
-
-   When setting this flag to ``true``, existing step holds for the job will be destroyed on the next replication attempt.
-
 .. _job-recv-options:
 
 Recv Options
|
|||||||
* We want to be able to put off the backups for more than three weeks, i.e., longer than the lifetime of the automatically created snapshots on our workstation.
|
* We want to be able to put off the backups for more than three weeks, i.e., longer than the lifetime of the automatically created snapshots on our workstation.
|
||||||
**zrepl should use bookmarks and holds to achieve this goal**.
|
**zrepl should use bookmarks and holds to achieve this goal**.
|
||||||
* When we yank out the drive during replication and go on a long vacation, we do *not* want the partially replicated snapshot to stick around as it would hold on to too much disk space over time.
|
* When we yank out the drive during replication and go on a long vacation, we do *not* want the partially replicated snapshot to stick around as it would hold on to too much disk space over time.
|
||||||
Therefore, we want zrepl to deviate from its :ref:`default step-hold behavior <step-holds-and-bookmarks>` and sacrifice resumability, but nonetheless retain the ability to do incremental replication once we return from our vacation.
|
Therefore, we want zrepl to deviate from its :ref:`default behavior <replication-option-protection>` and sacrifice resumability, but nonetheless retain the ability to do incremental replication once we return from our vacation.
|
||||||
**zrepl should provide an easy config knob to disable step holds for incremental replication**.
|
**zrepl should provide an easy config knob to disable step holds for incremental replication**.
|
||||||
|
|
||||||
The following config snippet implements the setup described above.
|
The following config snippet implements the setup described above.
|
||||||
|
@@ -21,10 +21,9 @@ import (
 )
 
 type SenderConfig struct {
 	FSF     zfs.DatasetFilter
 	Encrypt *zfs.NilBool
-	DisableIncrementalStepHolds bool
 	JobID   JobID
 }
 
 func (c *SenderConfig) Validate() error {
@@ -40,10 +39,9 @@ func (c *SenderConfig) Validate() error {
 
 // Sender implements replication.ReplicationEndpoint for a sending side
 type Sender struct {
 	FSFilter zfs.DatasetFilter
 	encrypt  *zfs.NilBool
-	disableIncrementalStepHolds bool
 	jobId    JobID
 }
 
 func NewSender(conf SenderConfig) *Sender {
@@ -51,10 +49,9 @@ func NewSender(conf SenderConfig) *Sender {
 		panic("invalid config" + err.Error())
 	}
 	return &Sender{
 		FSFilter: conf.FSF,
 		encrypt:  conf.Encrypt,
-		disableIncrementalStepHolds: conf.DisableIncrementalStepHolds,
 		jobId:    conf.JobID,
 	}
 }
 
@@ -211,37 +208,27 @@ func (s *Sender) Send(ctx context.Context, r *pdu.SendReq) (*pdu.SendRes, io.Rea
 		return res, nil, nil
 	}
 
-	// create a replication cursor for `From` (usually an idempotent no-op because SendCompleted already created it before)
-	var fromReplicationCursor Abstraction
-	if sendArgs.From != nil {
-		// For all but the first replication, this should always be a no-op because SendCompleted already moved the cursor
-		fromReplicationCursor, err = CreateReplicationCursor(ctx, sendArgs.FS, *sendArgs.FromVersion, s.jobId) // no shadow
-		if err == zfs.ErrBookmarkCloningNotSupported {
-			getLogger(ctx).Debug("not creating replication cursor from bookmark because ZFS does not support it")
-			// fallthrough
-		} else if err != nil {
-			return nil, nil, errors.Wrap(err, "cannot set replication cursor to `from` version before starting send")
-		}
-	}
-
-	takeStepHolds := sendArgs.FromVersion == nil || !s.disableIncrementalStepHolds
-
-	var fromHold, toHold Abstraction
-	// make sure `From` doesn't go away in order to make this step resumable
-	if sendArgs.From != nil && takeStepHolds {
-		fromHold, err = HoldStep(ctx, sendArgs.FS, *sendArgs.FromVersion, s.jobId) // no shadow
-		if err == zfs.ErrBookmarkCloningNotSupported {
-			getLogger(ctx).Debug("not creating step bookmark because ZFS does not support it")
-			// fallthrough
-		} else if err != nil {
-			return nil, nil, errors.Wrapf(err, "cannot hold `from` version %q before starting send", *sendArgs.FromVersion)
-		}
-	}
-	if takeStepHolds {
-		// make sure `To` doesn't go away in order to make this step resumable
-		toHold, err = HoldStep(ctx, sendArgs.FS, sendArgs.ToVersion, s.jobId)
-		if err != nil {
-			return nil, nil, errors.Wrapf(err, "cannot hold `to` version %q before starting send", sendArgs.ToVersion)
-		}
-	}
+	// create holds or bookmarks of `From` and `To` to guarantee one of the following:
+	// - that the replication step can always be resumed (`holds`),
+	// - that the replication step can be interrupted and a future replication
+	//   step with same or different `To` but same `From` is still possible (`bookmarks`)
+	// - nothing (`none`)
+	//
+	// ...
+	//
+	// ... actually create the abstractions
+	replicationGuaranteeOptions, err := replicationGuaranteeOptionsFromPDU(r.GetReplicationConfig().Protection)
+	if err != nil {
+		return nil, nil, err
+	}
+	replicationGuaranteeStrategy := replicationGuaranteeOptions.Strategy(sendArgs.From != nil)
+	liveAbs, err := replicationGuaranteeStrategy.SenderPreSend(ctx, s.jobId, &sendArgs)
+	if err != nil {
+		return nil, nil, err
+	}
+	for _, a := range liveAbs {
+		if a != nil {
+			abstractionsCacheSingleton.Put(a)
+		}
+	}
 
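The calls above (`SenderPreSend`, and later `SenderPostRecvConfirmed` / `ReceiverPostRecv`) suggest a per-guarantee strategy object with hooks around a replication step. A speculative interface sketch with simplified parameter types, added only to make the control flow readable; the commit's real signatures differ in detail:

// Speculative sketch of the strategy seams used above; not copied from the commit.
type guaranteeStrategySketch interface {
	Kind() ReplicationGuaranteeKind
	// Before the send: create step holds, tentative replication cursors, or
	// nothing, depending on the configured guarantee.
	SenderPreSend(ctx context.Context, jobID JobID, fs, from, to string) (liveAbs []Abstraction, err error)
	// After the receiver confirms the step: move the replication cursor forward
	// so obsolete holds/bookmarks can be destroyed.
	SenderPostRecvConfirmed(ctx context.Context, jobID JobID, fs, to string) ([]Abstraction, error)
	// On the receiver after a successful receive: e.g. move the last-received-hold.
	ReceiverPostRecv(ctx context.Context, jobID JobID, fs, received string) ([]Abstraction, error)
}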
@@ -261,7 +248,6 @@ func (s *Sender) Send(ctx context.Context, r *pdu.SendReq) (*pdu.SendRes, io.Rea
 	//
 	// Note further that a resuming send, due to the idempotent nature of func CreateReplicationCursor and HoldStep,
 	// will never lose its step holds because we just (idempotently re-)created them above, before attempting the cleanup.
-	liveAbs := []Abstraction{fromHold, toHold, fromReplicationCursor}
 	func() {
 		ctx, endSpan := trace.WithSpan(ctx, "cleanup-stale-abstractions")
 		defer endSpan()
@@ -283,22 +269,29 @@ func (s *Sender) Send(ctx context.Context, r *pdu.SendReq) (*pdu.SendRes, io.Rea
 		for _, staleVersion := range obsoleteAbs {
 			for _, mustLiveVersion := range mustLiveVersions {
 				isSendArg := zfs.FilesystemVersionEqualIdentity(mustLiveVersion, staleVersion.GetFilesystemVersion())
-				isStepHoldWeMightHaveCreatedWithCurrentValueOf_takeStepHolds :=
-					takeStepHolds && staleVersion.GetType() == AbstractionStepHold
-				if isSendArg && isStepHoldWeMightHaveCreatedWithCurrentValueOf_takeStepHolds {
+				stepHoldBasedGuaranteeStrategy := false
+				k := replicationGuaranteeStrategy.Kind()
+				switch k {
+				case ReplicationGuaranteeKindResumability:
+					stepHoldBasedGuaranteeStrategy = true
+				case ReplicationGuaranteeKindIncremental:
+				case ReplicationGuaranteeKindNone:
+				default:
+					panic(fmt.Sprintf("this is supposed to be an exhaustive match, got %v", k))
+				}
+				isSnapshot := mustLiveVersion.IsSnapshot()
+				if isSendArg && (!isSnapshot || stepHoldBasedGuaranteeStrategy) {
 					panic(fmt.Sprintf("impl error: %q would be destroyed because it is considered stale but it is part of of sendArgs=%s", mustLiveVersion.String(), pretty.Sprint(sendArgs)))
 				}
 			}
 		}
 	}
-		sendAbstractionsCacheSingleton.TryBatchDestroy(ctx, s.jobId, sendArgs.FS, keep, check)
-	}()
-	// now add the newly created abstractions to the cleaned-up cache
-	for _, a := range liveAbs {
-		if a != nil {
-			sendAbstractionsCacheSingleton.Put(a)
-		}
-	}
+		destroyTypes := AbstractionTypeSet{
+			AbstractionStepHold:                           true,
+			AbstractionTentativeReplicationCursorBookmark: true,
+		}
+		abstractionsCacheSingleton.TryBatchDestroy(ctx, s.jobId, sendArgs.FS, destroyTypes, keep, check)
+	}()
 
 	sendStream, err := zfs.ZFSSend(ctx, sendArgs)
 	if err != nil {
@@ -332,36 +325,32 @@ func (p *Sender) SendCompleted(ctx context.Context, r *pdu.SendCompletedReq) (*p
 		return nil, errors.Wrap(err, "validate `to` exists")
 	}
 
-	log := func(ctx context.Context) Logger {
-		log := getLogger(ctx).WithField("to_guid", to.Guid).
-			WithField("fs", fs).
-			WithField("to", to.RelName)
-		if from != nil {
-			log = log.WithField("from", from.RelName).WithField("from_guid", from.Guid)
-		}
-		return log
-	}
-
-	toReplicationCursor, err := CreateReplicationCursor(ctx, fs, to, p.jobId)
-	if err != nil {
-		if err == zfs.ErrBookmarkCloningNotSupported {
-			log(ctx).Debug("not setting replication cursor, bookmark cloning not supported")
-		} else {
-			msg := "cannot move replication cursor, keeping hold on `to` until successful"
-			log(ctx).WithError(err).Error(msg)
-			err = errors.Wrap(err, msg)
-			// it is correct to not destroy from and to step holds if we can't move the cursor!
-			return &pdu.SendCompletedRes{}, err
-		}
-	} else {
-		sendAbstractionsCacheSingleton.Put(toReplicationCursor)
-		log(ctx).WithField("to_cursor", toReplicationCursor.String()).Info("successfully created `to` replication cursor")
-	}
-
-	keep := func(a Abstraction) bool {
-		return AbstractionEquals(a, toReplicationCursor)
-	}
-	sendAbstractionsCacheSingleton.TryBatchDestroy(ctx, p.jobId, fs, keep, nil)
+	replicationGuaranteeOptions, err := replicationGuaranteeOptionsFromPDU(orig.GetReplicationConfig().Protection)
+	if err != nil {
+		return nil, err
+	}
+	liveAbs, err := replicationGuaranteeOptions.Strategy(from != nil).SenderPostRecvConfirmed(ctx, p.jobId, fs, to)
+	if err != nil {
+		return nil, err
+	}
+	for _, a := range liveAbs {
+		if a != nil {
+			abstractionsCacheSingleton.Put(a)
+		}
+	}
+	keep := func(a Abstraction) (keep bool) {
+		keep = false
+		for _, k := range liveAbs {
+			keep = keep || AbstractionEquals(a, k)
+		}
+		return keep
+	}
+	destroyTypes := AbstractionTypeSet{
+		AbstractionStepHold:                           true,
+		AbstractionTentativeReplicationCursorBookmark: true,
+		AbstractionReplicationCursorBookmarkV2:        true,
+	}
+	abstractionsCacheSingleton.TryBatchDestroy(ctx, p.jobId, fs, destroyTypes, keep, nil)
 
 	return &pdu.SendCompletedRes{}, nil
 
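In both `Send` and `SendCompleted` above, cleanup follows the same pattern: keep everything the strategy just created (`liveAbs`) and let `TryBatchDestroy` remove any other zrepl-managed abstraction of the listed types. A compact sketch of that predicate-based cleanup, with a stubbed destroy loop standing in for the cache internals (names follow the diff, the loop itself is simplified):

// Sketch: keep-predicate cleanup as used above. The destroy loop is a stand-in
// for abstractionsCacheSingleton.TryBatchDestroy; the real code destroys the
// ZFS hold or bookmark instead of collecting it.
func cleanupSketch(all, liveAbs []Abstraction, destroyTypes AbstractionTypeSet) (destroyed []Abstraction) {
	keep := func(a Abstraction) bool {
		for _, k := range liveAbs {
			if k != nil && AbstractionEquals(a, k) {
				return true
			}
		}
		return false
	}
	for _, a := range all {
		if destroyTypes[a.GetType()] && !keep(a) {
			destroyed = append(destroyed, a)
		}
	}
	return destroyed
}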
@@ -437,8 +426,6 @@ type ReceiverConfig struct {
 
 	RootWithoutClientComponent *zfs.DatasetPath // TODO use
 	AppendClientIdentity       bool
-
-	UpdateLastReceivedHold bool
 }
 
 func (c *ReceiverConfig) copyIn() {
@@ -863,12 +850,38 @@ func (s *Receiver) Receive(ctx context.Context, req *pdu.ReceiveReq, receive io.
 		return nil, errors.Wrap(err, msg)
 	}
 
-	if s.conf.UpdateLastReceivedHold {
-		log.Debug("move last-received-hold")
-		if err := MoveLastReceivedHold(ctx, lp.ToString(), toRecvd, s.conf.JobID); err != nil {
-			return nil, errors.Wrap(err, "cannot move last-received-hold")
-		}
-	}
+	replicationGuaranteeOptions, err := replicationGuaranteeOptionsFromPDU(req.GetReplicationConfig().Protection)
+	if err != nil {
+		return nil, err
+	}
+	replicationGuaranteeStrategy := replicationGuaranteeOptions.Strategy(ph.FSExists)
+	liveAbs, err := replicationGuaranteeStrategy.ReceiverPostRecv(ctx, s.conf.JobID, lp.ToString(), toRecvd)
+	if err != nil {
+		return nil, err
+	}
+	for _, a := range liveAbs {
+		if a != nil {
+			abstractionsCacheSingleton.Put(a)
+		}
+	}
+	keep := func(a Abstraction) (keep bool) {
+		keep = false
+		for _, k := range liveAbs {
+			keep = keep || AbstractionEquals(a, k)
+		}
+		return keep
+	}
+	check := func(obsoleteAbs []Abstraction) {
+		for _, abs := range obsoleteAbs {
+			if zfs.FilesystemVersionEqualIdentity(abs.GetFilesystemVersion(), toRecvd) {
+				panic(fmt.Sprintf("would destroy endpoint abstraction around the filesystem version we just received %s", abs))
+			}
+		}
+	}
+	destroyTypes := AbstractionTypeSet{
+		AbstractionLastReceivedHold: true,
+	}
+	abstractionsCacheSingleton.TryBatchDestroy(ctx, s.conf.JobID, lp.ToString(), destroyTypes, keep, check)
 
 	return &pdu.ReceiveRes{}, nil
 }
@@ -10,49 +10,49 @@ import (
 	"github.com/zrepl/zrepl/util/chainlock"
 )
 
-var sendAbstractionsCacheMetrics struct {
+var abstractionsCacheMetrics struct {
 	count prometheus.Gauge
 }
 
 func init() {
-	sendAbstractionsCacheMetrics.count = prometheus.NewGauge(prometheus.GaugeOpts{
+	abstractionsCacheMetrics.count = prometheus.NewGauge(prometheus.GaugeOpts{
 		Namespace: "zrepl",
 		Subsystem: "endpoint",
-		Name:      "send_abstractions_cache_entry_count",
-		Help:      "number of send abstractions tracked in the sendAbstractionsCache data structure",
+		Name:      "abstractions_cache_entry_count",
+		Help:      "number of abstractions tracked in the abstractionsCache data structure",
 	})
 }
 
-var sendAbstractionsCacheSingleton = newSendAbstractionsCache()
+var abstractionsCacheSingleton = newAbstractionsCache()
 
-func SendAbstractionsCacheInvalidate(fs string) {
-	sendAbstractionsCacheSingleton.InvalidateFSCache(fs)
+func AbstractionsCacheInvalidate(fs string) {
+	abstractionsCacheSingleton.InvalidateFSCache(fs)
 }
 
-type sendAbstractionsCacheDidLoadFSState int
+type abstractionsCacheDidLoadFSState int
 
 const (
-	sendAbstractionsCacheDidLoadFSStateNo sendAbstractionsCacheDidLoadFSState = iota // 0-value has meaning
-	sendAbstractionsCacheDidLoadFSStateInProgress
-	sendAbstractionsCacheDidLoadFSStateDone
+	abstractionsCacheDidLoadFSStateNo abstractionsCacheDidLoadFSState = iota // 0-value has meaning
+	abstractionsCacheDidLoadFSStateInProgress
+	abstractionsCacheDidLoadFSStateDone
 )
 
-type sendAbstractionsCache struct {
+type abstractionsCache struct {
 	mtx              chainlock.L
 	abstractions     []Abstraction
-	didLoadFS        map[string]sendAbstractionsCacheDidLoadFSState
+	didLoadFS        map[string]abstractionsCacheDidLoadFSState
 	didLoadFSChanged *sync.Cond
 }
 
-func newSendAbstractionsCache() *sendAbstractionsCache {
-	c := &sendAbstractionsCache{
-		didLoadFS: make(map[string]sendAbstractionsCacheDidLoadFSState),
+func newAbstractionsCache() *abstractionsCache {
+	c := &abstractionsCache{
+		didLoadFS: make(map[string]abstractionsCacheDidLoadFSState),
 	}
 	c.didLoadFSChanged = c.mtx.NewCond()
 	return c
 }
 
-func (s *sendAbstractionsCache) Put(a Abstraction) {
+func (s *abstractionsCache) Put(a Abstraction) {
 	defer s.mtx.Lock().Unlock()
 
 	var zeroJobId JobID
@@ -63,10 +63,10 @@ func (s *sendAbstractionsCache) Put(a Abstraction) {
 	}
 
 	s.abstractions = append(s.abstractions, a)
-	sendAbstractionsCacheMetrics.count.Set(float64(len(s.abstractions)))
+	abstractionsCacheMetrics.count.Set(float64(len(s.abstractions)))
 }
 
-func (s *sendAbstractionsCache) InvalidateFSCache(fs string) {
+func (s *abstractionsCache) InvalidateFSCache(fs string) {
 	// FIXME: O(n)
 	newAbs := make([]Abstraction, 0, len(s.abstractions))
 	for _, a := range s.abstractions {
@@ -75,9 +75,9 @@ func (s *sendAbstractionsCache) InvalidateFSCache(fs string) {
 	}
 	s.abstractions = newAbs
-	sendAbstractionsCacheMetrics.count.Set(float64(len(s.abstractions)))
+	abstractionsCacheMetrics.count.Set(float64(len(s.abstractions)))
 
-	s.didLoadFS[fs] = sendAbstractionsCacheDidLoadFSStateNo
+	s.didLoadFS[fs] = abstractionsCacheDidLoadFSStateNo
 	s.didLoadFSChanged.Broadcast()
 
 }
@@ -86,7 +86,7 @@ func (s *sendAbstractionsCache) InvalidateFSCache(fs string) {
 // - only fetches on-disk abstractions once, but every time from the in-memory store
 //
 // That means that for precise results, all abstractions created by the endpoint must be .Put into this cache.
-func (s *sendAbstractionsCache) GetAndDeleteByJobIDAndFS(ctx context.Context, jobID JobID, fs string, keep func(a Abstraction) bool) (ret []Abstraction) {
+func (s *abstractionsCache) GetAndDeleteByJobIDAndFS(ctx context.Context, jobID JobID, fs string, types AbstractionTypeSet, keep func(a Abstraction) bool) (ret []Abstraction) {
 	defer s.mtx.Lock().Unlock()
 	defer trace.WithSpanFromStackUpdateCtx(&ctx)()
 	var zeroJobId JobID
@@ -97,50 +97,50 @@ func (s *sendAbstractionsCache) GetAndDeleteByJobIDAndFS(ctx context.Context, jo
 		panic("must not pass zero-value fs")
 	}
 
-	s.tryLoadOnDiskSendAbstractions(ctx, fs)
+	s.tryLoadOnDiskAbstractions(ctx, fs)
 
 	// FIXME O(n)
 	var remaining []Abstraction
 	for _, a := range s.abstractions {
 		aJobId := *a.GetJobID()
 		aFS := a.GetFS()
-		if aJobId == jobID && aFS == fs && !keep(a) {
+		if aJobId == jobID && aFS == fs && types[a.GetType()] && !keep(a) {
 			ret = append(ret, a)
 		} else {
 			remaining = append(remaining, a)
 		}
 	}
 	s.abstractions = remaining
-	sendAbstractionsCacheMetrics.count.Set(float64(len(s.abstractions)))
+	abstractionsCacheMetrics.count.Set(float64(len(s.abstractions)))
 
 	return ret
 }
 
 // caller must hold s.mtx
-func (s *sendAbstractionsCache) tryLoadOnDiskSendAbstractions(ctx context.Context, fs string) {
-	for s.didLoadFS[fs] != sendAbstractionsCacheDidLoadFSStateDone {
-		if s.didLoadFS[fs] == sendAbstractionsCacheDidLoadFSStateInProgress {
+func (s *abstractionsCache) tryLoadOnDiskAbstractions(ctx context.Context, fs string) {
+	for s.didLoadFS[fs] != abstractionsCacheDidLoadFSStateDone {
+		if s.didLoadFS[fs] == abstractionsCacheDidLoadFSStateInProgress {
 			s.didLoadFSChanged.Wait()
 			continue
 		}
-		if s.didLoadFS[fs] != sendAbstractionsCacheDidLoadFSStateNo {
+		if s.didLoadFS[fs] != abstractionsCacheDidLoadFSStateNo {
 			panic(fmt.Sprintf("unreachable: %v", s.didLoadFS[fs]))
 		}
 
-		s.didLoadFS[fs] = sendAbstractionsCacheDidLoadFSStateInProgress
+		s.didLoadFS[fs] = abstractionsCacheDidLoadFSStateInProgress
 		defer s.didLoadFSChanged.Broadcast()
 
 		var onDiskAbs []Abstraction
 		var err error
 		s.mtx.DropWhile(func() {
-			onDiskAbs, err = s.tryLoadOnDiskSendAbstractionsImpl(ctx, fs) // no shadow
+			onDiskAbs, err = s.tryLoadOnDiskAbstractionsImpl(ctx, fs) // no shadow
 		})
 
 		if err != nil {
-			s.didLoadFS[fs] = sendAbstractionsCacheDidLoadFSStateNo
-			getLogger(ctx).WithField("fs", fs).WithError(err).Error("cannot list send step abstractions for filesystem")
+			s.didLoadFS[fs] = abstractionsCacheDidLoadFSStateNo
+			getLogger(ctx).WithField("fs", fs).WithError(err).Error("cannot list abstractions for filesystem")
 		} else {
-			s.didLoadFS[fs] = sendAbstractionsCacheDidLoadFSStateDone
+			s.didLoadFS[fs] = abstractionsCacheDidLoadFSStateDone
 			s.abstractions = append(s.abstractions, onDiskAbs...)
 			getLogger(ctx).WithField("fs", fs).WithField("abstractions", onDiskAbs).Debug("loaded step abstractions for filesystem")
 		}
@@ -149,7 +149,7 @@ func (s *sendAbstractionsCache) tryLoadOnDiskSendAbstractions(ctx context.Contex
 }
 
 // caller should _not hold s.mtx
-func (s *sendAbstractionsCache) tryLoadOnDiskSendAbstractionsImpl(ctx context.Context, fs string) ([]Abstraction, error) {
+func (s *abstractionsCache) tryLoadOnDiskAbstractionsImpl(ctx context.Context, fs string) ([]Abstraction, error) {
 	defer trace.WithSpanFromStackUpdateCtx(&ctx)()
 
 	q := ListZFSHoldsAndBookmarksQuery{
@@ -158,9 +158,10 @@ func (s *sendAbstractionsCache) tryLoadOnDiskSendAbstractionsImpl(ctx context.Co
 		},
 		JobID: nil,
 		What: AbstractionTypeSet{
 			AbstractionStepHold:                           true,
-			AbstractionStepBookmark:                       true,
-			AbstractionReplicationCursorBookmarkV2:        true,
+			AbstractionTentativeReplicationCursorBookmark: true,
+			AbstractionReplicationCursorBookmarkV2:        true,
+			AbstractionLastReceivedHold:                   true,
 		},
 		Concurrency: 1,
 	}
@@ -175,12 +176,12 @@ func (s *sendAbstractionsCache) tryLoadOnDiskSendAbstractionsImpl(ctx context.Co
 	return abs, nil
 }
 
-func (s *sendAbstractionsCache) TryBatchDestroy(ctx context.Context, jobId JobID, fs string, keep func(a Abstraction) bool, check func(willDestroy []Abstraction)) {
+func (s *abstractionsCache) TryBatchDestroy(ctx context.Context, jobId JobID, fs string, types AbstractionTypeSet, keep func(a Abstraction) bool, check func(willDestroy []Abstraction)) {
 	// no s.mtx, we only use the public interface in this function
 
 	defer trace.WithSpanFromStackUpdateCtx(&ctx)()
 
-	obsoleteAbs := s.GetAndDeleteByJobIDAndFS(ctx, jobId, fs, keep)
+	obsoleteAbs := s.GetAndDeleteByJobIDAndFS(ctx, jobId, fs, types, keep)
 
 	if check != nil {
 		check(obsoleteAbs)
@@ -193,11 +194,11 @@ func (s *sendAbstractionsCache) TryBatchDestroy(ctx context.Context, jobId JobID
 			getLogger(ctx).
 				WithField("abstraction", res.Abstraction).
 				WithError(res.DestroyErr).
-				Error("cannot destroy stale send step abstraction")
+				Error("cannot destroy abstraction")
 		} else {
 			getLogger(ctx).
 				WithField("abstraction", res.Abstraction).
-				Info("destroyed stale send step abstraction")
+				Info("destroyed abstraction")
 		}
 	}
 	if hadErr {
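
The renamed cache now filters by abstraction type in addition to job and filesystem. A minimal sketch of how a caller is expected to drive it, mirroring the Send/Receive call sites above; the filesystem name, job ID, and liveAbs slice are illustrative placeholders, not values taken from this commit:

	// Hypothetical call site: destroy this job's step holds and tentative
	// cursors on the given filesystem, except the abstractions in liveAbs.
	destroyTypes := AbstractionTypeSet{
		AbstractionStepHold:                           true,
		AbstractionTentativeReplicationCursorBookmark: true,
	}
	keep := func(a Abstraction) bool {
		for _, k := range liveAbs {
			if AbstractionEquals(a, k) {
				return true
			}
		}
		return false
	}
	abstractionsCacheSingleton.TryBatchDestroy(ctx, jobID, "pool/ds", destroyTypes, keep, nil)
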
endpoint/endpoint_guarantees.go (new file, 213 lines):

package endpoint

import (
	"context"
	"fmt"

	"github.com/pkg/errors"

	"github.com/zrepl/zrepl/replication/logic/pdu"
	"github.com/zrepl/zrepl/zfs"
)

type ReplicationGuaranteeOptions struct {
	Initial     ReplicationGuaranteeKind
	Incremental ReplicationGuaranteeKind
}

func replicationGuaranteeOptionsFromPDU(in *pdu.ReplicationConfigProtection) (o ReplicationGuaranteeOptions, _ error) {
	if in == nil {
		return o, errors.New("pdu.ReplicationConfigProtection must not be nil")
	}
	initial, err := replicationGuaranteeKindFromPDU(in.GetInitial())
	if err != nil {
		return o, errors.Wrap(err, "pdu.ReplicationConfigProtection: field Initial")
	}
	incremental, err := replicationGuaranteeKindFromPDU(in.GetIncremental())
	if err != nil {
		return o, errors.Wrap(err, "pdu.ReplicationConfigProtection: field Incremental")
	}
	o = ReplicationGuaranteeOptions{
		Initial:     initial,
		Incremental: incremental,
	}
	return o, nil
}

func replicationGuaranteeKindFromPDU(in pdu.ReplicationGuaranteeKind) (k ReplicationGuaranteeKind, _ error) {
	switch in {
	case pdu.ReplicationGuaranteeKind_GuaranteeNothing:
		return ReplicationGuaranteeKindNone, nil
	case pdu.ReplicationGuaranteeKind_GuaranteeIncrementalReplication:
		return ReplicationGuaranteeKindIncremental, nil
	case pdu.ReplicationGuaranteeKind_GuaranteeResumability:
		return ReplicationGuaranteeKindResumability, nil

	case pdu.ReplicationGuaranteeKind_GuaranteeInvalid:
		fallthrough
	default:
		return k, errors.Errorf("%q", in.String())
	}
}

func (o ReplicationGuaranteeOptions) Strategy(incremental bool) ReplicationGuaranteeStrategy {
	g := o.Initial
	if incremental {
		g = o.Incremental
	}
	return ReplicationGuaranteeFromKind(g)
}

//go:generate enumer -type=ReplicationGuaranteeKind -json -transform=snake -trimprefix=ReplicationGuaranteeKind
type ReplicationGuaranteeKind int

const (
	ReplicationGuaranteeKindResumability ReplicationGuaranteeKind = 1 << iota
	ReplicationGuaranteeKindIncremental
	ReplicationGuaranteeKindNone
)

type ReplicationGuaranteeStrategy interface {
	Kind() ReplicationGuaranteeKind
	SenderPreSend(ctx context.Context, jid JobID, sendArgs *zfs.ZFSSendArgsValidated) (keep []Abstraction, err error)
	ReceiverPostRecv(ctx context.Context, jid JobID, fs string, toRecvd zfs.FilesystemVersion) (keep []Abstraction, err error)
	SenderPostRecvConfirmed(ctx context.Context, jid JobID, fs string, to zfs.FilesystemVersion) (keep []Abstraction, err error)
}

func ReplicationGuaranteeFromKind(k ReplicationGuaranteeKind) ReplicationGuaranteeStrategy {
	switch k {
	case ReplicationGuaranteeKindNone:
		return ReplicationGuaranteeNone{}
	case ReplicationGuaranteeKindIncremental:
		return ReplicationGuaranteeIncremental{}
	case ReplicationGuaranteeKindResumability:
		return ReplicationGuaranteeResumability{}
	default:
		panic(fmt.Sprintf("unreachable: %q %T", k, k))
	}
}

type ReplicationGuaranteeNone struct{}

func (g ReplicationGuaranteeNone) Kind() ReplicationGuaranteeKind {
	return ReplicationGuaranteeKindNone
}

func (g ReplicationGuaranteeNone) SenderPreSend(ctx context.Context, jid JobID, sendArgs *zfs.ZFSSendArgsValidated) (keep []Abstraction, err error) {
	return nil, nil
}

func (g ReplicationGuaranteeNone) ReceiverPostRecv(ctx context.Context, jid JobID, fs string, toRecvd zfs.FilesystemVersion) (keep []Abstraction, err error) {
	return nil, nil
}

func (g ReplicationGuaranteeNone) SenderPostRecvConfirmed(ctx context.Context, jid JobID, fs string, to zfs.FilesystemVersion) (keep []Abstraction, err error) {
	return nil, nil
}

type ReplicationGuaranteeIncremental struct{}

func (g ReplicationGuaranteeIncremental) Kind() ReplicationGuaranteeKind {
	return ReplicationGuaranteeKindIncremental
}

func (g ReplicationGuaranteeIncremental) SenderPreSend(ctx context.Context, jid JobID, sendArgs *zfs.ZFSSendArgsValidated) (keep []Abstraction, err error) {
	if sendArgs.FromVersion != nil {
		from, err := CreateTentativeReplicationCursor(ctx, sendArgs.FS, *sendArgs.FromVersion, jid)
		if err != nil {
			if err == zfs.ErrBookmarkCloningNotSupported {
				getLogger(ctx).WithField("replication_guarantee", g).
					WithField("bookmark", sendArgs.From.FullPath(sendArgs.FS)).
					Info("bookmark cloning is not supported, speculating that `from` will not be destroyed until step is done")
			} else {
				return nil, err
			}
		}
		keep = append(keep, from)
	}
	to, err := CreateTentativeReplicationCursor(ctx, sendArgs.FS, sendArgs.ToVersion, jid)
	if err != nil {
		return nil, err
	}
	keep = append(keep, to)

	return keep, nil
}

func (g ReplicationGuaranteeIncremental) ReceiverPostRecv(ctx context.Context, jid JobID, fs string, toRecvd zfs.FilesystemVersion) (keep []Abstraction, err error) {
	return receiverPostRecvCommon(ctx, jid, fs, toRecvd)
}

func (g ReplicationGuaranteeIncremental) SenderPostRecvConfirmed(ctx context.Context, jid JobID, fs string, to zfs.FilesystemVersion) (keep []Abstraction, err error) {
	return senderPostRecvConfirmedCommon(ctx, jid, fs, to)
}

type ReplicationGuaranteeResumability struct{}

func (g ReplicationGuaranteeResumability) Kind() ReplicationGuaranteeKind {
	return ReplicationGuaranteeKindResumability
}

func (g ReplicationGuaranteeResumability) SenderPreSend(ctx context.Context, jid JobID, sendArgs *zfs.ZFSSendArgsValidated) (keep []Abstraction, err error) {
	// try to hold the FromVersion
	if sendArgs.FromVersion != nil {
		if sendArgs.FromVersion.Type == zfs.Bookmark {
			getLogger(ctx).WithField("replication_guarantee", g).WithField("fromVersion", sendArgs.FromVersion.FullPath(sendArgs.FS)).
				Debug("cannot hold a bookmark, speculating that `from` will not be destroyed until step is done")
		} else {
			from, err := HoldStep(ctx, sendArgs.FS, *sendArgs.FromVersion, jid)
			if err != nil {
				return nil, err
			}
			keep = append(keep, from)
		}
		// fallthrough
	}

	to, err := HoldStep(ctx, sendArgs.FS, sendArgs.ToVersion, jid)
	if err != nil {
		return nil, err
	}
	keep = append(keep, to)

	return keep, nil
}

func (g ReplicationGuaranteeResumability) ReceiverPostRecv(ctx context.Context, jid JobID, fs string, toRecvd zfs.FilesystemVersion) (keep []Abstraction, err error) {
	return receiverPostRecvCommon(ctx, jid, fs, toRecvd)
}

func (g ReplicationGuaranteeResumability) SenderPostRecvConfirmed(ctx context.Context, jid JobID, fs string, to zfs.FilesystemVersion) (keep []Abstraction, err error) {
	return senderPostRecvConfirmedCommon(ctx, jid, fs, to)
}

// helper function used by multiple strategies
func senderPostRecvConfirmedCommon(ctx context.Context, jid JobID, fs string, to zfs.FilesystemVersion) (keep []Abstraction, err error) {

	log := getLogger(ctx).WithField("toVersion", to.FullPath(fs))

	toReplicationCursor, err := CreateReplicationCursor(ctx, fs, to, jid)
	if err != nil {
		if err == zfs.ErrBookmarkCloningNotSupported {
			log.Debug("not setting replication cursor, bookmark cloning not supported")
		} else {
			msg := "cannot move replication cursor, keeping hold on `to` until successful"
			log.WithError(err).Error(msg)
			err = errors.Wrap(err, msg)
			return nil, err
		}
	} else {
		log.WithField("to_cursor", toReplicationCursor.String()).Info("successfully created `to` replication cursor")
	}

	return []Abstraction{toReplicationCursor}, nil
}

// helper function used by multiple strategies
func receiverPostRecvCommon(ctx context.Context, jid JobID, fs string, toRecvd zfs.FilesystemVersion) (keep []Abstraction, err error) {
	getLogger(ctx).Debug("create new last-received-hold")
	lrh, err := CreateLastReceivedHold(ctx, fs, toRecvd, jid)
	if err != nil {
		return nil, err
	}
	return []Abstraction{lrh}, nil
}
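
The strategy interface above is what both endpoints program against: the active side's `replication.protection` setting is translated into a ReplicationGuaranteeKind per step, and that kind selects the strategy whose hooks create the holds or bookmarks. A rough usage sketch; the variable names are illustrative and not part of the commit:

	// incremental == true when the step has a `from` version, i.e. it is not
	// the initial replication of the filesystem.
	opts := ReplicationGuaranteeOptions{
		Initial:     ReplicationGuaranteeKindResumability,
		Incremental: ReplicationGuaranteeKindIncremental,
	}
	strategy := opts.Strategy(incremental)
	// The sender invokes SenderPreSend before `zfs send`, the receiver invokes
	// ReceiverPostRecv after a successful `zfs recv`; both Put the returned
	// abstractions into the abstractions cache to keep them alive.
	liveAbs, err := strategy.SenderPreSend(ctx, jobID, &validatedSendArgs)
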
@@ -3,5 +3,5 @@ package endpoint
 import "github.com/prometheus/client_golang/prometheus"
 
 func RegisterMetrics(r prometheus.Registerer) {
-	r.MustRegister(sendAbstractionsCacheMetrics.count)
+	r.MustRegister(abstractionsCacheMetrics.count)
 }
@@ -23,19 +23,19 @@ type AbstractionType string
 // There are a lot of exhaustive switches on AbstractionType in the code base.
 // When adding a new abstraction type, make sure to search and update them!
 const (
-	AbstractionStepBookmark     AbstractionType = "step-bookmark"
-	AbstractionStepHold         AbstractionType = "step-hold"
-	AbstractionLastReceivedHold AbstractionType = "last-received-hold"
+	AbstractionStepHold                           AbstractionType = "step-hold"
+	AbstractionLastReceivedHold                   AbstractionType = "last-received-hold"
+	AbstractionTentativeReplicationCursorBookmark AbstractionType = "tentative-replication-cursor-bookmark-v2"
 	AbstractionReplicationCursorBookmarkV1 AbstractionType = "replication-cursor-bookmark-v1"
 	AbstractionReplicationCursorBookmarkV2 AbstractionType = "replication-cursor-bookmark-v2"
 )
 
 var AbstractionTypesAll = map[AbstractionType]bool{
-	AbstractionStepBookmark:     true,
-	AbstractionStepHold:         true,
-	AbstractionLastReceivedHold: true,
+	AbstractionStepHold:                           true,
+	AbstractionLastReceivedHold:                   true,
+	AbstractionTentativeReplicationCursorBookmark: true,
 	AbstractionReplicationCursorBookmarkV1: true,
 	AbstractionReplicationCursorBookmarkV2: true,
 }
 
 // Implementation Note:
@@ -80,12 +80,12 @@ func AbstractionEquals(a, b Abstraction) bool {
 
 func (t AbstractionType) Validate() error {
 	switch t {
-	case AbstractionStepBookmark:
-		return nil
 	case AbstractionStepHold:
 		return nil
 	case AbstractionLastReceivedHold:
 		return nil
+	case AbstractionTentativeReplicationCursorBookmark:
+		return nil
 	case AbstractionReplicationCursorBookmarkV1:
 		return nil
 	case AbstractionReplicationCursorBookmarkV2:
@@ -185,8 +185,8 @@ type BookmarkExtractor func(fs *zfs.DatasetPath, v zfs.FilesystemVersion) Abstra
 // returns nil if the abstraction type is not bookmark-based
 func (t AbstractionType) BookmarkExtractor() BookmarkExtractor {
 	switch t {
-	case AbstractionStepBookmark:
-		return StepBookmarkExtractor
+	case AbstractionTentativeReplicationCursorBookmark:
+		return TentativeReplicationCursorExtractor
 	case AbstractionReplicationCursorBookmarkV1:
 		return ReplicationCursorV1Extractor
 	case AbstractionReplicationCursorBookmarkV2:
@@ -205,7 +205,7 @@ type HoldExtractor = func(fs *zfs.DatasetPath, v zfs.FilesystemVersion, tag stri
 // returns nil if the abstraction type is not hold-based
 func (t AbstractionType) HoldExtractor() HoldExtractor {
 	switch t {
-	case AbstractionStepBookmark:
+	case AbstractionTentativeReplicationCursorBookmark:
 		return nil
 	case AbstractionReplicationCursorBookmarkV1:
 		return nil
@@ -220,6 +220,23 @@ func (t AbstractionType) HoldExtractor() HoldExtractor {
 	}
 }
 
+func (t AbstractionType) BookmarkNamer() func(fs string, guid uint64, jobId JobID) (string, error) {
+	switch t {
+	case AbstractionTentativeReplicationCursorBookmark:
+		return TentativeReplicationCursorBookmarkName
+	case AbstractionReplicationCursorBookmarkV1:
+		panic("shouldn't be creating new ones")
+	case AbstractionReplicationCursorBookmarkV2:
+		return ReplicationCursorBookmarkName
+	case AbstractionStepHold:
+		return nil
+	case AbstractionLastReceivedHold:
+		return nil
+	default:
+		panic(fmt.Sprintf("unimpl: %q", t))
+	}
+}
+
 type ListZFSHoldsAndBookmarksQuery struct {
 	FS ListZFSHoldsAndBookmarksQueryFilesystemFilter
 	// What abstraction types should match (any contained in the set)
@@ -697,11 +714,9 @@ func ListStale(ctx context.Context, q ListZFSHoldsAndBookmarksQuery) (*Staleness
 		return nil, &ListStaleQueryError{errors.New("ListStale cannot have Until != nil set on query")}
 	}
 
-	// if asking for step holds, must also as for step bookmarks (same kind of abstraction)
-	// as well as replication cursor bookmarks (for firstNotStale)
+	// if asking for step holds must also ask for replication cursor bookmarks (for firstNotStale)
 	ifAnyThenAll := AbstractionTypeSet{
 		AbstractionStepHold:                    true,
-		AbstractionStepBookmark:                true,
 		AbstractionReplicationCursorBookmarkV2: true,
 	}
 	if q.What.ContainsAnyOf(ifAnyThenAll) && !q.What.ContainsAll(ifAnyThenAll) {
@@ -730,7 +745,7 @@ type fsAjobAtype struct {
 }
 
 // For step holds and bookmarks, only those older than the most recent replication cursor
-// of their (filesystem,job) is considered because younger ones cannot be stale by definition
+// of their (filesystem,job) are considered because younger ones cannot be stale by definition
 // (if we destroy them, we might actually lose the hold on the `To` for an ongoing incremental replication)
 //
 // For replication cursors and last-received-holds, only the most recent one is kept.
@@ -772,8 +787,6 @@ func listStaleFiltering(abs []Abstraction, sinceBound *CreateTXGRangeBound) *Sta
 		}
 
 		// stepFirstNotStaleCandidate.step
-		case AbstractionStepBookmark:
-			fallthrough
 		case AbstractionStepHold:
 			if c.step == nil || (*c.step).GetCreateTXG() < a.GetCreateTXG() {
 				a := a
@@ -797,7 +810,7 @@ func listStaleFiltering(abs []Abstraction, sinceBound *CreateTXGRangeBound) *Sta
 	for k := range by {
 		l := by[k]
 
-		if k.Type == AbstractionStepHold || k.Type == AbstractionStepBookmark {
+		if k.Type == AbstractionStepHold {
 			// all older than the most recent cursor are stale, others are always live
 
 			// if we don't have a replication cursor yet, use untilBound = nil
endpoint/endpoint_zfs_abstraction_last_received_hold.go (new file, 91 lines):

package endpoint

import (
	"context"
	"fmt"
	"regexp"

	"github.com/pkg/errors"

	"github.com/zrepl/zrepl/zfs"
)

const (
	LastReceivedHoldTagNamePrefix = "zrepl_last_received_J_"
)

var lastReceivedHoldTagRE = regexp.MustCompile("^zrepl_last_received_J_(.+)$")

var _ HoldExtractor = LastReceivedHoldExtractor

func LastReceivedHoldExtractor(fs *zfs.DatasetPath, v zfs.FilesystemVersion, holdTag string) Abstraction {
	var err error

	if v.Type != zfs.Snapshot {
		panic("impl error")
	}

	jobID, err := ParseLastReceivedHoldTag(holdTag)
	if err == nil {
		return &holdBasedAbstraction{
			Type:              AbstractionLastReceivedHold,
			FS:                fs.ToString(),
			FilesystemVersion: v,
			Tag:               holdTag,
			JobID:             jobID,
		}
	}
	return nil
}

// err != nil always means that the bookmark is not a step bookmark
func ParseLastReceivedHoldTag(tag string) (JobID, error) {
	match := lastReceivedHoldTagRE.FindStringSubmatch(tag)
	if match == nil {
		return JobID{}, errors.Errorf("parse last-received-hold tag: does not match regex %s", lastReceivedHoldTagRE.String())
	}
	jobId, err := MakeJobID(match[1])
	if err != nil {
		return JobID{}, errors.Wrap(err, "parse last-received-hold tag: invalid job id field")
	}
	return jobId, nil
}

func LastReceivedHoldTag(jobID JobID) (string, error) {
	return lastReceivedHoldImpl(jobID.String())
}

func lastReceivedHoldImpl(jobid string) (string, error) {
	tag := fmt.Sprintf("%s%s", LastReceivedHoldTagNamePrefix, jobid)
	if err := zfs.ValidHoldTag(tag); err != nil {
		return "", err
	}
	return tag, nil
}

func CreateLastReceivedHold(ctx context.Context, fs string, to zfs.FilesystemVersion, jobID JobID) (Abstraction, error) {

	if !to.IsSnapshot() {
		return nil, errors.Errorf("last-received-hold: target must be a snapshot: %s", to.FullPath(fs))
	}

	tag, err := LastReceivedHoldTag(jobID)
	if err != nil {
		return nil, errors.Wrap(err, "last-received-hold: hold tag")
	}

	// we never want to be without a hold
	// => hold new one before releasing old hold

	err = zfs.ZFSHold(ctx, fs, to, tag)
	if err != nil {
		return nil, errors.Wrap(err, "last-received-hold: hold newly received")
	}

	return &holdBasedAbstraction{
		Type:              AbstractionLastReceivedHold,
		FS:                fs,
		FilesystemVersion: to,
		JobID:             jobID,
		Tag:               tag,
	}, nil
}
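
For orientation, the hold tag that CreateLastReceivedHold places is simply the prefix followed by the job name, and ParseLastReceivedHoldTag reverses that mapping. The snippet below is a sketch derived from the functions above, not an excerpt from the commit; the job name "sink" is an illustrative example:

	tag, err := LastReceivedHoldTag(jobID) // e.g. "zrepl_last_received_J_sink" for a job named "sink"
	if err != nil {
		return err
	}
	parsedJobID, err := ParseLastReceivedHoldTag(tag) // round-trips back to jobID
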
@@ -4,12 +4,9 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
-	"regexp"
 	"sort"
 
 	"github.com/pkg/errors"
 
-	"github.com/zrepl/zrepl/util/errorarray"
 	"github.com/zrepl/zrepl/zfs"
 )
 
@@ -53,6 +50,28 @@ func ParseReplicationCursorBookmarkName(fullname string) (uint64, JobID, error)
 	return guid, jobID, err
 }
 
+const tentativeReplicationCursorBookmarkNamePrefix = "zrepl_CURSORTENTATIVE_"
+
+// v must be validated by caller
+func TentativeReplicationCursorBookmarkName(fs string, guid uint64, id JobID) (string, error) {
+	return tentativeReplicationCursorBookmarkNameImpl(fs, guid, id.String())
+}
+
+func tentativeReplicationCursorBookmarkNameImpl(fs string, guid uint64, jobid string) (string, error) {
+	return makeJobAndGuidBookmarkName(tentativeReplicationCursorBookmarkNamePrefix, fs, guid, jobid)
+}
+
+// name is the full bookmark name, including dataset path
+//
+// err != nil always means that the bookmark is not a step bookmark
+func ParseTentativeReplicationCursorBookmarkName(fullname string) (guid uint64, jobID JobID, err error) {
+	guid, jobID, err = parseJobAndGuidBookmarkName(fullname, tentativeReplicationCursorBookmarkNamePrefix)
+	if err != nil {
+		err = errors.Wrap(err, "parse step bookmark name") // no shadow!
+	}
+	return guid, jobID, err
+}
+
 // may return nil for both values, indicating there is no cursor
 func GetMostRecentReplicationCursorOfJob(ctx context.Context, fs string, jobID JobID) (*zfs.FilesystemVersion, error) {
 	fsp, err := zfs.NewDatasetPath(fs)
@@ -122,23 +141,23 @@ func GetReplicationCursors(ctx context.Context, dp *zfs.DatasetPath, jobID JobID
 //
 // returns ErrBookmarkCloningNotSupported if version is a bookmark and bookmarking bookmarks is not supported by ZFS
 func CreateReplicationCursor(ctx context.Context, fs string, target zfs.FilesystemVersion, jobID JobID) (a Abstraction, err error) {
+	return createBookmarkAbstraction(ctx, AbstractionReplicationCursorBookmarkV2, fs, target, jobID)
+}
+
+func CreateTentativeReplicationCursor(ctx context.Context, fs string, target zfs.FilesystemVersion, jobID JobID) (a Abstraction, err error) {
+	return createBookmarkAbstraction(ctx, AbstractionTentativeReplicationCursorBookmark, fs, target, jobID)
+}
+
+func createBookmarkAbstraction(ctx context.Context, abstractionType AbstractionType, fs string, target zfs.FilesystemVersion, jobID JobID) (a Abstraction, err error) {
 
-	bookmarkname, err := ReplicationCursorBookmarkName(fs, target.GetGuid(), jobID)
+	bookmarkNamer := abstractionType.BookmarkNamer()
+	if bookmarkNamer == nil {
+		panic(abstractionType)
+	}
+
+	bookmarkname, err := bookmarkNamer(fs, target.GetGuid(), jobID)
 	if err != nil {
-		return nil, errors.Wrap(err, "determine replication cursor name")
+		return nil, errors.Wrapf(err, "determine %s name", abstractionType)
 	}
-
-	if target.IsBookmark() && target.GetName() == bookmarkname {
-		return &bookmarkBasedAbstraction{
-			Type:              AbstractionReplicationCursorBookmarkV2,
-			FS:                fs,
-			FilesystemVersion: target,
-			JobID:             jobID,
-		}, nil
-	}
-
-	if !target.IsSnapshot() {
-		return nil, zfs.ErrBookmarkCloningNotSupported
-	}
 
 	// idempotently create bookmark (guid is encoded in it)
@@ -152,125 +171,13 @@ func CreateReplicationCursor(ctx context.Context, fs string, target zfs.Filesyst
 	}
 
 	return &bookmarkBasedAbstraction{
-		Type:              AbstractionReplicationCursorBookmarkV2,
+		Type:              abstractionType,
 		FS:                fs,
 		FilesystemVersion: cursorBookmark,
 		JobID:             jobID,
 	}, nil
 }
 
-const (
-	ReplicationCursorBookmarkNamePrefix = "zrepl_last_received_J_"
-)
-
-var lastReceivedHoldTagRE = regexp.MustCompile("^zrepl_last_received_J_(.+)$")
-
-// err != nil always means that the bookmark is not a step bookmark
-func ParseLastReceivedHoldTag(tag string) (JobID, error) {
-	match := lastReceivedHoldTagRE.FindStringSubmatch(tag)
-	if match == nil {
-		return JobID{}, errors.Errorf("parse last-received-hold tag: does not match regex %s", lastReceivedHoldTagRE.String())
-	}
-	jobId, err := MakeJobID(match[1])
-	if err != nil {
-		return JobID{}, errors.Wrap(err, "parse last-received-hold tag: invalid job id field")
-	}
-	return jobId, nil
-}
-
-func LastReceivedHoldTag(jobID JobID) (string, error) {
-	return lastReceivedHoldImpl(jobID.String())
-}
-
-func lastReceivedHoldImpl(jobid string) (string, error) {
-	tag := fmt.Sprintf("%s%s", ReplicationCursorBookmarkNamePrefix, jobid)
-	if err := zfs.ValidHoldTag(tag); err != nil {
-		return "", err
-	}
-	return tag, nil
-}
-
-func CreateLastReceivedHold(ctx context.Context, fs string, to zfs.FilesystemVersion, jobID JobID) (Abstraction, error) {
-
-	if !to.IsSnapshot() {
-		return nil, errors.Errorf("last-received-hold: target must be a snapshot: %s", to.FullPath(fs))
-	}
-
-	tag, err := LastReceivedHoldTag(jobID)
-	if err != nil {
-		return nil, errors.Wrap(err, "last-received-hold: hold tag")
-	}
-
-	// we never want to be without a hold
-	// => hold new one before releasing old hold
-
-	err = zfs.ZFSHold(ctx, fs, to, tag)
-	if err != nil {
-		return nil, errors.Wrap(err, "last-received-hold: hold newly received")
-	}
-
-	return &holdBasedAbstraction{
-		Type:              AbstractionLastReceivedHold,
-		FS:                fs,
-		FilesystemVersion: to,
-		JobID:             jobID,
-		Tag:               tag,
-	}, nil
-}
-
-func MoveLastReceivedHold(ctx context.Context, fs string, to zfs.FilesystemVersion, jobID JobID) error {
-
-	_, err := CreateLastReceivedHold(ctx, fs, to, jobID)
-	if err != nil {
-		return err
-	}
-
-	q := ListZFSHoldsAndBookmarksQuery{
-		What: AbstractionTypeSet{
-			AbstractionLastReceivedHold: true,
-		},
-		FS: ListZFSHoldsAndBookmarksQueryFilesystemFilter{
-			FS: &fs,
-		},
-		JobID: &jobID,
-		CreateTXG: CreateTXGRange{
-			Since: nil,
-			Until: &CreateTXGRangeBound{
-				CreateTXG: to.GetCreateTXG(),
-				Inclusive: &zfs.NilBool{B: false},
-			},
-		},
-		Concurrency: 1,
-	}
-	abs, absErrs, err := ListAbstractions(ctx, q)
-	if err != nil {
-		return errors.Wrap(err, "last-received-hold: list")
-	}
-	if len(absErrs) > 0 {
-		return errors.Wrap(ListAbstractionsErrors(absErrs), "last-received-hold: list")
-	}
-
-	getLogger(ctx).WithField("last-received-holds", fmt.Sprintf("%s", abs)).Debug("releasing last-received-holds")
-
-	var errs []error
-	for res := range BatchDestroy(ctx, abs) {
-		log := getLogger(ctx).
-			WithField("last-received-hold", res.Abstraction)
-		if res.DestroyErr != nil {
-			errs = append(errs, res.DestroyErr)
-			log.WithError(err).
-				Error("cannot release last-received-hold")
-		} else {
-			log.Info("released last-received-hold")
-		}
-	}
-	if len(errs) == 0 {
-		return nil
-	} else {
-		return errorarray.Wrap(errs, "last-received-hold: release")
-	}
-}
-
 func ReplicationCursorV2Extractor(fs *zfs.DatasetPath, v zfs.FilesystemVersion) (_ Abstraction) {
 	if v.Type != zfs.Bookmark {
 		panic("impl error")
@@ -308,24 +215,28 @@ func ReplicationCursorV1Extractor(fs *zfs.DatasetPath, v zfs.FilesystemVersion)
 	return nil
 }
 
-var _ HoldExtractor = LastReceivedHoldExtractor
+var _ BookmarkExtractor = TentativeReplicationCursorExtractor
 
-func LastReceivedHoldExtractor(fs *zfs.DatasetPath, v zfs.FilesystemVersion, holdTag string) Abstraction {
-	var err error
-
-	if v.Type != zfs.Snapshot {
+func TentativeReplicationCursorExtractor(fs *zfs.DatasetPath, v zfs.FilesystemVersion) (_ Abstraction) {
+	if v.Type != zfs.Bookmark {
 		panic("impl error")
 	}
 
-	jobID, err := ParseLastReceivedHoldTag(holdTag)
+	fullname := v.ToAbsPath(fs)
+
+	guid, jobid, err := ParseTentativeReplicationCursorBookmarkName(fullname)
+	if guid != v.Guid {
+		// TODO log this possibly tinkered-with bookmark
+		return nil
+	}
 	if err == nil {
-		return &holdBasedAbstraction{
-			Type:              AbstractionLastReceivedHold,
+		bm := &bookmarkBasedAbstraction{
+			Type:              AbstractionTentativeReplicationCursorBookmark,
 			FS:                fs.ToString(),
 			FilesystemVersion: v,
-			Tag:               holdTag,
-			JobID:             jobID,
+			JobID:             jobid,
 		}
+		return bm
 	}
 	return nil
 }
@@ -1,159 +0,0 @@
(entire file deleted; the removed content was:)

package endpoint

import (
	"context"
	"fmt"
	"regexp"

	"github.com/pkg/errors"

	"github.com/zrepl/zrepl/zfs"
)

var stepHoldTagRE = regexp.MustCompile("^zrepl_STEP_J_(.+)")

func StepHoldTag(jobid JobID) (string, error) {
	return stepHoldTagImpl(jobid.String())
}

func stepHoldTagImpl(jobid string) (string, error) {
	t := fmt.Sprintf("zrepl_STEP_J_%s", jobid)
	if err := zfs.ValidHoldTag(t); err != nil {
		return "", err
	}
	return t, nil
}

// err != nil always means that the bookmark is not a step bookmark
func ParseStepHoldTag(tag string) (JobID, error) {
	match := stepHoldTagRE.FindStringSubmatch(tag)
	if match == nil {
		return JobID{}, fmt.Errorf("parse hold tag: match regex %q", stepHoldTagRE)
	}
	jobID, err := MakeJobID(match[1])
	if err != nil {
		return JobID{}, errors.Wrap(err, "parse hold tag: invalid job id field")
	}
	return jobID, nil
}

const stepBookmarkNamePrefix = "zrepl_STEP"

// v must be validated by caller
func StepBookmarkName(fs string, guid uint64, id JobID) (string, error) {
	return stepBookmarkNameImpl(fs, guid, id.String())
}

func stepBookmarkNameImpl(fs string, guid uint64, jobid string) (string, error) {
	return makeJobAndGuidBookmarkName(stepBookmarkNamePrefix, fs, guid, jobid)
}

// name is the full bookmark name, including dataset path
//
// err != nil always means that the bookmark is not a step bookmark
func ParseStepBookmarkName(fullname string) (guid uint64, jobID JobID, err error) {
	guid, jobID, err = parseJobAndGuidBookmarkName(fullname, stepBookmarkNamePrefix)
	if err != nil {
		err = errors.Wrap(err, "parse step bookmark name") // no shadow!
	}
	return guid, jobID, err
}

// idempotently hold / step-bookmark `version`
//
// returns ErrBookmarkCloningNotSupported if version is a bookmark and bookmarking bookmarks is not supported by ZFS
func HoldStep(ctx context.Context, fs string, v zfs.FilesystemVersion, jobID JobID) (Abstraction, error) {
	if v.IsSnapshot() {

		tag, err := StepHoldTag(jobID)
		if err != nil {
			return nil, errors.Wrap(err, "step hold tag")
		}

		if err := zfs.ZFSHold(ctx, fs, v, tag); err != nil {
			return nil, errors.Wrap(err, "step hold: zfs")
		}

		return &holdBasedAbstraction{
			Type:              AbstractionStepHold,
			FS:                fs,
			Tag:               tag,
			JobID:             jobID,
			FilesystemVersion: v,
		}, nil
	}

	if !v.IsBookmark() {
		panic(fmt.Sprintf("version must bei either snapshot or bookmark, got %#v", v))
	}

	bmname, err := StepBookmarkName(fs, v.Guid, jobID)
	if err != nil {
		return nil, errors.Wrap(err, "create step bookmark: determine bookmark name")
	}
	// idempotently create bookmark
	stepBookmark, err := zfs.ZFSBookmark(ctx, fs, v, bmname)
	if err != nil {
		if err == zfs.ErrBookmarkCloningNotSupported {
			// TODO we could actually try to find a local snapshot that has the requested GUID
			// however, the replication algorithm prefers snapshots anyways, so this quest
			// is most likely not going to be successful. Also, there's the possibility that
			// the caller might want to filter what snapshots are eligibile, and this would
			// complicate things even further.
			return nil, err // TODO go1.13 use wrapping
		}
		return nil, errors.Wrap(err, "create step bookmark: zfs")
	}
	return &bookmarkBasedAbstraction{
		Type:              AbstractionStepBookmark,
		FS:                fs,
		FilesystemVersion: stepBookmark,
		JobID:             jobID,
	}, nil
}

var _ BookmarkExtractor = StepBookmarkExtractor

func StepBookmarkExtractor(fs *zfs.DatasetPath, v zfs.FilesystemVersion) (_ Abstraction) {
	if v.Type != zfs.Bookmark {
		panic("impl error")
	}

	fullname := v.ToAbsPath(fs)

	guid, jobid, err := ParseStepBookmarkName(fullname)
	if guid != v.Guid {
		// TODO log this possibly tinkered-with bookmark
		return nil
	}
	if err == nil {
		bm := &bookmarkBasedAbstraction{
			Type:              AbstractionStepBookmark,
			FS:                fs.ToString(),
			FilesystemVersion: v,
			JobID:             jobid,
		}
		return bm
	}
	return nil
}

var _ HoldExtractor = StepHoldExtractor

func StepHoldExtractor(fs *zfs.DatasetPath, v zfs.FilesystemVersion, holdTag string) Abstraction {
	if v.Type != zfs.Snapshot {
		panic("impl error")
	}

	jobID, err := ParseStepHoldTag(holdTag)
	if err == nil {
		return &holdBasedAbstraction{
			Type:              AbstractionStepHold,
			FS:                fs.ToString(),
			Tag:               holdTag,
			FilesystemVersion: v,
			JobID:             jobID,
		}
	}
	return nil
}
endpoint/endpoint_zfs_abstraction_step_hold.go (new file, 83 lines):

package endpoint

import (
	"context"
	"fmt"
	"regexp"

	"github.com/pkg/errors"

	"github.com/zrepl/zrepl/zfs"
)

var stepHoldTagRE = regexp.MustCompile("^zrepl_STEP_J_(.+)")

func StepHoldTag(jobid JobID) (string, error) {
	return stepHoldTagImpl(jobid.String())
}

func stepHoldTagImpl(jobid string) (string, error) {
	t := fmt.Sprintf("zrepl_STEP_J_%s", jobid)
	if err := zfs.ValidHoldTag(t); err != nil {
		return "", err
	}
	return t, nil
}

// err != nil always means that the bookmark is not a step bookmark
func ParseStepHoldTag(tag string) (JobID, error) {
	match := stepHoldTagRE.FindStringSubmatch(tag)
	if match == nil {
		return JobID{}, fmt.Errorf("parse hold tag: match regex %q", stepHoldTagRE)
	}
	jobID, err := MakeJobID(match[1])
	if err != nil {
		return JobID{}, errors.Wrap(err, "parse hold tag: invalid job id field")
	}
	return jobID, nil
}

// idempotently hold `version`
func HoldStep(ctx context.Context, fs string, v zfs.FilesystemVersion, jobID JobID) (Abstraction, error) {
	if !v.IsSnapshot() {
		panic(fmt.Sprintf("version must be a snapshot got %#v", v))
	}

	tag, err := StepHoldTag(jobID)
	if err != nil {
		return nil, errors.Wrap(err, "step hold tag")
	}

	if err := zfs.ZFSHold(ctx, fs, v, tag); err != nil {
		return nil, errors.Wrap(err, "step hold: zfs")
	}

	return &holdBasedAbstraction{
		Type:              AbstractionStepHold,
		FS:                fs,
		Tag:               tag,
		JobID:             jobID,
		FilesystemVersion: v,
	}, nil

}

var _ HoldExtractor = StepHoldExtractor

func StepHoldExtractor(fs *zfs.DatasetPath, v zfs.FilesystemVersion, holdTag string) Abstraction {
	if v.Type != zfs.Snapshot {
		panic("impl error")
	}

	jobID, err := ParseStepHoldTag(holdTag)
	if err == nil {
		return &holdBasedAbstraction{
			Type:              AbstractionStepHold,
			FS:                fs.ToString(),
			Tag:               holdTag,
			FilesystemVersion: v,
			JobID:             jobID,
		}
	}
	return nil
}
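
With step bookmarks gone, HoldStep only ever places a ZFS hold and therefore requires a snapshot; a bookmark `from` version is instead protected by the tentative replication cursor (see ReplicationGuaranteeIncremental above). A minimal usage sketch, assuming `v` is a snapshot version of the illustrative dataset "pool/ds":

	abs, err := HoldStep(ctx, "pool/ds", v, jobID)
	if err != nil {
		// e.g. the hold tag was invalid or `zfs hold` failed
		return err
	}
	abstractionsCacheSingleton.Put(abs) // track it so TryBatchDestroy can consider it later
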
@@ -24,9 +24,9 @@ func MakeJobID(s string) (JobID, error) {
 		return JobID{}, errors.Wrap(err, "must be usable as a dataset path component")
 	}
 
-	if _, err := stepBookmarkNameImpl("pool/ds", 0xface601d, s); err != nil {
+	if _, err := tentativeReplicationCursorBookmarkNameImpl("pool/ds", 0xface601d, s); err != nil {
 		// note that this might still fail due to total maximum name length, but we can't enforce that
-		return JobID{}, errors.Wrap(err, "must be usable for a step bookmark")
+		return JobID{}, errors.Wrap(err, "must be usable for a tentative replication cursor bookmark")
 	}
 
 	if _, err := stepHoldTagImpl(s); err != nil {
80  endpoint/replicationguaranteekind_enumer.go  Normal file
@@ -0,0 +1,80 @@
+// Code generated by "enumer -type=ReplicationGuaranteeKind -json -transform=snake -trimprefix=ReplicationGuaranteeKind"; DO NOT EDIT.
+
+//
+package endpoint
+
+import (
+	"encoding/json"
+	"fmt"
+)
+
+const (
+	_ReplicationGuaranteeKindName_0 = "resumabilityincremental"
+	_ReplicationGuaranteeKindName_1 = "none"
+)
+
+var (
+	_ReplicationGuaranteeKindIndex_0 = [...]uint8{0, 12, 23}
+	_ReplicationGuaranteeKindIndex_1 = [...]uint8{0, 4}
+)
+
+func (i ReplicationGuaranteeKind) String() string {
+	switch {
+	case 1 <= i && i <= 2:
+		i -= 1
+		return _ReplicationGuaranteeKindName_0[_ReplicationGuaranteeKindIndex_0[i]:_ReplicationGuaranteeKindIndex_0[i+1]]
+	case i == 4:
+		return _ReplicationGuaranteeKindName_1
+	default:
+		return fmt.Sprintf("ReplicationGuaranteeKind(%d)", i)
+	}
+}
+
+var _ReplicationGuaranteeKindValues = []ReplicationGuaranteeKind{1, 2, 4}
+
+var _ReplicationGuaranteeKindNameToValueMap = map[string]ReplicationGuaranteeKind{
+	_ReplicationGuaranteeKindName_0[0:12]:  1,
+	_ReplicationGuaranteeKindName_0[12:23]: 2,
+	_ReplicationGuaranteeKindName_1[0:4]:   4,
+}
+
+// ReplicationGuaranteeKindString retrieves an enum value from the enum constants string name.
+// Throws an error if the param is not part of the enum.
+func ReplicationGuaranteeKindString(s string) (ReplicationGuaranteeKind, error) {
+	if val, ok := _ReplicationGuaranteeKindNameToValueMap[s]; ok {
+		return val, nil
+	}
+	return 0, fmt.Errorf("%s does not belong to ReplicationGuaranteeKind values", s)
+}
+
+// ReplicationGuaranteeKindValues returns all values of the enum
+func ReplicationGuaranteeKindValues() []ReplicationGuaranteeKind {
+	return _ReplicationGuaranteeKindValues
+}
+
+// IsAReplicationGuaranteeKind returns "true" if the value is listed in the enum definition. "false" otherwise
+func (i ReplicationGuaranteeKind) IsAReplicationGuaranteeKind() bool {
+	for _, v := range _ReplicationGuaranteeKindValues {
+		if i == v {
+			return true
+		}
+	}
+	return false
+}
+
+// MarshalJSON implements the json.Marshaler interface for ReplicationGuaranteeKind
+func (i ReplicationGuaranteeKind) MarshalJSON() ([]byte, error) {
+	return json.Marshal(i.String())
+}
+
+// UnmarshalJSON implements the json.Unmarshaler interface for ReplicationGuaranteeKind
+func (i *ReplicationGuaranteeKind) UnmarshalJSON(data []byte) error {
+	var s string
+	if err := json.Unmarshal(data, &s); err != nil {
+		return fmt.Errorf("ReplicationGuaranteeKind should be a string, got %s", data)
+	}

+	var err error
+	*i, err = ReplicationGuaranteeKindString(s)
+	return err
+}
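For orientation, the generated helpers above round-trip the endpoint-side guarantee kind through its snake_case string form. The following is a minimal usage sketch, not part of the commit; it assumes the usual module import path for the zrepl mirror.

    package main

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/zrepl/zrepl/endpoint"
    )

    func main() {
    	// parse the string form produced by -transform=snake -trimprefix=ReplicationGuaranteeKind
    	k, err := endpoint.ReplicationGuaranteeKindString("incremental")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(k.IsAReplicationGuaranteeKind()) // true

    	// MarshalJSON serializes the kind as its string name
    	b, _ := json.Marshal(k)
    	fmt.Println(string(b)) // "incremental"
    }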
@@ -20,8 +20,11 @@ var Cases = []Case{BatchDestroy,
 	ReplicationIncrementalCleansUpStaleAbstractionsWithoutCacheOnSecondReplication,
 	ReplicationIncrementalDestroysStepHoldsIffIncrementalStepHoldsAreDisabledButStepHoldsExist,
 	ReplicationIncrementalIsPossibleIfCommonSnapshotIsDestroyed,
-	ReplicationIsResumableFullSend__DisableIncrementalStepHolds_False,
-	ReplicationIsResumableFullSend__DisableIncrementalStepHolds_True,
+	ReplicationIsResumableFullSend__both_GuaranteeResumability,
+	ReplicationIsResumableFullSend__initial_GuaranteeIncrementalReplication_incremental_GuaranteeIncrementalReplication,
+	ReplicationIsResumableFullSend__initial_GuaranteeResumability_incremental_GuaranteeIncrementalReplication,
+	ReplicationStepCompletedLostBehavior__GuaranteeIncrementalReplication,
+	ReplicationStepCompletedLostBehavior__GuaranteeResumability,
 	ResumableRecvAndTokenHandling,
 	ResumeTokenParsing,
 	SendArgsValidationEncryptedSendOfUnencryptedDatasetForbidden,
@@ -27,11 +27,11 @@ import (
 // of a new sender and receiver instance and one blocking invocation
 // of the replication engine without encryption
 type replicationInvocation struct {
 	sjid, rjid      endpoint.JobID
 	sfs             string
 	rfsRoot         string
 	interceptSender func(e *endpoint.Sender) logic.Sender
-	disableIncrementalStepHolds bool
+	guarantee       pdu.ReplicationConfigProtection
 }
 
 func (i replicationInvocation) Do(ctx *platformtest.Context) *report.Report {
@@ -44,19 +44,20 @@ func (i replicationInvocation) Do(ctx *platformtest.Context) *report.Report {
 	err := sfilter.Add(i.sfs, "ok")
 	require.NoError(ctx, err)
 	sender := i.interceptSender(endpoint.NewSender(endpoint.SenderConfig{
 		FSF:     sfilter.AsFilter(),
 		Encrypt: &zfs.NilBool{B: false},
-		DisableIncrementalStepHolds: i.disableIncrementalStepHolds,
 		JobID:   i.sjid,
 	}))
 	receiver := endpoint.NewReceiver(endpoint.ReceiverConfig{
 		JobID:                      i.rjid,
 		AppendClientIdentity:       false,
 		RootWithoutClientComponent: mustDatasetPath(i.rfsRoot),
-		UpdateLastReceivedHold:     true,
 	})
 	plannerPolicy := logic.PlannerPolicy{
 		EncryptedSend: logic.TriFromBool(false),
+		ReplicationConfig: pdu.ReplicationConfig{
+			Protection: &i.guarantee,
+		},
 	}
 
 	report, wait := replication.Do(
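The `guarantee` field wired into `PlannerPolicy` above is how the per-job protection setting reaches the sender and receiver endpoints for every step. As a rough illustration of the other end of that wiring, here is a sketch of mapping configuration strings onto the pdu enum; the `protectionFromConfig` helper name and the setting names (`guarantee_resumability`, `guarantee_incremental`, `guarantee_nothing`) are assumptions for illustration, and the import path is the one conventionally used in this tree.

    package replicationcfg // illustrative package name

    import (
    	"fmt"

    	"github.com/zrepl/zrepl/replication/logic/pdu"
    )

    // protectionFromConfig is a hypothetical helper: it maps yaml-level
    // guarantee names onto the pdu enum that is sent with each step.
    func protectionFromConfig(initial, incremental string) (*pdu.ReplicationConfigProtection, error) {
    	kind := func(s string) (pdu.ReplicationGuaranteeKind, error) {
    		switch s {
    		case "guarantee_resumability":
    			return pdu.ReplicationGuaranteeKind_GuaranteeResumability, nil
    		case "guarantee_incremental":
    			return pdu.ReplicationGuaranteeKind_GuaranteeIncrementalReplication, nil
    		case "guarantee_nothing":
    			return pdu.ReplicationGuaranteeKind_GuaranteeNothing, nil
    		default:
    			return pdu.ReplicationGuaranteeKind_GuaranteeInvalid, fmt.Errorf("unknown protection kind %q", s)
    		}
    	}
    	ini, err := kind(initial)
    	if err != nil {
    		return nil, err
    	}
    	inc, err := kind(incremental)
    	if err != nil {
    		return nil, err
    	}
    	return &pdu.ReplicationConfigProtection{Initial: ini, Incremental: inc}, nil
    }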
@@ -89,11 +90,11 @@ func ReplicationIncrementalIsPossibleIfCommonSnapshotIsDestroyed(ctx *platformte
 	snap1 := fsversion(ctx, sfs, "@1")
 
 	rep := replicationInvocation{
 		sjid:      sjid,
 		rjid:      rjid,
 		sfs:       sfs,
 		rfsRoot:   rfsRoot,
-		disableIncrementalStepHolds: false,
+		guarantee: *pdu.ReplicationConfigProtectionWithKind(pdu.ReplicationGuaranteeKind_GuaranteeResumability),
 	}
 	rfs := rep.ReceiveSideFilesystem()
 
@@ -153,11 +154,11 @@ func implReplicationIncrementalCleansUpStaleAbstractions(ctx *platformtest.Conte
 	rfsRoot := ctx.RootDataset + "/receiver"
 
 	rep := replicationInvocation{
 		sjid:      sjid,
 		rjid:      rjid,
 		sfs:       sfs,
 		rfsRoot:   rfsRoot,
-		disableIncrementalStepHolds: false,
+		guarantee: *pdu.ReplicationConfigProtectionWithKind(pdu.ReplicationGuaranteeKind_GuaranteeResumability),
 	}
 	rfs := rep.ReceiveSideFilesystem()
 
@@ -207,7 +208,7 @@ func implReplicationIncrementalCleansUpStaleAbstractions(ctx *platformtest.Conte
 	snap5 := fsversion(ctx, sfs, "@5")
 
 	if invalidateCacheBeforeSecondReplication {
-		endpoint.SendAbstractionsCacheInvalidate(sfs)
+		endpoint.AbstractionsCacheInvalidate(sfs)
 	}
 
 	// do another replication
@@ -327,15 +328,59 @@ func (s *PartialSender) Send(ctx context.Context, r *pdu.SendReq) (r1 *pdu.SendR
 	return r1, r2, r3
 }
 
-func ReplicationIsResumableFullSend__DisableIncrementalStepHolds_False(ctx *platformtest.Context) {
-	implReplicationIsResumableFullSend(ctx, false)
+func ReplicationIsResumableFullSend__both_GuaranteeResumability(ctx *platformtest.Context) {
+
+	setup := replicationIsResumableFullSendSetup{
+		protection: pdu.ReplicationConfigProtection{
+			Initial:     pdu.ReplicationGuaranteeKind_GuaranteeResumability,
+			Incremental: pdu.ReplicationGuaranteeKind_GuaranteeResumability,
+		},
+		expectDatasetIsBusyErrorWhenDestroySnapshotWhilePartiallyReplicated: true,
+		expectAllThreeSnapshotsToThreeBePresentAfterLoop:                    true,
+		expectNoSnapshotsOnReceiverAfterLoop:                                false,
+	}
+
+	implReplicationIsResumableFullSend(ctx, setup)
 }
 
-func ReplicationIsResumableFullSend__DisableIncrementalStepHolds_True(ctx *platformtest.Context) {
-	implReplicationIsResumableFullSend(ctx, true)
+func ReplicationIsResumableFullSend__initial_GuaranteeResumability_incremental_GuaranteeIncrementalReplication(ctx *platformtest.Context) {
+
+	setup := replicationIsResumableFullSendSetup{
+		protection: pdu.ReplicationConfigProtection{
+			Initial:     pdu.ReplicationGuaranteeKind_GuaranteeResumability,
+			Incremental: pdu.ReplicationGuaranteeKind_GuaranteeIncrementalReplication,
+		},
+		expectDatasetIsBusyErrorWhenDestroySnapshotWhilePartiallyReplicated: true,
+		expectAllThreeSnapshotsToThreeBePresentAfterLoop:                    true,
+		expectNoSnapshotsOnReceiverAfterLoop:                                false,
+	}
+
+	implReplicationIsResumableFullSend(ctx, setup)
 }
 
-func implReplicationIsResumableFullSend(ctx *platformtest.Context, disableIncrementalStepHolds bool) {
+func ReplicationIsResumableFullSend__initial_GuaranteeIncrementalReplication_incremental_GuaranteeIncrementalReplication(ctx *platformtest.Context) {
+
+	setup := replicationIsResumableFullSendSetup{
+		protection: pdu.ReplicationConfigProtection{
+			Initial:     pdu.ReplicationGuaranteeKind_GuaranteeIncrementalReplication,
+			Incremental: pdu.ReplicationGuaranteeKind_GuaranteeIncrementalReplication,
+		},
+		expectDatasetIsBusyErrorWhenDestroySnapshotWhilePartiallyReplicated: false,
+		expectAllThreeSnapshotsToThreeBePresentAfterLoop:                    false,
+		expectNoSnapshotsOnReceiverAfterLoop:                                true,
+	}
+
+	implReplicationIsResumableFullSend(ctx, setup)
+}
+
+type replicationIsResumableFullSendSetup struct {
+	protection                                                           pdu.ReplicationConfigProtection
+	expectDatasetIsBusyErrorWhenDestroySnapshotWhilePartiallyReplicated bool
+	expectAllThreeSnapshotsToThreeBePresentAfterLoop                    bool
+	expectNoSnapshotsOnReceiverAfterLoop                                bool
+}
+
+func implReplicationIsResumableFullSend(ctx *platformtest.Context, setup replicationIsResumableFullSendSetup) {
 
 	platformtest.Run(ctx, platformtest.PanicErr, ctx.RootDataset, `
 		CREATEROOT
@@ -366,8 +411,9 @@ func implReplicationIsResumableFullSend(ctx *platformtest.Context, disableIncrem
 		interceptSender: func(e *endpoint.Sender) logic.Sender {
 			return &PartialSender{Sender: e, failAfterByteCount: 1 << 20}
 		},
-		disableIncrementalStepHolds: disableIncrementalStepHolds,
+		guarantee: setup.protection,
 	}
 
 	rfs := rep.ReceiveSideFilesystem()
 
 	for i := 2; i < 10; i++ {
@@ -381,8 +427,11 @@ func implReplicationIsResumableFullSend(ctx *platformtest.Context, disableIncrem
 		// and we wrote dummy data 1<<22 bytes, thus at least
 		// for the first 4 times this should not be possible
 		// due to step holds
-		require.Error(ctx, err)
-		require.Contains(ctx, err.Error(), "dataset is busy")
+		if setup.expectDatasetIsBusyErrorWhenDestroySnapshotWhilePartiallyReplicated {
+			ctx.Logf("i=%v", i)
+			require.Error(ctx, err)
+			require.Contains(ctx, err.Error(), "dataset is busy")
+		}
 	}
 
 	// and create some additional snapshots that could
@@ -401,11 +450,19 @@ func implReplicationIsResumableFullSend(ctx *platformtest.Context, disableIncrem
 		}
 	}
 
-	// make sure all the filesystem versions we created
-	// were replicated by the replication loop
-	_ = fsversion(ctx, rfs, "@1")
-	_ = fsversion(ctx, rfs, "@2")
-	_ = fsversion(ctx, rfs, "@3")
+	if setup.expectAllThreeSnapshotsToThreeBePresentAfterLoop {
+		// make sure all the filesystem versions we created
+		// were replicated by the replication loop
+		_ = fsversion(ctx, rfs, "@1")
+		_ = fsversion(ctx, rfs, "@2")
+		_ = fsversion(ctx, rfs, "@3")
+	}
+
+	if setup.expectNoSnapshotsOnReceiverAfterLoop {
+		versions, err := zfs.ZFSListFilesystemVersions(ctx, mustDatasetPath(rfs), zfs.ListFilesystemVersionsOptions{})
+		require.NoError(ctx, err)
+		require.Empty(ctx, versions)
+	}
+
 }
 
@@ -428,11 +485,11 @@ func ReplicationIncrementalDestroysStepHoldsIffIncrementalStepHoldsAreDisabledBu
 	{
 		mustSnapshot(ctx, sfs+"@1")
 		rep := replicationInvocation{
 			sjid:      sjid,
 			rjid:      rjid,
 			sfs:       sfs,
 			rfsRoot:   rfsRoot,
-			disableIncrementalStepHolds: false,
+			guarantee: *pdu.ReplicationConfigProtectionWithKind(pdu.ReplicationGuaranteeKind_GuaranteeResumability),
 		}
 		rfs := rep.ReceiveSideFilesystem()
 		report := rep.Do(ctx)
@@ -455,11 +512,11 @@ func ReplicationIncrementalDestroysStepHoldsIffIncrementalStepHoldsAreDisabledBu
 	// to effect a step-holds situation
 	{
 		rep := replicationInvocation{
 			sjid:      sjid,
 			rjid:      rjid,
 			sfs:       sfs,
 			rfsRoot:   rfsRoot,
-			disableIncrementalStepHolds: false, // !
+			guarantee: *pdu.ReplicationConfigProtectionWithKind(pdu.ReplicationGuaranteeKind_GuaranteeResumability), // !
 			interceptSender: func(e *endpoint.Sender) logic.Sender {
 				return &PartialSender{Sender: e, failAfterByteCount: 1 << 20}
 			},
@@ -495,17 +552,17 @@ func ReplicationIncrementalDestroysStepHoldsIffIncrementalStepHoldsAreDisabledBu
 	// end of test setup
 	//
 
-	// retry replication with incremental step holds disabled
+	// retry replication with incremental step holds disabled (set to bookmarks-only in this case)
 	// - replication should not fail due to holds-related stuff
 	// - replication should fail intermittently due to partial sender being fully read
 	// - the partial sender is 1/4th the length of the stream, thus expect
 	//   successful replication after 5 more attempts
 	rep := replicationInvocation{
 		sjid:      sjid,
 		rjid:      rjid,
 		sfs:       sfs,
 		rfsRoot:   rfsRoot,
-		disableIncrementalStepHolds: true, // !
+		guarantee: *pdu.ReplicationConfigProtectionWithKind(pdu.ReplicationGuaranteeKind_GuaranteeIncrementalReplication), // !
 		interceptSender: func(e *endpoint.Sender) logic.Sender {
 			return &PartialSender{Sender: e, failAfterByteCount: 1 << 20}
 		},
@@ -551,3 +608,148 @@ func ReplicationIncrementalDestroysStepHoldsIffIncrementalStepHoldsAreDisabledBu
 	require.Len(ctx, abs, 1)
 	require.True(ctx, zfs.FilesystemVersionEqualIdentity(abs[0].GetFilesystemVersion(), snap2sfs))
 }
+
+func ReplicationStepCompletedLostBehavior__GuaranteeResumability(ctx *platformtest.Context) {
+	scenario := replicationStepCompletedLostBehavior_impl(ctx, pdu.ReplicationGuaranteeKind_GuaranteeResumability)
+
+	require.Error(ctx, scenario.deleteSfs1Err, "protected by holds")
+	require.Contains(ctx, scenario.deleteSfs1Err.Error(), "dataset is busy")
+
+	require.Error(ctx, scenario.deleteSfs2Err, "protected by holds")
+	require.Contains(ctx, scenario.deleteSfs2Err.Error(), "dataset is busy")
+
+	require.Nil(ctx, scenario.finalReport.Error())
+	_ = fsversion(ctx, scenario.rfs, "@3") // @3 made it to the other side
+}
+
+func ReplicationStepCompletedLostBehavior__GuaranteeIncrementalReplication(ctx *platformtest.Context) {
+	scenario := replicationStepCompletedLostBehavior_impl(ctx, pdu.ReplicationGuaranteeKind_GuaranteeIncrementalReplication)
+
+	require.NoError(ctx, scenario.deleteSfs1Err, "not protected by holds")
+	require.NoError(ctx, scenario.deleteSfs2Err, "not protected by holds")
+
+	// step bookmarks should protect against loss of StepCompleted message
+	require.Nil(ctx, scenario.finalReport.Error())
+	_ = fsversion(ctx, scenario.rfs, "@3") // @3 made it to the other side
+}
+
+type FailSendCompletedSender struct {
+	*endpoint.Sender
+}
+
+var _ logic.Sender = (*FailSendCompletedSender)(nil)
+
+func (p *FailSendCompletedSender) SendCompleted(ctx context.Context, r *pdu.SendCompletedReq) (*pdu.SendCompletedRes, error) {
+	return nil, fmt.Errorf("[mock] SendCompleted not delivered to actual endpoint")
+}
+
+type replicationStepCompletedLost_scenario struct {
+	rfs                          string
+	deleteSfs1Err, deleteSfs2Err error
+	finalReport                  *report.FilesystemReport
+}
+
+func replicationStepCompletedLostBehavior_impl(ctx *platformtest.Context, guaranteeKind pdu.ReplicationGuaranteeKind) *replicationStepCompletedLost_scenario {
+
+	platformtest.Run(ctx, platformtest.PanicErr, ctx.RootDataset, `
+		CREATEROOT
+		+  "sender"
+		+  "receiver"
+		R  zfs create -p "${ROOTDS}/receiver/${ROOTDS}"
+	`)
+
+	sjid := endpoint.MustMakeJobID("sender-job")
+	rjid := endpoint.MustMakeJobID("receiver-job")
+
+	sfs := ctx.RootDataset + "/sender"
+	rfsRoot := ctx.RootDataset + "/receiver"
+
+	// fully replicate snapshots @1
+	{
+		mustSnapshot(ctx, sfs+"@1")
+		rep := replicationInvocation{
+			sjid:      sjid,
+			rjid:      rjid,
+			sfs:       sfs,
+			rfsRoot:   rfsRoot,
+			guarantee: *pdu.ReplicationConfigProtectionWithKind(guaranteeKind),
+		}
+		rfs := rep.ReceiveSideFilesystem()
+		report := rep.Do(ctx)
+		ctx.Logf("\n%s", pretty.Sprint(report))
+		// assert this worked (not the main subject of the test)
+		_ = fsversion(ctx, rfs, "@1")
+	}
+
+	// create a second snapshot @2
+	mustSnapshot(ctx, sfs+"@2")
+
+	// fake loss of stepcompleted message
+	rep := replicationInvocation{
+		sjid:      sjid,
+		rjid:      rjid,
+		sfs:       sfs,
+		rfsRoot:   rfsRoot,
+		guarantee: *pdu.ReplicationConfigProtectionWithKind(guaranteeKind),
+		interceptSender: func(e *endpoint.Sender) logic.Sender {
+			return &FailSendCompletedSender{e}
+		},
+	}
+	rfs := rep.ReceiveSideFilesystem()
+	report := rep.Do(ctx)
+	ctx.Logf("\n%s", pretty.Sprint(report))
+
+	// assert the replication worked
+	_ = fsversion(ctx, rfs, "@2")
+	// and that we hold it using a last-received-hold
+	abs, absErrs, err := endpoint.ListAbstractions(ctx, endpoint.ListZFSHoldsAndBookmarksQuery{
+		FS: endpoint.ListZFSHoldsAndBookmarksQueryFilesystemFilter{
+			FS: &rfs,
+		},
+		Concurrency: 1,
+		JobID:       &rjid,
+		What:        endpoint.AbstractionTypeSet{endpoint.AbstractionLastReceivedHold: true},
+	})
+	require.NoError(ctx, err)
+	require.Empty(ctx, absErrs)
+	require.Len(ctx, abs, 1)
+	require.True(ctx, zfs.FilesystemVersionEqualIdentity(abs[0].GetFilesystemVersion(), fsversion(ctx, rfs, "@2")))
+
+	// now try to delete @2 on the sender, this should work because we don't have step holds on it
+	deleteSfs2Err := zfs.ZFSDestroy(ctx, sfs+"@2")
+	// defer check to caller
+
+	// and create a new snapshot on the sender
+	mustSnapshot(ctx, sfs+"@3")
+
+	// now we have: sender @1, @3
+	//              recver @1, @2
+
+	// delete @1 on both sides to demonstrate that, if we didn't have bookmarks, we would be out of sync
+	deleteSfs1Err := zfs.ZFSDestroy(ctx, sfs+"@1")
+	// defer check to caller
+	err = zfs.ZFSDestroy(ctx, rfs+"@1")
+	require.NoError(ctx, err)
+
+	// attempt replication and return the filesystem report
+	{
+		rep := replicationInvocation{
+			sjid:      sjid,
+			rjid:      rjid,
+			sfs:       sfs,
+			rfsRoot:   rfsRoot,
+			guarantee: *pdu.ReplicationConfigProtectionWithKind(guaranteeKind),
+		}
+		report := rep.Do(ctx)
+		ctx.Logf("expecting failure:\n%s", pretty.Sprint(report))
+		require.Len(ctx, report.Attempts, 1)
+		require.Len(ctx, report.Attempts[0].Filesystems, 1)
+		return &replicationStepCompletedLost_scenario{
+			rfs:           rfs,
+			deleteSfs1Err: deleteSfs1Err,
+			deleteSfs2Err: deleteSfs2Err,
+			finalReport:   report.Attempts[0].Filesystems[0],
+		}
+	}
+
+}
@@ -22,6 +22,7 @@ func CreateReplicationCursor(ctx *platformtest.Context) {
 		R  zfs bookmark "${ROOTDS}/foo bar@2 with space" "${ROOTDS}/foo bar#2 with space"
 		+  "foo bar@3 with space"
 		R  zfs bookmark "${ROOTDS}/foo bar@3 with space" "${ROOTDS}/foo bar#3 with space"
+		-  "foo bar@3 with space"
 	`)
 
 	jobid := endpoint.MustMakeJobID("zreplplatformtest")
@@ -42,6 +43,7 @@ func CreateReplicationCursor(ctx *platformtest.Context) {
 
 	snap := fsversion(ctx, fs, "@1 with space")
 	book := fsversion(ctx, fs, "#1 with space")
+	book3 := fsversion(ctx, fs, "#3 with space")
 
 	// create first cursor
 	cursorOfSnap, err := endpoint.CreateReplicationCursor(ctx, fs, snap, jobid)
@@ -49,8 +51,11 @@ func CreateReplicationCursor(ctx *platformtest.Context) {
 	// check CreateReplicationCursor is idempotent (for snapshot target)
 	cursorOfSnapIdemp, err := endpoint.CreateReplicationCursor(ctx, fs, snap, jobid)
 	checkCreateCursor(err, cursorOfSnap, snap)
+	// check CreateReplicationCursor is idempotent (for bookmark target of snapshot)
+	cursorOfBook, err := endpoint.CreateReplicationCursor(ctx, fs, book, jobid)
+	checkCreateCursor(err, cursorOfBook, snap)
 	// ... for target = non-cursor bookmark
-	_, err = endpoint.CreateReplicationCursor(ctx, fs, book, jobid)
+	_, err = endpoint.CreateReplicationCursor(ctx, fs, book3, jobid)
 	assert.Equal(ctx, zfs.ErrBookmarkCloningNotSupported, err)
 	// ... for target = replication cursor bookmark to be created
 	cursorOfCursor, err := endpoint.CreateReplicationCursor(ctx, fs, cursorOfSnapIdemp.GetFilesystemVersion(), jobid)
@@ -46,7 +46,36 @@ func (x Tri) String() string {
 	return proto.EnumName(Tri_name, int32(x))
 }
 func (Tri) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{0}
+	return fileDescriptor_pdu_616c27178643eca4, []int{0}
+}
+
+type ReplicationGuaranteeKind int32
+
+const (
+	ReplicationGuaranteeKind_GuaranteeInvalid                ReplicationGuaranteeKind = 0
+	ReplicationGuaranteeKind_GuaranteeResumability           ReplicationGuaranteeKind = 1
+	ReplicationGuaranteeKind_GuaranteeIncrementalReplication ReplicationGuaranteeKind = 2
+	ReplicationGuaranteeKind_GuaranteeNothing                ReplicationGuaranteeKind = 3
+)
+
+var ReplicationGuaranteeKind_name = map[int32]string{
+	0: "GuaranteeInvalid",
+	1: "GuaranteeResumability",
+	2: "GuaranteeIncrementalReplication",
+	3: "GuaranteeNothing",
+}
+var ReplicationGuaranteeKind_value = map[string]int32{
+	"GuaranteeInvalid":                0,
+	"GuaranteeResumability":           1,
+	"GuaranteeIncrementalReplication": 2,
+	"GuaranteeNothing":                3,
+}
+
+func (x ReplicationGuaranteeKind) String() string {
+	return proto.EnumName(ReplicationGuaranteeKind_name, int32(x))
+}
+func (ReplicationGuaranteeKind) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_pdu_616c27178643eca4, []int{1}
 }
 
 type FilesystemVersion_VersionType int32
@@ -69,7 +98,7 @@ func (x FilesystemVersion_VersionType) String() string {
 	return proto.EnumName(FilesystemVersion_VersionType_name, int32(x))
 }
 func (FilesystemVersion_VersionType) EnumDescriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{5, 0}
+	return fileDescriptor_pdu_616c27178643eca4, []int{5, 0}
 }
 
 type ListFilesystemReq struct {
@@ -82,7 +111,7 @@ func (m *ListFilesystemReq) Reset() { *m = ListFilesystemReq{} }
 func (m *ListFilesystemReq) String() string { return proto.CompactTextString(m) }
 func (*ListFilesystemReq) ProtoMessage()    {}
 func (*ListFilesystemReq) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{0}
+	return fileDescriptor_pdu_616c27178643eca4, []int{0}
 }
 func (m *ListFilesystemReq) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ListFilesystemReq.Unmarshal(m, b)
@@ -113,7 +142,7 @@ func (m *ListFilesystemRes) Reset() { *m = ListFilesystemRes{} }
 func (m *ListFilesystemRes) String() string { return proto.CompactTextString(m) }
 func (*ListFilesystemRes) ProtoMessage()    {}
 func (*ListFilesystemRes) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{1}
+	return fileDescriptor_pdu_616c27178643eca4, []int{1}
 }
 func (m *ListFilesystemRes) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ListFilesystemRes.Unmarshal(m, b)
@@ -154,7 +183,7 @@ func (m *Filesystem) Reset() { *m = Filesystem{} }
 func (m *Filesystem) String() string { return proto.CompactTextString(m) }
 func (*Filesystem) ProtoMessage()    {}
 func (*Filesystem) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{2}
+	return fileDescriptor_pdu_616c27178643eca4, []int{2}
 }
 func (m *Filesystem) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Filesystem.Unmarshal(m, b)
@@ -213,7 +242,7 @@ func (m *ListFilesystemVersionsReq) Reset() { *m = ListFilesystemVersion
 func (m *ListFilesystemVersionsReq) String() string { return proto.CompactTextString(m) }
 func (*ListFilesystemVersionsReq) ProtoMessage()    {}
 func (*ListFilesystemVersionsReq) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{3}
+	return fileDescriptor_pdu_616c27178643eca4, []int{3}
 }
 func (m *ListFilesystemVersionsReq) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ListFilesystemVersionsReq.Unmarshal(m, b)
@@ -251,7 +280,7 @@ func (m *ListFilesystemVersionsRes) Reset() { *m = ListFilesystemVersion
 func (m *ListFilesystemVersionsRes) String() string { return proto.CompactTextString(m) }
 func (*ListFilesystemVersionsRes) ProtoMessage()    {}
 func (*ListFilesystemVersionsRes) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{4}
+	return fileDescriptor_pdu_616c27178643eca4, []int{4}
 }
 func (m *ListFilesystemVersionsRes) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ListFilesystemVersionsRes.Unmarshal(m, b)
@@ -293,7 +322,7 @@ func (m *FilesystemVersion) Reset() { *m = FilesystemVersion{} }
 func (m *FilesystemVersion) String() string { return proto.CompactTextString(m) }
 func (*FilesystemVersion) ProtoMessage()    {}
 func (*FilesystemVersion) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{5}
+	return fileDescriptor_pdu_616c27178643eca4, []int{5}
 }
 func (m *FilesystemVersion) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_FilesystemVersion.Unmarshal(m, b)
@@ -359,19 +388,20 @@ type SendReq struct {
 	// SHOULD clear the resume token on their side and use From and To instead If
 	// ResumeToken is not empty, the GUIDs of From and To MUST correspond to those
 	// encoded in the ResumeToken. Otherwise, the Sender MUST return an error.
 	ResumeToken string `protobuf:"bytes,4,opt,name=ResumeToken,proto3" json:"ResumeToken,omitempty"`
 	Encrypted   Tri    `protobuf:"varint,5,opt,name=Encrypted,proto3,enum=Tri" json:"Encrypted,omitempty"`
 	DryRun      bool   `protobuf:"varint,6,opt,name=DryRun,proto3" json:"DryRun,omitempty"`
+	ReplicationConfig *ReplicationConfig `protobuf:"bytes,7,opt,name=ReplicationConfig,proto3" json:"ReplicationConfig,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`
 	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *SendReq) Reset()         { *m = SendReq{} }
 func (m *SendReq) String() string { return proto.CompactTextString(m) }
 func (*SendReq) ProtoMessage()    {}
 func (*SendReq) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{6}
+	return fileDescriptor_pdu_616c27178643eca4, []int{6}
 }
 func (m *SendReq) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_SendReq.Unmarshal(m, b)
@@ -433,6 +463,97 @@ func (m *SendReq) GetDryRun() bool {
 	return false
 }
 
+func (m *SendReq) GetReplicationConfig() *ReplicationConfig {
+	if m != nil {
+		return m.ReplicationConfig
+	}
+	return nil
+}
+
+type ReplicationConfig struct {
+	Protection           *ReplicationConfigProtection `protobuf:"bytes,1,opt,name=protection,proto3" json:"protection,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *ReplicationConfig) Reset()         { *m = ReplicationConfig{} }
+func (m *ReplicationConfig) String() string { return proto.CompactTextString(m) }
+func (*ReplicationConfig) ProtoMessage()    {}
+func (*ReplicationConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_pdu_616c27178643eca4, []int{7}
+}
+func (m *ReplicationConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ReplicationConfig.Unmarshal(m, b)
+}
+func (m *ReplicationConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ReplicationConfig.Marshal(b, m, deterministic)
+}
+func (dst *ReplicationConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReplicationConfig.Merge(dst, src)
+}
+func (m *ReplicationConfig) XXX_Size() int {
+	return xxx_messageInfo_ReplicationConfig.Size(m)
+}
+func (m *ReplicationConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_ReplicationConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicationConfig proto.InternalMessageInfo
+
+func (m *ReplicationConfig) GetProtection() *ReplicationConfigProtection {
+	if m != nil {
+		return m.Protection
+	}
+	return nil
+}
+
+type ReplicationConfigProtection struct {
+	Initial              ReplicationGuaranteeKind `protobuf:"varint,1,opt,name=Initial,proto3,enum=ReplicationGuaranteeKind" json:"Initial,omitempty"`
+	Incremental          ReplicationGuaranteeKind `protobuf:"varint,2,opt,name=Incremental,proto3,enum=ReplicationGuaranteeKind" json:"Incremental,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *ReplicationConfigProtection) Reset()         { *m = ReplicationConfigProtection{} }
+func (m *ReplicationConfigProtection) String() string { return proto.CompactTextString(m) }
+func (*ReplicationConfigProtection) ProtoMessage()    {}
+func (*ReplicationConfigProtection) Descriptor() ([]byte, []int) {
+	return fileDescriptor_pdu_616c27178643eca4, []int{8}
+}
+func (m *ReplicationConfigProtection) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ReplicationConfigProtection.Unmarshal(m, b)
+}
+func (m *ReplicationConfigProtection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ReplicationConfigProtection.Marshal(b, m, deterministic)
+}
+func (dst *ReplicationConfigProtection) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReplicationConfigProtection.Merge(dst, src)
+}
+func (m *ReplicationConfigProtection) XXX_Size() int {
+	return xxx_messageInfo_ReplicationConfigProtection.Size(m)
+}
+func (m *ReplicationConfigProtection) XXX_DiscardUnknown() {
+	xxx_messageInfo_ReplicationConfigProtection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReplicationConfigProtection proto.InternalMessageInfo
+
+func (m *ReplicationConfigProtection) GetInitial() ReplicationGuaranteeKind {
+	if m != nil {
+		return m.Initial
+	}
+	return ReplicationGuaranteeKind_GuaranteeInvalid
+}
+
+func (m *ReplicationConfigProtection) GetIncremental() ReplicationGuaranteeKind {
+	if m != nil {
+		return m.Incremental
+	}
+	return ReplicationGuaranteeKind_GuaranteeInvalid
+}
+
 type Property struct {
 	Name  string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"`
 	Value string `protobuf:"bytes,2,opt,name=Value,proto3" json:"Value,omitempty"`
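The two new messages above are what the planner attaches to send and receive requests. One practical property of the generated getters is that they are nil-safe, so an endpoint can chain them and fall back on GuaranteeInvalid when a request carries no ReplicationConfig at all. A minimal sketch (import path assumed from the package layout of this tree):

    package main

    import (
    	"fmt"

    	"github.com/zrepl/zrepl/replication/logic/pdu"
    )

    func main() {
    	// A request without a ReplicationConfig: the getter chain returns
    	// GuaranteeInvalid instead of panicking on a nil pointer.
    	var req pdu.SendReq
    	kind := req.GetReplicationConfig().GetProtection().GetInitial()
    	fmt.Println(kind == pdu.ReplicationGuaranteeKind_GuaranteeInvalid) // true

    	// A fully populated request carries both the initial and incremental kind.
    	req.ReplicationConfig = &pdu.ReplicationConfig{
    		Protection: &pdu.ReplicationConfigProtection{
    			Initial:     pdu.ReplicationGuaranteeKind_GuaranteeResumability,
    			Incremental: pdu.ReplicationGuaranteeKind_GuaranteeIncrementalReplication,
    		},
    	}
    	fmt.Println(req.GetReplicationConfig().GetProtection().GetIncremental()) // GuaranteeIncrementalReplication
    }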
@@ -445,7 +566,7 @@ func (m *Property) Reset() { *m = Property{} }
 func (m *Property) String() string { return proto.CompactTextString(m) }
 func (*Property) ProtoMessage()    {}
 func (*Property) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{7}
+	return fileDescriptor_pdu_616c27178643eca4, []int{9}
 }
 func (m *Property) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_Property.Unmarshal(m, b)
@@ -496,7 +617,7 @@ func (m *SendRes) Reset() { *m = SendRes{} }
 func (m *SendRes) String() string { return proto.CompactTextString(m) }
 func (*SendRes) ProtoMessage()    {}
 func (*SendRes) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{8}
+	return fileDescriptor_pdu_616c27178643eca4, []int{10}
 }
 func (m *SendRes) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_SendRes.Unmarshal(m, b)
@@ -548,7 +669,7 @@ func (m *SendCompletedReq) Reset() { *m = SendCompletedReq{} }
 func (m *SendCompletedReq) String() string { return proto.CompactTextString(m) }
 func (*SendCompletedReq) ProtoMessage()    {}
 func (*SendCompletedReq) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{9}
+	return fileDescriptor_pdu_616c27178643eca4, []int{11}
 }
 func (m *SendCompletedReq) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_SendCompletedReq.Unmarshal(m, b)
@@ -585,7 +706,7 @@ func (m *SendCompletedRes) Reset() { *m = SendCompletedRes{} }
 func (m *SendCompletedRes) String() string { return proto.CompactTextString(m) }
 func (*SendCompletedRes) ProtoMessage()    {}
 func (*SendCompletedRes) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{10}
+	return fileDescriptor_pdu_616c27178643eca4, []int{12}
 }
 func (m *SendCompletedRes) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_SendCompletedRes.Unmarshal(m, b)
@@ -610,17 +731,18 @@ type ReceiveReq struct {
 	To *FilesystemVersion `protobuf:"bytes,2,opt,name=To,proto3" json:"To,omitempty"`
 	// If true, the receiver should clear the resume token before performing the
 	// zfs recv of the stream in the request
 	ClearResumeToken bool `protobuf:"varint,3,opt,name=ClearResumeToken,proto3" json:"ClearResumeToken,omitempty"`
+	ReplicationConfig *ReplicationConfig `protobuf:"bytes,4,opt,name=ReplicationConfig,proto3" json:"ReplicationConfig,omitempty"`
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`
 	XXX_sizecache        int32    `json:"-"`
 }
 
 func (m *ReceiveReq) Reset()         { *m = ReceiveReq{} }
 func (m *ReceiveReq) String() string { return proto.CompactTextString(m) }
 func (*ReceiveReq) ProtoMessage()    {}
 func (*ReceiveReq) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{11}
+	return fileDescriptor_pdu_616c27178643eca4, []int{13}
 }
 func (m *ReceiveReq) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ReceiveReq.Unmarshal(m, b)
@@ -661,6 +783,13 @@ func (m *ReceiveReq) GetClearResumeToken() bool {
 	return false
 }
 
+func (m *ReceiveReq) GetReplicationConfig() *ReplicationConfig {
+	if m != nil {
+		return m.ReplicationConfig
+	}
+	return nil
+}
+
 type ReceiveRes struct {
 	XXX_NoUnkeyedLiteral struct{} `json:"-"`
 	XXX_unrecognized     []byte   `json:"-"`
@@ -671,7 +800,7 @@ func (m *ReceiveRes) Reset() { *m = ReceiveRes{} }
 func (m *ReceiveRes) String() string { return proto.CompactTextString(m) }
 func (*ReceiveRes) ProtoMessage()    {}
 func (*ReceiveRes) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{12}
+	return fileDescriptor_pdu_616c27178643eca4, []int{14}
 }
 func (m *ReceiveRes) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ReceiveRes.Unmarshal(m, b)
@@ -704,7 +833,7 @@ func (m *DestroySnapshotsReq) Reset() { *m = DestroySnapshotsReq{} }
 func (m *DestroySnapshotsReq) String() string { return proto.CompactTextString(m) }
 func (*DestroySnapshotsReq) ProtoMessage()    {}
 func (*DestroySnapshotsReq) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{13}
+	return fileDescriptor_pdu_616c27178643eca4, []int{15}
 }
 func (m *DestroySnapshotsReq) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DestroySnapshotsReq.Unmarshal(m, b)
@@ -750,7 +879,7 @@ func (m *DestroySnapshotRes) Reset() { *m = DestroySnapshotRes{} }
 func (m *DestroySnapshotRes) String() string { return proto.CompactTextString(m) }
 func (*DestroySnapshotRes) ProtoMessage()    {}
 func (*DestroySnapshotRes) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{14}
+	return fileDescriptor_pdu_616c27178643eca4, []int{16}
 }
 func (m *DestroySnapshotRes) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DestroySnapshotRes.Unmarshal(m, b)
@@ -795,7 +924,7 @@ func (m *DestroySnapshotsRes) Reset() { *m = DestroySnapshotsRes{} }
 func (m *DestroySnapshotsRes) String() string { return proto.CompactTextString(m) }
 func (*DestroySnapshotsRes) ProtoMessage()    {}
 func (*DestroySnapshotsRes) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{15}
+	return fileDescriptor_pdu_616c27178643eca4, []int{17}
 }
 func (m *DestroySnapshotsRes) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_DestroySnapshotsRes.Unmarshal(m, b)
@@ -833,7 +962,7 @@ func (m *ReplicationCursorReq) Reset() { *m = ReplicationCursorReq{} }
 func (m *ReplicationCursorReq) String() string { return proto.CompactTextString(m) }
 func (*ReplicationCursorReq) ProtoMessage()    {}
 func (*ReplicationCursorReq) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{16}
+	return fileDescriptor_pdu_616c27178643eca4, []int{18}
 }
 func (m *ReplicationCursorReq) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ReplicationCursorReq.Unmarshal(m, b)
@@ -874,7 +1003,7 @@ func (m *ReplicationCursorRes) Reset() { *m = ReplicationCursorRes{} }
 func (m *ReplicationCursorRes) String() string { return proto.CompactTextString(m) }
 func (*ReplicationCursorRes) ProtoMessage()    {}
 func (*ReplicationCursorRes) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{17}
+	return fileDescriptor_pdu_616c27178643eca4, []int{19}
 }
 func (m *ReplicationCursorRes) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_ReplicationCursorRes.Unmarshal(m, b)
@@ -1010,7 +1139,7 @@ func (m *PingReq) Reset() { *m = PingReq{} }
 func (m *PingReq) String() string { return proto.CompactTextString(m) }
 func (*PingReq) ProtoMessage()    {}
 func (*PingReq) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{18}
+	return fileDescriptor_pdu_616c27178643eca4, []int{20}
 }
 func (m *PingReq) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_PingReq.Unmarshal(m, b)
@@ -1049,7 +1178,7 @@ func (m *PingRes) Reset() { *m = PingRes{} }
 func (m *PingRes) String() string { return proto.CompactTextString(m) }
 func (*PingRes) ProtoMessage()    {}
 func (*PingRes) Descriptor() ([]byte, []int) {
-	return fileDescriptor_pdu_483c6918b7b3d747, []int{19}
+	return fileDescriptor_pdu_616c27178643eca4, []int{21}
 }
 func (m *PingRes) XXX_Unmarshal(b []byte) error {
 	return xxx_messageInfo_PingRes.Unmarshal(m, b)
@@ -1084,6 +1213,8 @@ func init() {
 	proto.RegisterType((*ListFilesystemVersionsRes)(nil), "ListFilesystemVersionsRes")
 	proto.RegisterType((*FilesystemVersion)(nil), "FilesystemVersion")
 	proto.RegisterType((*SendReq)(nil), "SendReq")
+	proto.RegisterType((*ReplicationConfig)(nil), "ReplicationConfig")
+	proto.RegisterType((*ReplicationConfigProtection)(nil), "ReplicationConfigProtection")
 	proto.RegisterType((*Property)(nil), "Property")
 	proto.RegisterType((*SendRes)(nil), "SendRes")
 	proto.RegisterType((*SendCompletedReq)(nil), "SendCompletedReq")
@@ -1098,6 +1229,7 @@ func init() {
 	proto.RegisterType((*PingReq)(nil), "PingReq")
 	proto.RegisterType((*PingRes)(nil), "PingRes")
 	proto.RegisterEnum("Tri", Tri_name, Tri_value)
+	proto.RegisterEnum("ReplicationGuaranteeKind", ReplicationGuaranteeKind_name, ReplicationGuaranteeKind_value)
 	proto.RegisterEnum("FilesystemVersion_VersionType", FilesystemVersion_VersionType_name, FilesystemVersion_VersionType_value)
 }
 
@@ -1338,61 +1470,71 @@ var _Replication_serviceDesc = grpc.ServiceDesc{
 	Metadata: "pdu.proto",
 }
 
-func init() { proto.RegisterFile("pdu.proto", fileDescriptor_pdu_483c6918b7b3d747) }
+func init() { proto.RegisterFile("pdu.proto", fileDescriptor_pdu_616c27178643eca4) }
 
-var fileDescriptor_pdu_483c6918b7b3d747 = []byte{
-	// 833 bytes of a gzipped FileDescriptorProto
+var fileDescriptor_pdu_616c27178643eca4 = []byte{
+	// 995 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x56, 0x5f, 0x6f, 0xe3, 0x44,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xcf, 0x6e, 0xdb, 0xc6,
|
||||||
0x10, 0xaf, 0x13, 0xa7, 0x75, 0x26, 0x3d, 0x2e, 0x9d, 0x96, 0x93, 0xb1, 0xe0, 0x54, 0x2d, 0x08,
|
0x13, 0x36, 0x25, 0xda, 0xa2, 0x46, 0xce, 0x2f, 0xf4, 0xd8, 0x09, 0x68, 0xfd, 0xd2, 0xd4, 0xd8,
|
||||||
0xe5, 0x2a, 0x61, 0xa1, 0xf2, 0x47, 0x42, 0x48, 0x27, 0xd1, 0xb4, 0xbd, 0x3b, 0x01, 0x47, 0xb4,
|
0x14, 0x85, 0x63, 0xa0, 0x44, 0xe1, 0xb4, 0x05, 0x8a, 0x14, 0x41, 0xeb, 0xbf, 0x31, 0xd2, 0xba,
|
||||||
0x35, 0x27, 0x74, 0x6f, 0x26, 0x19, 0xb5, 0x56, 0x1d, 0xaf, 0xbb, 0xe3, 0xa0, 0x0b, 0xe2, 0x89,
|
0xea, 0x5a, 0x0d, 0x8a, 0xdc, 0x18, 0x69, 0x2a, 0x2f, 0x4c, 0x71, 0xe9, 0x5d, 0x2a, 0x88, 0x7a,
|
||||||
0x47, 0xbe, 0x1e, 0x7c, 0x10, 0x3e, 0x02, 0xf2, 0xc6, 0x4e, 0x9c, 0xd8, 0x41, 0x79, 0xca, 0xce,
|
0xec, 0xa1, 0x87, 0x5e, 0x7a, 0xea, 0xeb, 0xf4, 0x29, 0xfa, 0x20, 0x7d, 0x84, 0x82, 0x6b, 0x92,
|
||||||
0x6f, 0x66, 0x77, 0x67, 0x7f, 0xf3, 0x9b, 0x71, 0xa0, 0x9b, 0x4e, 0x66, 0x7e, 0xaa, 0x55, 0xa6,
|
0xa2, 0x44, 0xca, 0x70, 0x4f, 0xda, 0xf9, 0xe6, 0xdb, 0xd9, 0xd9, 0xd1, 0x37, 0xb3, 0x84, 0x76,
|
||||||
0xc4, 0x31, 0x1c, 0xfd, 0x10, 0x71, 0x76, 0x1d, 0xc5, 0xc4, 0x73, 0xce, 0x68, 0x2a, 0xe9, 0x41,
|
0x3c, 0x9c, 0xf8, 0xb1, 0x92, 0x89, 0x64, 0x9b, 0xb0, 0xf1, 0xad, 0xd0, 0xc9, 0x89, 0x08, 0x49,
|
||||||
0x5c, 0xd4, 0x41, 0xc6, 0xcf, 0xa0, 0xb7, 0x02, 0xd8, 0xb5, 0x4e, 0xdb, 0x83, 0xde, 0x79, 0xcf,
|
0x4f, 0x75, 0x42, 0x63, 0x4e, 0xd7, 0xec, 0xa0, 0x0a, 0x6a, 0xfc, 0x04, 0x3a, 0x33, 0x40, 0x7b,
|
||||||
0xaf, 0x04, 0x55, 0xfd, 0xe2, 0x2f, 0x0b, 0x60, 0x65, 0x23, 0x82, 0x3d, 0x0a, 0xb3, 0x3b, 0xd7,
|
0xd6, 0x4e, 0x73, 0xb7, 0xb3, 0xdf, 0xf1, 0x4b, 0xa4, 0xb2, 0x9f, 0xfd, 0x6e, 0x01, 0xcc, 0x6c,
|
||||||
0x3a, 0xb5, 0x06, 0x5d, 0x69, 0xd6, 0x78, 0x0a, 0x3d, 0x49, 0x3c, 0x9b, 0x52, 0xa0, 0xee, 0x29,
|
0x44, 0xb0, 0x7b, 0x41, 0x72, 0xe9, 0x59, 0x3b, 0xd6, 0x6e, 0x9b, 0x9b, 0x35, 0xee, 0x40, 0x87,
|
||||||
0x71, 0x5b, 0xc6, 0x55, 0x85, 0xf0, 0x13, 0x78, 0xf4, 0x8a, 0x47, 0x71, 0x38, 0xa6, 0x3b, 0x15,
|
0x93, 0x9e, 0x8c, 0xa9, 0x2f, 0xaf, 0x28, 0xf2, 0x1a, 0xc6, 0x55, 0x86, 0xf0, 0x23, 0xb8, 0x77,
|
||||||
0x4f, 0x48, 0xbb, 0xed, 0x53, 0x6b, 0xe0, 0xc8, 0x75, 0x30, 0x3f, 0xe7, 0x15, 0x5f, 0x25, 0x63,
|
0xa6, 0x7b, 0x61, 0x30, 0xa0, 0x4b, 0x19, 0x0e, 0x49, 0x79, 0xcd, 0x1d, 0x6b, 0xd7, 0xe1, 0xf3,
|
||||||
0x3d, 0x4f, 0x33, 0x9a, 0xb8, 0xb6, 0x89, 0xa9, 0x42, 0xe2, 0x5b, 0xf8, 0x60, 0xfd, 0x41, 0x6f,
|
0x60, 0x1a, 0xe7, 0x4c, 0x1f, 0x47, 0x03, 0x35, 0x8d, 0x13, 0x1a, 0x7a, 0xb6, 0xe1, 0x94, 0x21,
|
||||||
0x48, 0x73, 0xa4, 0x12, 0x96, 0xf4, 0x80, 0x4f, 0xab, 0x89, 0x16, 0x09, 0x56, 0x10, 0xf1, 0xfd,
|
0xf6, 0x1c, 0xb6, 0xe7, 0x2f, 0xf4, 0x9a, 0x94, 0x16, 0x32, 0xd2, 0x9c, 0xae, 0xf1, 0x71, 0x39,
|
||||||
0xf6, 0xcd, 0x8c, 0x3e, 0x38, 0xa5, 0x59, 0x50, 0x82, 0x7e, 0x2d, 0x52, 0x2e, 0x63, 0xc4, 0x3f,
|
0xd1, 0x2c, 0xc1, 0x12, 0xc2, 0x5e, 0x2d, 0xdf, 0xac, 0xd1, 0x07, 0x27, 0x37, 0xb3, 0x92, 0xa0,
|
||||||
0x16, 0x1c, 0xd5, 0xfc, 0x78, 0x0e, 0x76, 0x30, 0x4f, 0xc9, 0x5c, 0xfe, 0xde, 0xf9, 0xd3, 0xfa,
|
0x5f, 0x61, 0xf2, 0x82, 0xc3, 0xfe, 0xb6, 0x60, 0xa3, 0xe2, 0xc7, 0x7d, 0xb0, 0xfb, 0xd3, 0x98,
|
||||||
0x09, 0x7e, 0xf1, 0x9b, 0x47, 0x49, 0x13, 0x9b, 0x33, 0xfa, 0x3a, 0x9c, 0x52, 0x41, 0x9b, 0x59,
|
0xcc, 0xe1, 0xff, 0xdb, 0x7f, 0x5c, 0x8d, 0xe0, 0x67, 0xbf, 0x29, 0x8b, 0x1b, 0x6e, 0x5a, 0xd1,
|
||||||
0xe7, 0xd8, 0x8b, 0x59, 0x34, 0x31, 0x34, 0xd9, 0xd2, 0xac, 0xf1, 0x43, 0xe8, 0x0e, 0x35, 0x85,
|
0xf3, 0x60, 0x4c, 0x59, 0xd9, 0xcc, 0x3a, 0xc5, 0x4e, 0x27, 0x62, 0x68, 0xca, 0x64, 0x73, 0xb3,
|
||||||
0x19, 0x05, 0xbf, 0xbc, 0x30, 0xdc, 0xd8, 0x72, 0x05, 0xa0, 0x07, 0x8e, 0x31, 0x22, 0x95, 0xb8,
|
0xc6, 0x47, 0xd0, 0x3e, 0x54, 0x14, 0x24, 0xd4, 0xff, 0xe9, 0xd4, 0xd4, 0xc6, 0xe6, 0x33, 0x00,
|
||||||
0x1d, 0x73, 0xd2, 0xd2, 0x16, 0xcf, 0xa0, 0x57, 0xb9, 0x16, 0x0f, 0xc1, 0xb9, 0x49, 0xc2, 0x94,
|
0xbb, 0xe0, 0x18, 0x43, 0xc8, 0xc8, 0x5b, 0x35, 0x91, 0x0a, 0x9b, 0x3d, 0x85, 0x4e, 0xe9, 0x58,
|
||||||
0xef, 0x54, 0xd6, 0xdf, 0xcb, 0xad, 0x0b, 0xa5, 0xee, 0xa7, 0xa1, 0xbe, 0xef, 0x5b, 0xe2, 0x6f,
|
0x5c, 0x07, 0xe7, 0x22, 0x0a, 0x62, 0x7d, 0x29, 0x13, 0x77, 0x25, 0xb5, 0x0e, 0xa4, 0xbc, 0x1a,
|
||||||
0x0b, 0x0e, 0x6e, 0x28, 0x99, 0xec, 0xc0, 0x27, 0x7e, 0x0a, 0xf6, 0xb5, 0x56, 0x53, 0x93, 0x78,
|
0x07, 0xea, 0xca, 0xb5, 0xd8, 0x9f, 0x0d, 0x68, 0x5d, 0x50, 0x34, 0xbc, 0x43, 0x3d, 0xf1, 0x63,
|
||||||
0x33, 0x5d, 0xc6, 0x8f, 0x02, 0x5a, 0x81, 0x32, 0x4f, 0x69, 0x8e, 0x6a, 0x05, 0x6a, 0x53, 0x42,
|
0xb0, 0x4f, 0x94, 0x1c, 0x9b, 0xc4, 0xeb, 0xcb, 0x65, 0xfc, 0xc8, 0xa0, 0xd1, 0x97, 0xe6, 0x2a,
|
||||||
0x76, 0x5d, 0x42, 0x02, 0xba, 0x2b, 0x69, 0x74, 0x0c, 0xbf, 0xb6, 0x1f, 0xe8, 0x48, 0xae, 0x60,
|
0xf5, 0xac, 0x46, 0x5f, 0x2e, 0x4a, 0xc8, 0xae, 0x4a, 0x88, 0x41, 0x7b, 0x26, 0x8d, 0x55, 0x53,
|
||||||
0x7c, 0x02, 0xfb, 0x97, 0x7a, 0x2e, 0x67, 0x89, 0xbb, 0x6f, 0xb4, 0x53, 0x58, 0xe2, 0x4b, 0x70,
|
0x5f, 0xdb, 0xef, 0x2b, 0xc1, 0x67, 0x30, 0x3e, 0x84, 0xb5, 0x23, 0x35, 0xe5, 0x93, 0xc8, 0x5b,
|
||||||
0x46, 0x5a, 0xa5, 0xa4, 0xb3, 0xf9, 0x92, 0x6e, 0xab, 0x42, 0xf7, 0x09, 0x74, 0xde, 0x84, 0xf1,
|
0x33, 0xda, 0xc9, 0x2c, 0xfc, 0x1a, 0x36, 0x38, 0xc5, 0xa1, 0x18, 0x98, 0x7a, 0x1c, 0xca, 0xe8,
|
||||||
0xac, 0xac, 0xc1, 0xc2, 0x10, 0x7f, 0x2e, 0xb9, 0x60, 0x1c, 0xc0, 0xe3, 0x9f, 0x99, 0x26, 0x9b,
|
0x67, 0x31, 0xf2, 0x5a, 0x59, 0x42, 0x15, 0x0f, 0xaf, 0x92, 0xd9, 0x0f, 0x35, 0x11, 0xf0, 0x2b,
|
||||||
0x32, 0x77, 0xe4, 0x26, 0x8c, 0x02, 0x0e, 0xaf, 0xde, 0xa5, 0x34, 0xce, 0x68, 0x72, 0x13, 0xfd,
|
0x80, 0xb4, 0xf9, 0x68, 0x60, 0xaa, 0x6e, 0x99, 0x78, 0x8f, 0xaa, 0xf1, 0x7a, 0x05, 0x87, 0x97,
|
||||||
0x4e, 0xe6, 0xdd, 0x6d, 0xb9, 0x86, 0xe1, 0x33, 0x80, 0x22, 0x9f, 0x88, 0xd8, 0xb5, 0x8d, 0xdc,
|
0xf8, 0xec, 0x0f, 0x0b, 0xfe, 0x7f, 0x0b, 0x17, 0x9f, 0x41, 0xeb, 0x2c, 0x12, 0x89, 0x08, 0xc2,
|
||||||
0xba, 0x7e, 0x99, 0xa2, 0xac, 0x38, 0xc5, 0x73, 0xe8, 0xe7, 0x39, 0x0c, 0xd5, 0x34, 0x8d, 0x29,
|
0x4c, 0x4e, 0xdb, 0xe5, 0xd0, 0xa7, 0x93, 0x40, 0x05, 0x51, 0x42, 0xf4, 0x4a, 0x44, 0x43, 0x9e,
|
||||||
0x23, 0x53, 0x98, 0x33, 0xe8, 0xfd, 0xa4, 0xa3, 0xdb, 0x28, 0x09, 0x63, 0x49, 0x0f, 0x05, 0xff,
|
0x33, 0xf1, 0x39, 0x74, 0xce, 0xa2, 0x81, 0xa2, 0x31, 0x45, 0x49, 0x10, 0x9a, 0xbf, 0xe6, 0xd6,
|
||||||
0x8e, 0x5f, 0xd4, 0x4d, 0x56, 0x9d, 0x02, 0x6b, 0xfb, 0x59, 0xfc, 0x01, 0x20, 0x69, 0x4c, 0xd1,
|
0x8d, 0x65, 0x36, 0xfb, 0x0c, 0x9c, 0x9e, 0x92, 0x31, 0xa9, 0x64, 0x5a, 0xa8, 0xd2, 0x2a, 0xa9,
|
||||||
0x6f, 0xb4, 0x4b, 0x99, 0x17, 0xe5, 0x6b, 0xfd, 0x6f, 0xf9, 0xce, 0xa0, 0x3f, 0x8c, 0x29, 0xd4,
|
0x72, 0x0b, 0x56, 0x5f, 0x07, 0xe1, 0x24, 0x97, 0xea, 0x8d, 0xc1, 0x7e, 0xb5, 0x72, 0xc9, 0x68,
|
||||||
0x55, 0x7e, 0x16, 0x2d, 0x5e, 0xc3, 0xc5, 0x61, 0xe5, 0x76, 0x16, 0xb7, 0x70, 0x7c, 0x49, 0x9c,
|
0xdc, 0x85, 0xfb, 0x3f, 0x6a, 0x1a, 0x2e, 0x4e, 0x03, 0x87, 0x2f, 0xc2, 0xc8, 0x60, 0xfd, 0xf8,
|
||||||
0x69, 0x35, 0x2f, 0x35, 0xb9, 0x4b, 0x2f, 0xe3, 0xe7, 0xd0, 0x5d, 0xc6, 0xbb, 0xad, 0xad, 0xfd,
|
0x7d, 0x4c, 0x83, 0x84, 0x86, 0x17, 0xe2, 0x17, 0x32, 0xf2, 0x68, 0xf2, 0x39, 0x0c, 0x9f, 0x02,
|
||||||
0xba, 0x0a, 0x12, 0x6f, 0x01, 0x37, 0x2e, 0x2a, 0xda, 0xbe, 0x34, 0xcd, 0x2d, 0x5b, 0xda, 0xbe,
|
0x64, 0xf9, 0x08, 0xd2, 0x9e, 0x6d, 0xba, 0xb2, 0xed, 0xe7, 0x29, 0xf2, 0x92, 0x93, 0xbd, 0x00,
|
||||||
0x8c, 0xc9, 0x95, 0x72, 0xa5, 0xb5, 0xd2, 0xa5, 0x52, 0x8c, 0x21, 0x2e, 0x9b, 0x1e, 0x91, 0x4f,
|
0x37, 0xcd, 0xe1, 0x50, 0x8e, 0xe3, 0x90, 0x12, 0x32, 0xfa, 0xdd, 0x83, 0xce, 0xf7, 0x4a, 0x8c,
|
||||||
0xda, 0x83, 0xfc, 0xe1, 0x71, 0x56, 0x8e, 0x94, 0x63, 0xbf, 0x9e, 0x82, 0x2c, 0x63, 0xc4, 0xd7,
|
0x44, 0x14, 0x84, 0x9c, 0xae, 0x33, 0x99, 0x3a, 0x7e, 0x26, 0x6f, 0x5e, 0x76, 0x32, 0xac, 0xec,
|
||||||
0x70, 0x22, 0x29, 0x8d, 0xa3, 0xb1, 0xe9, 0xda, 0xe1, 0x4c, 0xb3, 0xd2, 0xbb, 0xcc, 0xb5, 0xa0,
|
0xd7, 0xec, 0x2f, 0x0b, 0x80, 0xd3, 0x80, 0xc4, 0x3b, 0xba, 0x4b, 0x3b, 0xdc, 0xc8, 0xbc, 0x71,
|
||||||
0x71, 0x1f, 0xe3, 0x49, 0x31, 0x44, 0xf2, 0x1d, 0xf6, 0xcb, 0xbd, 0xe5, 0x18, 0x71, 0x5e, 0xab,
|
0xab, 0xcc, 0xf7, 0xc0, 0x3d, 0x0c, 0x29, 0x50, 0xe5, 0x02, 0xdd, 0x8c, 0xc2, 0x0a, 0x5e, 0x2f,
|
||||||
0x8c, 0xde, 0x45, 0x9c, 0x2d, 0x24, 0xfc, 0x72, 0x4f, 0x2e, 0x91, 0x0b, 0x07, 0xf6, 0x17, 0xe9,
|
0x5a, 0xfb, 0xbf, 0x88, 0x76, 0xbd, 0x94, 0xbf, 0x66, 0x23, 0xd8, 0x3c, 0x22, 0x9d, 0x28, 0x39,
|
||||||
0x88, 0x8f, 0xe1, 0x60, 0x14, 0x25, 0xb7, 0x79, 0x02, 0x2e, 0x1c, 0xfc, 0x48, 0xcc, 0xe1, 0x6d,
|
0xcd, 0xbb, 0xff, 0x2e, 0x53, 0x13, 0x3f, 0x85, 0x76, 0xc1, 0xf7, 0x1a, 0x4b, 0x27, 0xe3, 0x8c,
|
||||||
0xd9, 0x35, 0xa5, 0x29, 0x3e, 0x2a, 0x83, 0x38, 0xef, 0xab, 0xab, 0xf1, 0x9d, 0x2a, 0xfb, 0x2a,
|
0xc4, 0xde, 0x00, 0x2e, 0x1c, 0x94, 0x0d, 0xd8, 0xdc, 0xcc, 0x5a, 0xa5, 0x76, 0xc0, 0xe6, 0x9c,
|
||||||
0x5f, 0x9f, 0x0d, 0xa0, 0x1d, 0xe8, 0x28, 0x1f, 0x31, 0x97, 0x2a, 0xc9, 0x86, 0xa1, 0xa6, 0xfe,
|
0x54, 0x6c, 0xc7, 0x4a, 0x49, 0x95, 0x8b, 0xcd, 0x18, 0xec, 0xa8, 0xee, 0x12, 0xe9, 0x9b, 0xd6,
|
||||||
0x1e, 0x76, 0xa1, 0x73, 0x1d, 0xc6, 0x4c, 0x7d, 0x0b, 0x1d, 0xb0, 0x03, 0x3d, 0xa3, 0x7e, 0xeb,
|
0x4a, 0x4b, 0x17, 0x26, 0xf9, 0xf0, 0xde, 0xf4, 0xab, 0x29, 0xf0, 0x9c, 0xc3, 0xbe, 0x80, 0xad,
|
||||||
0xfc, 0xdf, 0x56, 0x3e, 0x00, 0x96, 0x8f, 0x40, 0x0f, 0xec, 0xfc, 0x60, 0x74, 0xfc, 0x22, 0x09,
|
0x72, 0xb5, 0x26, 0x4a, 0x4b, 0x75, 0x97, 0x17, 0xa4, 0x5f, 0xbb, 0x4f, 0xe3, 0x56, 0x36, 0xae,
|
||||||
0xaf, 0x5c, 0x31, 0x7e, 0x03, 0x8f, 0xd7, 0xe7, 0x38, 0x23, 0xfa, 0xb5, 0x8f, 0x9f, 0x57, 0xc7,
|
0xd3, 0x1d, 0xf6, 0xcb, 0x95, 0x62, 0x60, 0x3b, 0xe7, 0x32, 0xa1, 0xf7, 0x42, 0x27, 0x37, 0x5d,
|
||||||
0x18, 0x47, 0xf0, 0xa4, 0xf9, 0x13, 0x80, 0x9e, 0xbf, 0xf5, 0xc3, 0xe2, 0x6d, 0xf7, 0x31, 0x3e,
|
0xf0, 0x72, 0x85, 0x17, 0xc8, 0x81, 0x03, 0x6b, 0x37, 0xe9, 0xb0, 0x27, 0xd0, 0xea, 0x89, 0x68,
|
||||||
0x87, 0xfe, 0x66, 0xe9, 0xf1, 0xc4, 0x6f, 0x90, 0xb4, 0xd7, 0x84, 0x32, 0x7e, 0x07, 0x47, 0xb5,
|
0x94, 0x26, 0xe0, 0x41, 0xeb, 0x3b, 0xd2, 0x3a, 0x18, 0xe5, 0x8d, 0x97, 0x9b, 0xec, 0x83, 0x9c,
|
||||||
0xe2, 0xe1, 0xfb, 0x7e, 0x93, 0x10, 0xbc, 0x46, 0x98, 0xf1, 0x2b, 0x78, 0xb4, 0xd6, 0xe2, 0x78,
|
0xa4, 0xd3, 0xd6, 0x3c, 0x1e, 0x5c, 0xca, 0xbc, 0x35, 0xd3, 0xf5, 0xde, 0x2e, 0x34, 0xfb, 0x4a,
|
||||||
0xe4, 0x6f, 0x8e, 0x0c, 0xaf, 0x06, 0xf1, 0x45, 0xe7, 0x6d, 0x3b, 0x9d, 0xcc, 0x7e, 0xdd, 0x37,
|
0xa4, 0xc3, 0xfc, 0x48, 0x46, 0xc9, 0x61, 0xa0, 0xc8, 0x5d, 0xc1, 0x36, 0xac, 0x9e, 0x04, 0xa1,
|
||||||
0xff, 0x1f, 0xbe, 0xf8, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x27, 0x95, 0xc1, 0x78, 0x4c, 0x08, 0x00,
|
0x26, 0xd7, 0x42, 0x07, 0xec, 0xbe, 0x9a, 0x90, 0xdb, 0xd8, 0xfb, 0xcd, 0x02, 0x6f, 0xd9, 0x38,
|
||||||
0x00,
|
0xc0, 0x2d, 0x70, 0x0b, 0xe0, 0x2c, 0x7a, 0x17, 0x84, 0x62, 0xe8, 0xae, 0xe0, 0x36, 0x3c, 0x28,
|
||||||
|
0x50, 0xa3, 0xd0, 0xe0, 0xad, 0x08, 0x45, 0x32, 0x75, 0x2d, 0x7c, 0x02, 0x1f, 0x96, 0x36, 0x14,
|
||||||
|
0xa3, 0xa4, 0x74, 0x80, 0xdb, 0x98, 0x8b, 0x7a, 0x2e, 0x93, 0x4b, 0x11, 0x8d, 0xdc, 0xe6, 0xfe,
|
||||||
|
0x3f, 0x8d, 0x74, 0xe6, 0x17, 0x3c, 0xec, 0x82, 0x9d, 0xde, 0x10, 0x1d, 0x3f, 0xab, 0x46, 0x37,
|
||||||
|
0x5f, 0x69, 0xfc, 0x12, 0xee, 0xcf, 0x3f, 0xdd, 0x1a, 0xd1, 0xaf, 0x7c, 0xef, 0x74, 0xab, 0x98,
|
||||||
|
0xc6, 0x1e, 0x3c, 0xac, 0x7f, 0xf5, 0xb1, 0xeb, 0x2f, 0xfd, 0x96, 0xe8, 0x2e, 0xf7, 0x69, 0x7c,
|
||||||
|
0x01, 0xee, 0xa2, 0x06, 0x71, 0xcb, 0xaf, 0xe9, 0xad, 0x6e, 0x1d, 0xaa, 0xf1, 0x9b, 0xf9, 0xc6,
|
||||||
|
0x36, 0x2a, 0xc2, 0x07, 0x7e, 0x9d, 0x22, 0xbb, 0xb5, 0xb0, 0xc6, 0xcf, 0xe1, 0xde, 0xdc, 0xb8,
|
||||||
|
0xc2, 0x0d, 0x7f, 0x71, 0xfc, 0x75, 0x2b, 0x90, 0x3e, 0x58, 0x7d, 0xd3, 0x8c, 0x87, 0x93, 0xb7,
|
||||||
|
0x6b, 0xe6, 0x93, 0xf1, 0xd9, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xa4, 0xfb, 0xca, 0x3f,
|
||||||
|
0x0a, 0x00, 0x00,
|
||||||
}
|
}
|
||||||
@ -61,6 +61,25 @@ message SendReq {
    Tri Encrypted = 5;

    bool DryRun = 6;

    ReplicationConfig ReplicationConfig = 7;
}

message ReplicationConfig {
    ReplicationConfigProtection protection = 1;
}

message ReplicationConfigProtection {
    ReplicationGuaranteeKind Initial = 1;
    ReplicationGuaranteeKind Incremental = 2;
}

enum ReplicationGuaranteeKind {
    GuaranteeInvalid = 0;
    GuaranteeResumability = 1;
    GuaranteeIncrementalReplication = 2;
    GuaranteeNothing = 3;
}
message Property {
@ -93,6 +112,8 @@ message ReceiveReq {
    // If true, the receiver should clear the resume token before performing the
    // zfs recv of the stream in the request
    bool ClearResumeToken = 3;

    ReplicationConfig ReplicationConfig = 4;
}

message ReceiveRes {}
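With these messages, every SendReq and ReceiveReq carries the protection policy chosen for the step. As a rough illustration of how an endpoint could branch on that value, here is a hypothetical helper that is not part of this commit; it only assumes the generated pdu types above and the usual protoc-gen-go getter conventions:

	// Hypothetical helper: translate the requested incremental guarantee into
	// "which protections should this step create on the sender side".
	func protectionForIncrementalStep(req *pdu.SendReq) (useStepHolds, useTentativeCursor bool) {
		switch req.GetReplicationConfig().GetProtection().GetIncremental() {
		case pdu.ReplicationGuaranteeKind_GuaranteeResumability:
			return true, false // keep holds so an interrupted step stays resumable
		case pdu.ReplicationGuaranteeKind_GuaranteeIncrementalReplication:
			return false, true // only keep enough state for a future incremental send
		case pdu.ReplicationGuaranteeKind_GuaranteeNothing:
			return false, false // no holds, no cursors
		default:
			return true, false // GuaranteeInvalid / unknown: fall back to the safest option
		}
	}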
@ -83,3 +83,10 @@ func (v *FilesystemVersion) ZFSFilesystemVersion() (*zfs.FilesystemVersion, erro
		Creation: ct,
	}, nil
}

func ReplicationConfigProtectionWithKind(both ReplicationGuaranteeKind) *ReplicationConfigProtection {
	return &ReplicationConfigProtection{
		Initial:     both,
		Incremental: both,
	}
}
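The helper simply fans one guarantee kind out to both fields. A hypothetical call site from outside package pdu might look like this (the variable name is illustrative only):

	// Hypothetical usage: request the same guarantee for initial and incremental steps.
	cfg := &pdu.ReplicationConfig{
		Protection: pdu.ReplicationConfigProtectionWithKind(pdu.ReplicationGuaranteeKind_GuaranteeResumability),
	}
	_ = cfg // would be embedded into SendReq / ReceiveReq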
@ -52,10 +52,6 @@ type Receiver interface {
	Receive(ctx context.Context, req *pdu.ReceiveReq, receive io.ReadCloser) (*pdu.ReceiveRes, error)
}

type PlannerPolicy struct {
	EncryptedSend tri // all sends must be encrypted (send -w, and encryption!=off)
}

type Planner struct {
	sender   Sender
	receiver Receiver
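PlannerPolicy is removed from this file but not from the codebase: it reappears below in the new replication/logic/replication_logic_policy.go, extended with a ReplicationConfig field.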
@ -561,12 +557,13 @@ func (s *Step) updateSizeEstimate(ctx context.Context) error {
func (s *Step) buildSendRequest(dryRun bool) (sr *pdu.SendReq) {
	fs := s.parent.Path
	sr = &pdu.SendReq{
		Filesystem:        fs,
		From:              s.from, // may be nil
		To:                s.to,
		Encrypted:         s.encrypt.ToPDU(),
		ResumeToken:       s.resumeToken,
		DryRun:            dryRun,
		ReplicationConfig: &s.parent.policy.ReplicationConfig,
	}
	return sr
}
@ -603,9 +600,10 @@ func (s *Step) doReplication(ctx context.Context) error {
	}()

	rr := &pdu.ReceiveReq{
		Filesystem:        fs,
		To:                sr.GetTo(),
		ClearResumeToken:  !sres.UsedResumeToken,
		ReplicationConfig: &s.parent.policy.ReplicationConfig,
	}
	log.Debug("initiate receive request")
	_, err = s.receiver.Receive(ctx, rr, byteCountingStream)
replication/logic/replication_logic_policy.go (new file, 42 lines)
@ -0,0 +1,42 @@
package logic

import (
	"github.com/pkg/errors"

	"github.com/zrepl/zrepl/config"
	"github.com/zrepl/zrepl/replication/logic/pdu"
)

type PlannerPolicy struct {
	EncryptedSend     tri // all sends must be encrypted (send -w, and encryption!=off)
	ReplicationConfig pdu.ReplicationConfig
}

func ReplicationConfigFromConfig(in *config.Replication) (*pdu.ReplicationConfig, error) {
	initial, err := pduReplicationGuaranteeKindFromConfig(in.Protection.Initial)
	if err != nil {
		return nil, errors.Wrap(err, "field 'initial'")
	}
	incremental, err := pduReplicationGuaranteeKindFromConfig(in.Protection.Incremental)
	if err != nil {
		return nil, errors.Wrap(err, "field 'incremental'")
	}
	return &pdu.ReplicationConfig{
		Protection: &pdu.ReplicationConfigProtection{
			Initial:     initial,
			Incremental: incremental,
		},
	}, nil
}

func pduReplicationGuaranteeKindFromConfig(in string) (k pdu.ReplicationGuaranteeKind, _ error) {
	switch in {
	case "guarantee_nothing":
		return pdu.ReplicationGuaranteeKind_GuaranteeNothing, nil
	case "guarantee_incremental":
		return pdu.ReplicationGuaranteeKind_GuaranteeIncrementalReplication, nil
	case "guarantee_resumability":
		return pdu.ReplicationGuaranteeKind_GuaranteeResumability, nil
	default:
		return k, errors.Errorf("%q is not in guarantee_{nothing,incremental,resumability}", in)
	}
}
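For orientation, a rough sketch of how an active-side job could feed its parsed config into this policy; the function name and the config.ActiveJob parameter are assumptions for illustration, not code from this commit:

	// Hypothetical wiring in an active-side job's setup code.
	func plannerPolicyFromJobConfig(jobConfig *config.ActiveJob) (logic.PlannerPolicy, error) {
		replCfg, err := logic.ReplicationConfigFromConfig(jobConfig.Replication)
		if err != nil {
			return logic.PlannerPolicy{}, err
		}
		return logic.PlannerPolicy{
			// EncryptedSend would be derived from the job's send options (omitted here).
			ReplicationConfig: *replCfg,
		}, nil
	}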
@ -152,7 +152,7 @@ func (m *HandshakeMessage) DecodeReader(r io.Reader, maxLen int) error {

func DoHandshakeCurrentVersion(conn net.Conn, deadline time.Time) *HandshakeError {
	// current protocol version is hardcoded here
	return DoHandshakeVersion(conn, deadline, 4)
	return DoHandshakeVersion(conn, deadline, 5)
}

const HandshakeMessageMaxLen = 16 * 4096
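The hardcoded protocol version moves from 4 to 5 because the wire format changed (SendReq and ReceiveReq now carry ReplicationConfig); after the bump, a peer still speaking the older protocol should fail the transport handshake up front rather than exchange PDUs it cannot fully interpret.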
zfs/zfs.go (29 changed lines)
@ -1593,9 +1593,10 @@ var ErrBookmarkCloningNotSupported = fmt.Errorf("bookmark cloning feature is not

// idempotently create bookmark of the given version v
//
// v must be validated by the caller
// if `v` is a bookmark, returns ErrBookmarkCloningNotSupported
// unless a bookmark with the name `bookmark` exists and has the same identity (zfs.FilesystemVersionEqualIdentity)
//
// does not destroy an existing bookmark, returns
// v must be validated by the caller
//
func ZFSBookmark(ctx context.Context, fs string, v FilesystemVersion, bookmark string) (bm FilesystemVersion, err error) {
@ -1612,7 +1613,21 @@ func ZFSBookmark(ctx context.Context, fs string, v FilesystemVersion, bookmark s
	promTimer := prometheus.NewTimer(prom.ZFSBookmarkDuration.WithLabelValues(fs))
	defer promTimer.ObserveDuration()

	if !v.IsSnapshot() {
	bookmarkname := fmt.Sprintf("%s#%s", fs, bookmark)
	if err := EntityNamecheck(bookmarkname, EntityTypeBookmark); err != nil {
		return bm, err
	}

	if v.IsBookmark() {
		existingBm, err := ZFSGetFilesystemVersion(ctx, bookmarkname)
		if _, ok := err.(*DatasetDoesNotExist); ok {
			return bm, ErrBookmarkCloningNotSupported
		} else if err != nil {
			return bm, errors.Wrap(err, "bookmark: idempotency check for bookmark cloning")
		}
		if FilesystemVersionEqualIdentity(bm, existingBm) {
			return existingBm, nil
		}
		return bm, ErrBookmarkCloningNotSupported // TODO This is work in progress: https://github.com/zfsonlinux/zfs/pull/9571
	}
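In plain terms: when `v` is itself a bookmark, the function now first checks whether the target bookmark already exists and denotes the same version (same identity); if so, the call is an idempotent no-op that returns the existing bookmark. Only otherwise does it keep failing with ErrBookmarkCloningNotSupported, since creating a bookmark from a bookmark still depends on the upstream ZFS feature linked in the TODO.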
@ -1620,12 +1635,6 @@ func ZFSBookmark(ctx context.Context, fs string, v FilesystemVersion, bookmark s
|
|||||||
if err := EntityNamecheck(snapname, EntityTypeSnapshot); err != nil {
|
if err := EntityNamecheck(snapname, EntityTypeSnapshot); err != nil {
|
||||||
return bm, err
|
return bm, err
|
||||||
}
|
}
|
||||||
bookmarkname := fmt.Sprintf("%s#%s", fs, bookmark)
|
|
||||||
if err := EntityNamecheck(bookmarkname, EntityTypeBookmark); err != nil {
|
|
||||||
return bm, err
|
|
||||||
}
|
|
||||||
|
|
||||||
debug("bookmark: %q %q", snapname, bookmarkname)
|
|
||||||
|
|
||||||
cmd := zfscmd.CommandContext(ctx, ZFS_BINARY, "bookmark", snapname, bookmarkname)
|
cmd := zfscmd.CommandContext(ctx, ZFS_BINARY, "bookmark", snapname, bookmarkname)
|
||||||
stdio, err := cmd.CombinedOutput()
|
stdio, err := cmd.CombinedOutput()
|
||||||
@ -1637,7 +1646,7 @@ func ZFSBookmark(ctx context.Context, fs string, v FilesystemVersion, bookmark s
	// check if this was idempotent
	bookGuid, err := ZFSGetGUID(ctx, fs, "#"+bookmark)
	if err != nil {
		return bm, errors.Wrap(err, "bookmark idempotency check") // guid error expressive enough
		return bm, errors.Wrap(err, "bookmark: idempotency check for bookmark creation") // guid error expressive enough
	}

	if v.Guid == bookGuid {