make initial replication policy configurable (most_recent, all, fail)
Config:

```
- type: push
  ...
  conflict_resolution:
    initial_replication: most_recent | all | fail
```

The ``initial_replication`` option determines which snapshots zrepl replicates if the filesystem has not been replicated before.
If ``most_recent`` (the default), the initial replication will only transfer the most recent snapshot, ignoring previous snapshots.
If all snapshots should be replicated, specify ``all``.
Use ``fail`` to make replication of the filesystem fail if there is no corresponding filesystem on the receiver.

Code-level changes, apart from the obvious:

- Rework IncrementalPath()'s return signature: it now returns an error for initial replications as well.
- Rename & rework its consumer, resolveConflict().

Co-authored-by: Graham Christensen <graham@grahamc.com>

Fixes https://github.com/zrepl/zrepl/issues/550
Fixes https://github.com/zrepl/zrepl/issues/187
Closes https://github.com/zrepl/zrepl/pull/592
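For orientation, here is a sketch of how the new option sits inside a complete push job. Only the `conflict_resolution` block is what this commit introduces; the job name, connection endpoint, filesystem selection, snapshotting, and pruning values below are illustrative placeholders, not part of the change:

```yaml
jobs:
  - type: push
    name: "prod-to-backups"                  # hypothetical job name
    connect:
      type: tcp
      address: "backups.example.com:8888"    # hypothetical receiver endpoint
    filesystems:
      "tank<": true
    snapshotting:
      type: manual
    conflict_resolution:
      # what to do when the receiving side has no corresponding filesystem yet
      initial_replication: all               # most_recent (default) | all | fail
    pruning:
      keep_sender:
        - type: last_n
          count: 10
      keep_receiver:
        - type: last_n
          count: 10
```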
parent 1acafabb5b
commit 2642c64303
@@ -55,12 +55,17 @@ func (j JobEnum) Name() string {
 }
 
 type ActiveJob struct {
 	Type        string                `yaml:"type"`
 	Name        string                `yaml:"name"`
 	Connect     ConnectEnum           `yaml:"connect"`
 	Pruning     PruningSenderReceiver `yaml:"pruning"`
 	Debug       JobDebugSettings      `yaml:"debug,optional"`
 	Replication *Replication          `yaml:"replication,optional,fromdefaults"`
+	ConflictResolution *ConflictResolution `yaml:"conflict_resolution,optional,fromdefaults"`
+}
+
+type ConflictResolution struct {
+	InitialReplication string `yaml:"initial_replication,optional,default=most_recent"`
 }
 
 type PassiveJob struct {
@@ -162,8 +162,14 @@ func modePushFromConfig(g *config.Global, in *config.PushJob, jobID endpoint.Job
 		return nil, errors.Wrap(err, "field `replication`")
 	}
 
+	conflictResolution, err := logic.ConflictResolutionFromConfig(in.ConflictResolution)
+	if err != nil {
+		return nil, errors.Wrap(err, "field `conflict_resolution`")
+	}
+
 	m.plannerPolicy = &logic.PlannerPolicy{
 		EncryptedSend:             logic.TriFromBool(in.Send.Encrypted),
+		ConflictResolution:        conflictResolution,
 		ReplicationConfig:         replicationConfig,
 		SizeEstimationConcurrency: in.Replication.Concurrency.SizeEstimates,
 	}
@@ -261,8 +267,14 @@ func modePullFromConfig(g *config.Global, in *config.PullJob, jobID endpoint.Job
 		return nil, errors.Wrap(err, "field `replication`")
 	}
 
+	conflictResolution, err := logic.ConflictResolutionFromConfig(in.ConflictResolution)
+	if err != nil {
+		return nil, errors.Wrap(err, "field `conflict_resolution`")
+	}
+
 	m.plannerPolicy = &logic.PlannerPolicy{
 		EncryptedSend:             logic.DontCare,
+		ConflictResolution:        conflictResolution,
 		ReplicationConfig:         replicationConfig,
 		SizeEstimationConcurrency: in.Replication.Concurrency.SizeEstimates,
 	}
@@ -13,6 +13,7 @@ Configuration
    configuration/filter_syntax
    configuration/sendrecvoptions
    configuration/replication
+   configuration/conflict_resolution
    configuration/snapshotting
    configuration/prune
    configuration/logging
docs/configuration/conflict_resolution.rst (new file, 36 lines)
@@ -0,0 +1,36 @@
+.. include:: ../global.rst.inc
+
+
+Conflict Resolution Options
+===========================
+
+
+::
+
+   jobs:
+   - type: push
+     filesystems: ...
+     conflict_resolution:
+       initial_replication: most_recent | all | fail # default: most_recent
+
+   ...
+
+.. _conflict_resolution-initial_replication-option-send_all_snapshots:
+
+
+``initial_replication`` option
+------------------------------
+
+The ``initial_replication`` option determines how many snapshots zrepl replicates if the filesystem has not been replicated before.
+If ``most_recent`` (the default), the initial replication will only transfer the most recent snapshot, ignoring previous snapshots.
+If all snapshots should be replicated, specify ``all``.
+Use ``fail`` to make replication of the filesystem fail in case there is no corresponding filesystem on the receiver.
+
+For example, suppose there are snapshots ``tank@1``, ``tank@2``, ``tank@3`` on a sender.
+Then ``most_recent`` will replicate just ``@3``, but ``all`` will replicate ``@1``, ``@2``, and ``@3``.
+
+If initial replication is interrupted, and there is at least one (possibly partial) snapshot on the receiver, zrepl will always resume in **incremental mode**.
+This holds regardless of where the initial replication was interrupted.
+
+For example, if ``initial_replication: all`` and the transfer of ``@1`` is interrupted, zrepl retries/resumes at ``@1``.
+Even if the user changes the config to ``initial_replication: most_recent`` before resuming, **incremental mode** will still resume at ``@1``.
@@ -813,7 +813,7 @@ func listStaleFiltering(abs []Abstraction, sinceBound *CreateTXGRangeBound) *Sta
 		l := by[k]
 
 		if k.Type == AbstractionStepHold {
-			// all older than the most recent cursor are stale, others are always live
+			// all step holds older than the most recent cursor are stale, others are always live
 
 			// if we don't have a replication cursor yet, use untilBound = nil
 			// to consider all steps stale (...at first)
@@ -821,11 +821,10 @@ func listStaleFiltering(abs []Abstraction, sinceBound *CreateTXGRangeBound) *Sta
 			{
 				sfnsc := stepFirstNotStaleCandidates[k.fsAndJobId]
 
-				// if there's a replication cursor, use it as a cutoff between live and stale
-				// if there's none, we are in initial replication and only need to keep
-				// the most recent step hold live, since that's what our initial replication strategy
-				// uses (both initially and on resume)
-				// (FIXME hardcoded replication strategy)
+				// If there's a replication cursor, use it as a cutoff between live and stale
+				// for both cursors and holds.
+				// If there's no cursor, we are in initial replication and only need to keep
+				// the most recent step hold live, which holds the .To of the initial send step.
 				if sfnsc.cursor != nil {
 					untilBound = &CreateTXGRangeBound{
 						CreateTXG: (*sfnsc.cursor).GetCreateTXG(),
@@ -858,7 +857,7 @@ func listStaleFiltering(abs []Abstraction, sinceBound *CreateTXGRangeBound) *Sta
 			}
 
 		} else if k.Type == AbstractionReplicationCursorBookmarkV2 || k.Type == AbstractionLastReceivedHold {
-			// all but the most recent are stale by definition (we always _move_ them)
+			// all cursors but the most recent cursor are stale by definition (we always _move_ them)
 			// NOTE: must not use firstNotStale in this branch, not computed for these types
 
 			// sort descending (highest createtxg first), then cut off
@@ -21,6 +21,9 @@ var Cases = []Case{BatchDestroy,
 	ReplicationIncrementalCleansUpStaleAbstractionsWithoutCacheOnSecondReplication,
 	ReplicationIncrementalDestroysStepHoldsIffIncrementalStepHoldsAreDisabledButStepHoldsExist,
 	ReplicationIncrementalIsPossibleIfCommonSnapshotIsDestroyed,
+	ReplicationInitialAll,
+	ReplicationInitialFail,
+	ReplicationInitialMostRecent,
 	ReplicationIsResumableFullSend__both_GuaranteeResumability,
 	ReplicationIsResumableFullSend__initial_GuaranteeIncrementalReplication_incremental_GuaranteeIncrementalReplication,
 	ReplicationIsResumableFullSend__initial_GuaranteeResumability_incremental_GuaranteeIncrementalReplication,
@@ -44,6 +44,7 @@ type replicationInvocation struct {
 	guarantee          *pdu.ReplicationConfigProtection
 	senderConfigHook   func(*endpoint.SenderConfig)
 	receiverConfigHook func(*endpoint.ReceiverConfig)
+	plannerPolicyHook  func(*logic.PlannerPolicy)
 }
 
 func (i replicationInvocation) Do(ctx *platformtest.Context) *report.Report {
@@ -95,8 +96,14 @@ func (i replicationInvocation) Do(ctx *platformtest.Context) *report.Report {
 		ReplicationConfig: &pdu.ReplicationConfig{
 			Protection: i.guarantee,
 		},
+		ConflictResolution: &logic.ConflictResolution{
+			InitialReplication: logic.InitialReplicationAutoResolutionMostRecent,
+		},
 		SizeEstimationConcurrency: 1,
 	}
+	if i.plannerPolicyHook != nil {
+		i.plannerPolicyHook(&plannerPolicy)
+	}
 
 	report, wait := replication.Do(
 		ctx,
@@ -1306,3 +1313,79 @@ func ReplicationPlaceholderEncryption__EncryptOnReceiverUseCase__WorksIfConfigur
 	require.NoError(ctx, err)
 	require.Equal(ctx, rfsRoot, props.Get("encryptionroot"))
 }
+
+func replicationInitialImpl(ctx *platformtest.Context, iras logic.InitialReplicationAutoResolution, expectExactRfsSnaps []string) *report.Report {
+	// reverse order for snap names to expose sorting assumptions
+	platformtest.Run(ctx, platformtest.PanicErr, ctx.RootDataset, `
+		CREATEROOT
+		+ "sender"
+		+ "sender@3"
+		+ "sender@2"
+		+ "sender@1"
+		+ "receiver"
+		R zfs create -p "${ROOTDS}/receiver/${ROOTDS}"
+	`)
+
+	sjid := endpoint.MustMakeJobID("sender-job")
+	rjid := endpoint.MustMakeJobID("receiver-job")
+
+	sfs := ctx.RootDataset + "/sender"
+	rfsRoot := ctx.RootDataset + "/receiver"
+
+	rep := replicationInvocation{
+		sjid:      sjid,
+		rjid:      rjid,
+		sfs:       sfs,
+		rfsRoot:   rfsRoot,
+		guarantee: pdu.ReplicationConfigProtectionWithKind(pdu.ReplicationGuaranteeKind_GuaranteeResumability),
+		plannerPolicyHook: func(pp *logic.PlannerPolicy) {
+			pp.ConflictResolution.InitialReplication = iras
+		},
+	}
+	rfs := rep.ReceiveSideFilesystem()
+
+	// first replication
+	report := rep.Do(ctx)
+	ctx.Logf("\n%s", pretty.Sprint(report))
+
+	versions, err := zfs.ZFSListFilesystemVersions(ctx, mustDatasetPath(rfs), zfs.ListFilesystemVersionsOptions{Types: zfs.Snapshots})
+	if _, ok := err.(*zfs.DatasetDoesNotExist); ok {
+		versions = nil
+	} else {
+		require.NoError(ctx, err)
+	}
+
+	bySnapName := make(map[string]int)
+	for _, v := range versions {
+		bySnapName[v.GetName()] += 1
+	}
+	for _, v := range expectExactRfsSnaps {
+		bySnapName[v] -= 1
+	}
+
+	for _, v := range bySnapName {
+		if v != 0 {
+			ctx.Logf("unexpected snaps:\n%#v", bySnapName)
+			ctx.FailNow()
+		}
+	}
+
+	return report
+}
+
+func ReplicationInitialAll(ctx *platformtest.Context) {
+	replicationInitialImpl(ctx, logic.InitialReplicationAutoResolutionAll, []string{"3", "2", "1"})
+}
+
+func ReplicationInitialMostRecent(ctx *platformtest.Context) {
+	replicationInitialImpl(ctx, logic.InitialReplicationAutoResolutionMostRecent, []string{"1"})
+}
+
+func ReplicationInitialFail(ctx *platformtest.Context) {
+	report := replicationInitialImpl(ctx, logic.InitialReplicationAutoResolutionFail, []string{})
+	require.Len(ctx, report.Attempts, 1)
+	require.Nil(ctx, report.Attempts[0].PlanError)
+	require.Len(ctx, report.Attempts[0].Filesystems, 1)
+	require.NotNil(ctx, report.Attempts[0].Filesystems[0].PlanError)
+	require.Contains(ctx, report.Attempts[0].Filesystems[0].PlanError.Err, "automatic conflict resolution for initial replication is disabled in config")
+}
@@ -49,6 +49,23 @@ func (c *ConflictDiverged) Error() string {
 	return buf.String()
 }
 
+type ConflictNoSenderSnapshots struct{}
+
+func (c *ConflictNoSenderSnapshots) Error() string {
+	return "no snapshots available on sender side"
+}
+
+type ConflictMostRecentSnapshotAlreadyPresent struct {
+	SortedSenderVersions, SortedReceiverVersions []*FilesystemVersion
+	CommonAncestor                               *FilesystemVersion
+}
+
+func (c *ConflictMostRecentSnapshotAlreadyPresent) Error() string {
+	var buf strings.Builder
+	fmt.Fprintf(&buf, "the most recent sender snapshot is already present on the receiver (guid=%v, name=%q)", c.CommonAncestor.GetGuid(), c.CommonAncestor.RelName())
+	return buf.String()
+}
+
 func SortVersionListByCreateTXGThenBookmarkLTSnapshot(fsvslice []*FilesystemVersion) []*FilesystemVersion {
 	lesser := func(s []*FilesystemVersion) func(i, j int) bool {
 		return func(i, j int) bool {
@@ -71,7 +88,6 @@ func SortVersionListByCreateTXGThenBookmarkLTSnapshot(fsvslice []*FilesystemVers
 	return sorted
 }
 
-// conflict may be a *ConflictDiverged or a *ConflictNoCommonAncestor
 func IncrementalPath(receiver, sender []*FilesystemVersion) (incPath []*FilesystemVersion, conflict error) {
 
 	receiver = SortVersionListByCreateTXGThenBookmarkLTSnapshot(receiver)
@@ -98,9 +114,13 @@ findCandidate:
 
 	// handle failure cases
 	if !mrcaCandidate.found {
-		return nil, &ConflictNoCommonAncestor{
-			SortedSenderVersions:   sender,
-			SortedReceiverVersions: receiver,
+		if len(sender) == 0 {
+			return nil, &ConflictNoSenderSnapshots{}
+		} else {
+			return nil, &ConflictNoCommonAncestor{
+				SortedSenderVersions:   sender,
+				SortedReceiverVersions: receiver,
+			}
 		}
 	} else if mrcaCandidate.r != len(receiver)-1 {
 		return nil, &ConflictDiverged{
@@ -125,7 +145,11 @@ findCandidate:
 	}
 	if len(incPath) == 1 {
 		// nothing to do
-		incPath = incPath[1:]
+		return nil, &ConflictMostRecentSnapshotAlreadyPresent{
+			SortedSenderVersions:   sender,
+			SortedReceiverVersions: receiver,
+			CommonAncestor:         sender[mrcaCandidate.s],
+		}
 	}
 	return incPath, nil
 }
||||||
|
@ -96,12 +96,37 @@ func TestIncrementalPath_SnapshotsOnly(t *testing.T) {
|
|||||||
assert.Equal(t, l("@c,3", "@d,4"), path)
|
assert.Equal(t, l("@c,3", "@d,4"), path)
|
||||||
})
|
})
|
||||||
|
|
||||||
// sender with earlier but also current version as sender is not a conflict
|
// nothing to do if fully shared history
|
||||||
|
doTest(l("@a,1", "@b,2"), l("@a,1", "@b,2"), func(incpath []*FilesystemVersion, conflict error) {
|
||||||
|
assert.Nil(t, incpath)
|
||||||
|
assert.NotNil(t, conflict)
|
||||||
|
_, ok := conflict.(*ConflictMostRecentSnapshotAlreadyPresent)
|
||||||
|
assert.True(t, ok)
|
||||||
|
})
|
||||||
|
|
||||||
|
// ...but it's sufficient if the most recent snapshot is present
|
||||||
doTest(l("@c,3"), l("@a,1", "@b,2", "@c,3"), func(path []*FilesystemVersion, conflict error) {
|
doTest(l("@c,3"), l("@a,1", "@b,2", "@c,3"), func(path []*FilesystemVersion, conflict error) {
|
||||||
t.Logf("path: %#v", path)
|
assert.Nil(t, path)
|
||||||
t.Logf("conflict: %#v", conflict)
|
_, ok := conflict.(*ConflictMostRecentSnapshotAlreadyPresent)
|
||||||
assert.Empty(t, path)
|
assert.True(t, ok)
|
||||||
assert.Nil(t, conflict)
|
})
|
||||||
|
|
||||||
|
// no sender snapshots errors: empty receiver
|
||||||
|
doTest(l(), l(), func(incpath []*FilesystemVersion, conflict error) {
|
||||||
|
assert.Nil(t, incpath)
|
||||||
|
assert.NotNil(t, conflict)
|
||||||
|
t.Logf("%T", conflict)
|
||||||
|
_, ok := conflict.(*ConflictNoSenderSnapshots)
|
||||||
|
assert.True(t, ok)
|
||||||
|
})
|
||||||
|
|
||||||
|
// no sender snapshots errors: snapshots on receiver
|
||||||
|
doTest(l("@a,1"), l(), func(incpath []*FilesystemVersion, conflict error) {
|
||||||
|
assert.Nil(t, incpath)
|
||||||
|
assert.NotNil(t, conflict)
|
||||||
|
t.Logf("%T", conflict)
|
||||||
|
_, ok := conflict.(*ConflictNoSenderSnapshots)
|
||||||
|
assert.True(t, ok)
|
||||||
})
|
})
|
||||||
|
|
||||||
}
|
}
|
||||||
|
replication/logic/initialreplicationautoresolution_enumer.go (new file, 62 lines)
@@ -0,0 +1,62 @@
+// Code generated by "enumer -type=InitialReplicationAutoResolution -trimprefix=InitialReplicationAutoResolution"; DO NOT EDIT.
+
+//
+package logic
+
+import (
+	"fmt"
+)
+
+const (
+	_InitialReplicationAutoResolutionName_0 = "MostRecentAll"
+	_InitialReplicationAutoResolutionName_1 = "Fail"
+)
+
+var (
+	_InitialReplicationAutoResolutionIndex_0 = [...]uint8{0, 10, 13}
+	_InitialReplicationAutoResolutionIndex_1 = [...]uint8{0, 4}
+)
+
+func (i InitialReplicationAutoResolution) String() string {
+	switch {
+	case 1 <= i && i <= 2:
+		i -= 1
+		return _InitialReplicationAutoResolutionName_0[_InitialReplicationAutoResolutionIndex_0[i]:_InitialReplicationAutoResolutionIndex_0[i+1]]
+	case i == 4:
+		return _InitialReplicationAutoResolutionName_1
+	default:
+		return fmt.Sprintf("InitialReplicationAutoResolution(%d)", i)
+	}
+}
+
+var _InitialReplicationAutoResolutionValues = []InitialReplicationAutoResolution{1, 2, 4}
+
+var _InitialReplicationAutoResolutionNameToValueMap = map[string]InitialReplicationAutoResolution{
+	_InitialReplicationAutoResolutionName_0[0:10]:  1,
+	_InitialReplicationAutoResolutionName_0[10:13]: 2,
+	_InitialReplicationAutoResolutionName_1[0:4]:   4,
+}
+
+// InitialReplicationAutoResolutionString retrieves an enum value from the enum constants string name.
+// Throws an error if the param is not part of the enum.
+func InitialReplicationAutoResolutionString(s string) (InitialReplicationAutoResolution, error) {
+	if val, ok := _InitialReplicationAutoResolutionNameToValueMap[s]; ok {
+		return val, nil
+	}
+	return 0, fmt.Errorf("%s does not belong to InitialReplicationAutoResolution values", s)
+}
+
+// InitialReplicationAutoResolutionValues returns all values of the enum
+func InitialReplicationAutoResolutionValues() []InitialReplicationAutoResolution {
+	return _InitialReplicationAutoResolutionValues
+}
+
+// IsAInitialReplicationAutoResolution returns "true" if the value is listed in the enum definition. "false" otherwise
+func (i InitialReplicationAutoResolution) IsAInitialReplicationAutoResolution() bool {
+	for _, v := range _InitialReplicationAutoResolutionValues {
+		if i == v {
+			return true
+		}
+	}
+	return false
+}
@@ -235,25 +235,55 @@ func NewPlanner(secsPerState *prometheus.HistogramVec, bytesReplicated *promethe
 		promBytesReplicated: bytesReplicated,
 	}
 }
-func resolveConflict(conflict error) (path []*pdu.FilesystemVersion, msg string) {
-	if noCommonAncestor, ok := conflict.(*ConflictNoCommonAncestor); ok {
-		if len(noCommonAncestor.SortedReceiverVersions) == 0 {
-			// TODO this is hard-coded replication policy: most recent snapshot as source
-			// NOTE: Keep in sync with listStaleFiltering, it depends on this hard-coded assumption
-			var mostRecentSnap *pdu.FilesystemVersion
-			for n := len(noCommonAncestor.SortedSenderVersions) - 1; n >= 0; n-- {
-				if noCommonAncestor.SortedSenderVersions[n].Type == pdu.FilesystemVersion_Snapshot {
-					mostRecentSnap = noCommonAncestor.SortedSenderVersions[n]
-					break
-				}
-			}
-			if mostRecentSnap == nil {
-				return nil, "no snapshots available on sender side"
-			}
-			return []*pdu.FilesystemVersion{mostRecentSnap}, fmt.Sprintf("start replication at most recent snapshot %s", mostRecentSnap.RelName())
-		}
-	}
-	return nil, "no automated way to handle conflict type"
-}
+func tryAutoresolveConflict(conflict error, policy ConflictResolution) (path []*pdu.FilesystemVersion, reason error) {
+
+	if _, ok := conflict.(*ConflictMostRecentSnapshotAlreadyPresent); ok {
+		// replication is a no-op
+		return nil, nil
+	}
+
+	if noCommonAncestor, ok := conflict.(*ConflictNoCommonAncestor); ok {
+		if len(noCommonAncestor.SortedReceiverVersions) == 0 {
+
+			if len(noCommonAncestor.SortedSenderVersions) == 0 {
+				return nil, fmt.Errorf("no snapshots available on sender side")
+			}
+
+			switch policy.InitialReplication {
+
+			case InitialReplicationAutoResolutionMostRecent:
+
+				var mostRecentSnap *pdu.FilesystemVersion
+				for n := len(noCommonAncestor.SortedSenderVersions) - 1; n >= 0; n-- {
+					if noCommonAncestor.SortedSenderVersions[n].Type == pdu.FilesystemVersion_Snapshot {
+						mostRecentSnap = noCommonAncestor.SortedSenderVersions[n]
+						break
+					}
+				}
+				return []*pdu.FilesystemVersion{nil, mostRecentSnap}, nil
+
+			case InitialReplicationAutoResolutionAll:
+
+				path = append(path, nil)
+
+				for n := 0; n < len(noCommonAncestor.SortedSenderVersions); n++ {
+					if noCommonAncestor.SortedSenderVersions[n].Type == pdu.FilesystemVersion_Snapshot {
+						path = append(path, noCommonAncestor.SortedSenderVersions[n])
+					}
+				}
+				return path, nil
+
+			case InitialReplicationAutoResolutionFail:
+
+				return nil, fmt.Errorf("automatic conflict resolution for initial replication is disabled in config")
+
+			default:
+				panic(fmt.Sprintf("unimplemented: %#v", policy.InitialReplication))
+			}
+		}
+	}
+	return nil, conflict
+}
 
 func (p *Planner) doPlanning(ctx context.Context) ([]*Filesystem, error) {
@@ -456,39 +486,31 @@ func (fs *Filesystem) doPlanning(ctx context.Context) ([]*Step, error) {
 	} else { // resumeToken == nil
 		path, conflict := IncrementalPath(rfsvs, sfsvs)
 		if conflict != nil {
-			var msg string
-			path, msg = resolveConflict(conflict) // no shadowing allowed!
-			if path != nil {
-				log(ctx).WithField("conflict", conflict).Info("conflict")
-				log(ctx).WithField("resolution", msg).Info("automatically resolved")
+			updPath, updConflict := tryAutoresolveConflict(conflict, *fs.policy.ConflictResolution)
+			if updConflict == nil {
+				log(ctx).WithField("conflict", conflict).Info("conflict automatically resolved")
 			} else {
-				log(ctx).WithField("conflict", conflict).Error("conflict")
-				log(ctx).WithField("problem", msg).Error("cannot resolve conflict")
+				log(ctx).WithField("conflict", conflict).Error("cannot resolve conflict")
 			}
+			path, conflict = updPath, updConflict
 		}
-		if len(path) == 0 {
+		if conflict != nil {
 			return nil, conflict
 		}
-		steps = make([]*Step, 0, len(path)) // shadow
-		if len(path) == 1 {
-			steps = append(steps, &Step{
-				parent:   fs,
-				sender:   fs.sender,
-				receiver: fs.receiver,
-
-				from:    nil,
-				to:      path[0],
-				encrypt: fs.policy.EncryptedSend,
-			})
+		if len(path) == 0 {
+			steps = nil
+		} else if len(path) == 1 {
+			panic(fmt.Sprintf("len(path) must be two for incremental repl, and initial repl must start with nil, got path[0]=%#v", path[0]))
 		} else {
+			steps = make([]*Step, 0, len(path)) // shadow
 			for i := 0; i < len(path)-1; i++ {
 				steps = append(steps, &Step{
 					parent:   fs,
 					sender:   fs.sender,
 					receiver: fs.receiver,
 
-					from:    path[i],
+					from:    path[i], // nil in case of initial repl
 					to:      path[i+1],
 					encrypt: fs.policy.EncryptedSend,
 				})
@@ -1,6 +1,9 @@
 package logic
 
 import (
+	"fmt"
+	"strings"
+
 	"github.com/go-playground/validator"
 	"github.com/pkg/errors"
 
@@ -8,16 +11,74 @@ import (
 	"github.com/zrepl/zrepl/replication/logic/pdu"
 )
 
+//go:generate enumer -type=InitialReplicationAutoResolution -trimprefix=InitialReplicationAutoResolution
+type InitialReplicationAutoResolution uint32
+
+const (
+	InitialReplicationAutoResolutionMostRecent InitialReplicationAutoResolution = 1 << iota
+	InitialReplicationAutoResolutionAll
+	InitialReplicationAutoResolutionFail
+)
+
+var initialReplicationAutoResolutionConfigMap = map[InitialReplicationAutoResolution]string{
+	InitialReplicationAutoResolutionMostRecent: "most_recent",
+	InitialReplicationAutoResolutionAll:        "all",
+	InitialReplicationAutoResolutionFail:       "fail",
+}
+
+func InitialReplicationAutoResolutionFromConfig(in string) (InitialReplicationAutoResolution, error) {
+	for v, s := range initialReplicationAutoResolutionConfigMap {
+		if s == in {
+			return v, nil
+		}
+	}
+	l := make([]string, 0, len(initialReplicationAutoResolutionConfigMap))
+	for _, v := range InitialReplicationAutoResolutionValues() {
+		l = append(l, initialReplicationAutoResolutionConfigMap[v])
+	}
+	return 0, fmt.Errorf("invalid value %q, must be one of %s", in, strings.Join(l, ", "))
+}
+
+type ConflictResolution struct {
+	InitialReplication InitialReplicationAutoResolution
+}
+
+func (c *ConflictResolution) Validate() error {
+	if !c.InitialReplication.IsAInitialReplicationAutoResolution() {
+		return errors.Errorf("must be one of %s", InitialReplicationAutoResolutionValues())
+	}
+	return nil
+}
+
+func ConflictResolutionFromConfig(in *config.ConflictResolution) (*ConflictResolution, error) {
+
+	initialReplication, err := InitialReplicationAutoResolutionFromConfig(in.InitialReplication)
+	if err != nil {
+		return nil, errors.Errorf("field `initial_replication` is invalid: %q is not one of %v", in.InitialReplication, InitialReplicationAutoResolutionValues())
+	}
+
+	return &ConflictResolution{
+		InitialReplication: initialReplication,
+	}, nil
+}
+
 type PlannerPolicy struct {
 	EncryptedSend tri // all sends must be encrypted (send -w, and encryption!=off)
-	ReplicationConfig         *pdu.ReplicationConfig
-	SizeEstimationConcurrency int `validate:"gte=1"`
+	ConflictResolution        *ConflictResolution    `validate:"ne=nil"`
+	ReplicationConfig         *pdu.ReplicationConfig `validate:"ne=nil"`
+	SizeEstimationConcurrency int                    `validate:"gte=1"`
 }
 
 var validate = validator.New()
 
 func (p PlannerPolicy) Validate() error {
-	return validate.Struct(p)
+	if err := validate.Struct(p); err != nil {
+		return err
+	}
+	if err := p.ConflictResolution.Validate(); err != nil {
+		return err
+	}
+	return nil
 }
 
 func ReplicationConfigFromConfig(in *config.Replication) (*pdu.ReplicationConfig, error) {