Mirror of https://github.com/zrepl/zrepl.git (synced 2025-02-16 10:29:54 +01:00)

Commit 6f7467e8d8: Merge branch 'InsanePrawn-master' into 'master'
@@ -297,7 +297,44 @@ func (t *tui) draw() {
	t.setIndent(1)
	t.newline()

	if v.Type != job.TypePush && v.Type != job.TypePull {
	if v.Type == job.TypePush || v.Type == job.TypePull {
		activeStatus, ok := v.JobSpecific.(*job.ActiveSideStatus)
		if !ok || activeStatus == nil {
			t.printf("ActiveSideStatus is null")
			t.newline()
			continue
		}

		t.printf("Replication:")
		t.newline()
		t.addIndent(1)
		t.renderReplicationReport(activeStatus.Replication, t.getReplicationProgresHistory(k))
		t.addIndent(-1)

		t.printf("Pruning Sender:")
		t.newline()
		t.addIndent(1)
		t.renderPrunerReport(activeStatus.PruningSender)
		t.addIndent(-1)

		t.printf("Pruning Receiver:")
		t.newline()
		t.addIndent(1)
		t.renderPrunerReport(activeStatus.PruningReceiver)
		t.addIndent(-1)
	} else if v.Type == job.TypeSnap {
		snapStatus, ok := v.JobSpecific.(*job.SnapJobStatus)
		if !ok || snapStatus == nil {
			t.printf("SnapJobStatus is null")
			t.newline()
			continue
		}
		t.printf("Pruning snapshots:")
		t.newline()
		t.addIndent(1)
		t.renderPrunerReport(snapStatus.Pruning)
		t.addIndent(-1)
	} else {
		t.printf("No status representation for job type '%s', dumping as YAML", v.Type)
		t.newline()
		asYaml, err := yaml.Marshal(v.JobSpecific)
@@ -310,32 +347,6 @@ func (t *tui) draw() {
			t.newline()
			continue
		}

		pushStatus, ok := v.JobSpecific.(*job.ActiveSideStatus)
		if !ok || pushStatus == nil {
			t.printf("ActiveSideStatus is null")
			t.newline()
			continue
		}

		t.printf("Replication:")
		t.newline()
		t.addIndent(1)
		t.renderReplicationReport(pushStatus.Replication, t.getReplicationProgresHistory(k))
		t.addIndent(-1)

		t.printf("Pruning Sender:")
		t.newline()
		t.addIndent(1)
		t.renderPrunerReport(pushStatus.PruningSender)
		t.addIndent(-1)

		t.printf("Pruning Receiver:")
		t.newline()
		t.addIndent(1)
		t.renderPrunerReport(pushStatus.PruningReceiver)
		t.addIndent(-1)

		}
	}
	termbox.Flush()
@@ -34,6 +34,7 @@ type JobEnum struct {
func (j JobEnum) Name() string {
	var name string
	switch v := j.Ret.(type) {
	case *SnapJob: name = v.Name
	case *PushJob: name = v.Name
	case *SinkJob: name = v.Name
	case *PullJob: name = v.Name
@@ -59,6 +60,15 @@ type PassiveJob struct {
	Debug JobDebugSettings `yaml:"debug,optional"`
}

type SnapJob struct {
	Type         string            `yaml:"type"`
	Name         string            `yaml:"name"`
	Pruning      PruningLocal      `yaml:"pruning"`
	Debug        JobDebugSettings  `yaml:"debug,optional"`
	Snapshotting SnapshottingEnum  `yaml:"snapshotting"`
	Filesystems  FilesystemsFilter `yaml:"filesystems"`
}

type PushJob struct {
	ActiveJob    `yaml:",inline"`
	Snapshotting SnapshottingEnum `yaml:"snapshotting"`
@@ -368,6 +378,7 @@ func enumUnmarshal(u func(interface{}, bool) error, types map[string]interface{}

func (t *JobEnum) UnmarshalYAML(u func(interface{}, bool) error) (err error) {
	t.Ret, err = enumUnmarshal(u, map[string]interface{}{
		"snap": &SnapJob{},
		"push": &PushJob{},
		"sink": &SinkJob{},
		"pull": &PullJob{},
config/samples/snap.yml (new file, 14 lines)
@@ -0,0 +1,14 @@
jobs:
- name: snapjob
  type: snap
  filesystems: {
    "tank/frequently_changed<": true,
  }
  snapshotting:
    type: periodic
    interval: 2m
    prefix: zrepl_snapjob_
  pruning:
    keep:
    - type: last_n
      count: 60
@@ -45,6 +45,11 @@ func buildJob(c *config.Global, in config.JobEnum) (j Job, err error) {
		if err != nil {
			return cannotBuildJob(err, v.Name)
		}
	case *config.SnapJob:
		j, err = snapJobFromConfig(c, v)
		if err != nil {
			return cannotBuildJob(err, v.Name)
		}
	case *config.PushJob:
		m, err := modePushFromConfig(c, v)
		if err != nil {
@@ -39,6 +39,7 @@ type Type string

const (
	TypeInternal Type = "internal"
	TypeSnap     Type = "snap"
	TypePush     Type = "push"
	TypeSink     Type = "sink"
	TypePull     Type = "pull"
@@ -84,6 +85,10 @@ func (s *Status) UnmarshalJSON(in []byte) (err error) {
		return fmt.Errorf("field '%s', not found", key)
	}
	switch s.Type {
	case TypeSnap:
		var st SnapJobStatus
		err = json.Unmarshal(jobJSON, &st)
		s.JobSpecific = &st
	case TypePull: fallthrough
	case TypePush:
		var st ActiveSideStatus
daemon/job/snapjob.go (new file, 163 lines)
@@ -0,0 +1,163 @@
package job

import (
	"context"
	"fmt"
	"sort"

	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/zrepl/zrepl/config"
	"github.com/zrepl/zrepl/daemon/filters"
	"github.com/zrepl/zrepl/daemon/job/wakeup"
	"github.com/zrepl/zrepl/daemon/logging"
	"github.com/zrepl/zrepl/daemon/pruner"
	"github.com/zrepl/zrepl/daemon/snapper"
	"github.com/zrepl/zrepl/endpoint"
	"github.com/zrepl/zrepl/replication/logic/pdu"
	"github.com/zrepl/zrepl/zfs"
)

type SnapJob struct {
	name     string
	fsfilter zfs.DatasetFilter
	snapper  *snapper.PeriodicOrManual

	prunerFactory *pruner.LocalPrunerFactory

	promPruneSecs *prometheus.HistogramVec // labels: prune_side

	pruner *pruner.Pruner
}

func (j *SnapJob) Name() string { return j.name }

func (j *SnapJob) Type() Type { return TypeSnap }

func snapJobFromConfig(g *config.Global, in *config.SnapJob) (j *SnapJob, err error) {
	j = &SnapJob{}
	fsf, err := filters.DatasetMapFilterFromConfig(in.Filesystems)
	if err != nil {
		return nil, errors.Wrap(err, "cannot build filesystem filter")
	}
	j.fsfilter = fsf

	if j.snapper, err = snapper.FromConfig(g, fsf, in.Snapshotting); err != nil {
		return nil, errors.Wrap(err, "cannot build snapper")
	}
	j.name = in.Name
	j.promPruneSecs = prometheus.NewHistogramVec(prometheus.HistogramOpts{
		Namespace:   "zrepl",
		Subsystem:   "pruning",
		Name:        "time",
		Help:        "seconds spent in pruner",
		ConstLabels: prometheus.Labels{"zrepl_job": j.name},
	}, []string{"prune_side"})
	j.prunerFactory, err = pruner.NewLocalPrunerFactory(in.Pruning, j.promPruneSecs)
	if err != nil {
		return nil, errors.Wrap(err, "cannot build snapjob pruning rules")
	}
	return j, nil
}

func (j *SnapJob) RegisterMetrics(registerer prometheus.Registerer) {
	registerer.MustRegister(j.promPruneSecs)
}

type SnapJobStatus struct {
	Pruning *pruner.Report
}

func (j *SnapJob) Status() *Status {
	s := &SnapJobStatus{}
	t := j.Type()
	if j.pruner != nil {
		s.Pruning = j.pruner.Report()
	}
	return &Status{Type: t, JobSpecific: s}
}

func (j *SnapJob) Run(ctx context.Context) {
	log := GetLogger(ctx)
	ctx = logging.WithSubsystemLoggers(ctx, log)

	defer log.Info("job exiting")

	periodicDone := make(chan struct{})
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	go j.snapper.Run(ctx, periodicDone)

	invocationCount := 0
outer:
	for {
		log.Info("wait for wakeups")
		select {
		case <-ctx.Done():
			log.WithError(ctx.Err()).Info("context")
			break outer

		case <-wakeup.Wait(ctx):
		case <-periodicDone:
		}
		invocationCount++
		invLog := log.WithField("invocation", invocationCount)
		j.doPrune(WithLogger(ctx, invLog))
	}
}

// Adaptor that implements pruner.History around a pruner.Target.
// The ReplicationCursor method is Get-op only and always returns
// the filesystem's most recent version's GUID.
//
// TODO:
// This is a work-around for the current package daemon/pruner
// and package pruning.Snapshot limitation: they require the
// `Replicated` getter method to be present, but obviously,
// a local job like SnapJob can't deliver on that.
// But the pruner.Pruner gives up on an FS if no replication
// cursor is present, which is why this pruner returns the
// most recent filesystem version.
type alwaysUpToDateReplicationCursorHistory struct {
	// the Target passed as Target to BuildLocalPruner
	target pruner.Target
}

var _ pruner.History = (*alwaysUpToDateReplicationCursorHistory)(nil)

func (h alwaysUpToDateReplicationCursorHistory) ReplicationCursor(ctx context.Context, req *pdu.ReplicationCursorReq) (*pdu.ReplicationCursorRes, error) {
	if req.GetGet() == nil {
		return nil, fmt.Errorf("unsupported ReplicationCursor request: SnapJob only supports GETting a (faked) cursor")
	}
	fsvReq := &pdu.ListFilesystemVersionsReq{
		Filesystem: req.GetFilesystem(),
	}
	res, err := h.target.ListFilesystemVersions(ctx, fsvReq)
	if err != nil {
		return nil, err
	}
	fsvs := res.GetVersions()
	if len(fsvs) <= 0 {
		return &pdu.ReplicationCursorRes{Result: &pdu.ReplicationCursorRes_Notexist{Notexist: true}}, nil
	}
	// always return the most recent version
	sort.Slice(fsvs, func(i, j int) bool {
		return fsvs[i].CreateTXG < fsvs[j].CreateTXG
	})
	mostRecent := fsvs[len(fsvs)-1]
	return &pdu.ReplicationCursorRes{Result: &pdu.ReplicationCursorRes_Guid{Guid: mostRecent.GetGuid()}}, nil
}

func (h alwaysUpToDateReplicationCursorHistory) ListFilesystems(ctx context.Context, req *pdu.ListFilesystemReq) (*pdu.ListFilesystemRes, error) {
	return h.target.ListFilesystems(ctx, req)
}

func (j *SnapJob) doPrune(ctx context.Context) {
	log := GetLogger(ctx)
	ctx = logging.WithSubsystemLoggers(ctx, log)
	sender := endpoint.NewSender(j.fsfilter)
	j.pruner = j.prunerFactory.BuildLocalPruner(ctx, sender, alwaysUpToDateReplicationCursorHistory{sender})
	log.Info("start pruning")
	j.pruner.Prune()
	log.Info("finished pruning")
}
@@ -78,6 +78,12 @@ type PrunerFactory struct {
	promPruneSecs *prometheus.HistogramVec
}

type LocalPrunerFactory struct {
	keepRules     []pruning.KeepRule
	retryWait     time.Duration
	promPruneSecs *prometheus.HistogramVec
}

func checkContainsKeep1(rules []pruning.KeepRule) error {
	if len(rules) == 0 {
		return nil // No keep rules means keep all - ok
@@ -91,6 +97,26 @@ func checkContainsKeep1(rules []pruning.KeepRule) error {
	return errors.New("sender keep rules must contain last_n or be empty so that the last snapshot is definitely kept")
}

func NewLocalPrunerFactory(in config.PruningLocal, promPruneSecs *prometheus.HistogramVec) (*LocalPrunerFactory, error) {
	rules, err := pruning.RulesFromConfig(in.Keep)
	if err != nil {
		return nil, errors.Wrap(err, "cannot build pruning rules")
	}
	for _, r := range in.Keep {
		if _, ok := r.Ret.(*config.PruneKeepNotReplicated); ok {
			// rule NotReplicated for a local pruner doesn't make sense
			// because no replication happens with that job type
			return nil, fmt.Errorf("single-site pruner cannot support `not_replicated` keep rule")
		}
	}
	f := &LocalPrunerFactory{
		keepRules:     rules,
		retryWait:     envconst.Duration("ZREPL_PRUNER_RETRY_INTERVAL", 10*time.Second),
		promPruneSecs: promPruneSecs,
	}
	return f, nil
}

func NewPrunerFactory(in config.PruningSenderReceiver, promPruneSecs *prometheus.HistogramVec) (*PrunerFactory, error) {
	keepRulesReceiver, err := pruning.RulesFromConfig(in.KeepReceiver)
	if err != nil {
@@ -152,6 +178,22 @@ func (f *PrunerFactory) BuildReceiverPruner(ctx context.Context, target Target,
	return p
}

func (f *LocalPrunerFactory) BuildLocalPruner(ctx context.Context, target Target, receiver History) *Pruner {
	p := &Pruner{
		args: args{
			ctx,
			target,
			receiver,
			f.keepRules,
			f.retryWait,
			false, // considerSnapAtCursorReplicated is not relevant for local pruning
			f.promPruneSecs.WithLabelValues("local"),
		},
		state: Plan,
	}
	return p
}

//go:generate enumer -type=State
type State int

@@ -99,6 +99,7 @@ Changes
* |break_config| ``keep_bookmarks`` parameter of the ``grid`` keep rule has been removed

* |feature| ``zrepl status`` for live-updating replication progress (it's really cool!)
* |feature| :ref:`Snapshot- & pruning-only job type <job-snap>` (for local snapshot management)
* |feature| :issue:`67`: Expose `Prometheus <https://prometheus.io>`_ metrics via HTTP (:ref:`config docs <monitoring-prometheus>`)

  * Compatible Grafana dashboard shipping in ``dist/grafana``
@@ -25,18 +25,22 @@ For communication, the active side connects to the passive side using a :ref:`tr

The following table shows how different job types can be combined to achieve both push and pull mode setups:

+-----------------------+--------------+----------------------------------+-----------------------------------------------+
| Setup name | active side | passive side | use case |
+=======================+==============+==================================+===============================================+
| Push mode | ``push`` | ``sink`` | * Laptop backup |
| | | | * NAS behind NAT to offsite |
+-----------------------+--------------+----------------------------------+-----------------------------------------------+
| Pull mode | ``pull`` | ``source`` | * Central backup-server for many nodes |
| | | | * Remote server to NAS behind NAT |
+-----------------------+--------------+----------------------------------+-----------------------------------------------+
| Local replication | | ``push`` + ``sink`` in one config | * Backup FreeBSD boot pool |
| | | with :ref:`local transport <transport-local>` | |
+-----------------------+--------------+----------------------------------+-----------------------------------------------+
+-----------------------+--------------+----------------------------------+------------------------------------------------------------------------------------+
| Setup name | active side | passive side | use case |
+=======================+==============+==================================+====================================================================================+
| Push mode | ``push`` | ``sink`` | * Laptop backup |
| | | | * NAS behind NAT to offsite |
+-----------------------+--------------+----------------------------------+------------------------------------------------------------------------------------+
| Pull mode | ``pull`` | ``source`` | * Central backup-server for many nodes |
| | | | * Remote server to NAS behind NAT |
+-----------------------+--------------+----------------------------------+------------------------------------------------------------------------------------+
| Local replication | | ``push`` + ``sink`` in one config | * Backup FreeBSD boot pool |
| | | with :ref:`local transport <transport-local>` | |
+-----------------------+--------------+----------------------------------+------------------------------------------------------------------------------------+
| Snap & prune-only | ``snap`` | N/A | * | Snapshots & pruning but no replication |
| | | | | required |
| | | | * Workaround for :ref:`source-side pruning <prune-workaround-source-side-pruning>` |
+-----------------------+--------------+----------------------------------+------------------------------------------------------------------------------------+

How the Active Side Works
~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -125,7 +129,7 @@ The ``zrepl test placeholder`` command can be used to check whether a filesystem
Taking Snapshots
----------------

The ``push`` and ``source`` jobs can automatically take periodic snapshots of the filesystems matched by the ``filesystems`` filter field.
The ``push``, ``source`` and ``snap`` jobs can automatically take periodic snapshots of the filesystems matched by the ``filesystems`` filter field.
The snapshot names are composed of a user-defined prefix followed by a UTC date formatted like ``20060102_150405_000``.
We use UTC because it will avoid name conflicts when switching time zones or between summer and winter time.

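Aside (illustrative only, not part of this commit or the zrepl docs): the layout string quoted above is Go's reference-time format, so a name of that shape can be sketched as follows. The prefix value is merely an example borrowed from config/samples/snap.yml in this change, and whether zrepl composes the name with exactly this call is an assumption.

package main

import (
	"fmt"
	"time"
)

func main() {
	// Hypothetical sketch: compose a snapshot name from a user-defined prefix
	// and the UTC timestamp layout quoted in the docs above.
	prefix := "zrepl_snapjob_" // example prefix from config/samples/snap.yml in this change
	name := prefix + time.Now().UTC().Format("20060102_150405_000")
	fmt.Println(name) // e.g. zrepl_snapjob_20181012_091324_000
}
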
@@ -147,9 +151,10 @@ For ``push`` jobs, replication is automatically triggered after all filesystems

There is also a ``manual`` snapshotting type, which covers the following use cases:

* Existing infrastructure for automatic snapshots: you only want to use zrepl for replication.
* Existing infrastructure for automatic snapshots: you only want to use this zrepl job for replication.
* Run scripts before and after taking snapshots (like locking database tables).
  We are working on better integration for this use case: see :issue:`74`.
* Handling snapshotting through a separate ``snap`` job.

Note that you will have to trigger replication manually using the ``zrepl signal wakeup JOB`` subcommand in that case.

@@ -267,6 +272,7 @@ Job Type ``source``

Example config: :sampleconf:`/source.yml`


.. _replication-local:

Local replication
@@ -277,3 +283,28 @@ If you have the need for local replication (most likely between two local storag
Example config: :sampleconf:`/local.yml`.


.. _job-snap:

Job Type ``snap`` (snapshot & prune only)
-----------------------------------------

Job type that only takes snapshots and performs pruning on the local machine.

.. list-table::
   :widths: 20 80
   :header-rows: 1

   * - Parameter
     - Comment
   * - ``type``
     - = ``snap``
   * - ``name``
     - unique name of the job
   * - ``filesystems``
     - |filter-spec| for filesystems to be snapshotted
   * - ``snapshotting``
     - |snapshotting-spec|
   * - ``pruning``
     - |pruning-spec|

Example config: :sampleconf:`/snap.yml`
@@ -10,10 +10,12 @@ Typically, the requirements to temporal resolution and maximum retention time di
For example, when using zrepl to back up a busy database server, you will want high temporal resolution (snapshots every 10 min) for the last 24h in case of administrative disasters, but cannot afford to store them for much longer because you might have high turnover volume in the database.
On the receiving side, you may have more disk space available, or need to comply with other backup retention policies.

zrepl uses a set of **keep rules** to determine which snapshots shall be kept per filesystem.
zrepl uses a set of **keep rules** per sending and receiving side to determine which snapshots shall be kept per filesystem.
**A snapshot that is not kept by any rule is destroyed.**
The keep rules are **evaluated on the active side** (:ref:`push <job-push>` or :ref:`pull job <job-pull>`) of the replication setup, for both active and passive side, after replication completed or was determined to have failed permanently.

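Aside (illustrative only, not zrepl's pruning code): the sentence above pins down the semantics, so the destroy decision can be sketched like this; the helper and rule names are hypothetical.

package main

import (
	"fmt"
	"strings"
)

// Illustrative only: a snapshot survives if at least one keep rule retains it;
// everything else ends up on the destroy list ("a snapshot that is not kept by
// any rule is destroyed").
func destroyList(snapshots []string, keepRules []func(string) bool) (destroy []string) {
	for _, snap := range snapshots {
		kept := false
		for _, rule := range keepRules {
			if rule(snap) {
				kept = true
				break
			}
		}
		if !kept {
			destroy = append(destroy, snap)
		}
	}
	return destroy
}

func main() {
	// Hypothetical keep rule: keep snapshots whose name starts with "manual_".
	keepManual := func(name string) bool { return strings.HasPrefix(name, "manual_") }
	fmt.Println(destroyList(
		[]string{"zrepl_20181012_091324_000", "manual_pre-upgrade"},
		[]func(string) bool{keepManual},
	))
	// prints: [zrepl_20181012_091324_000]
}
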
Example Configuration:

::
@@ -49,18 +51,10 @@ Example Configuration:
You might have **existing snapshots** of filesystems affected by pruning which you want to keep, i.e. not be destroyed by zrepl.
Make sure to actually add the necessary ``regex`` keep rules on both sides, like with ``manual`` in the example above.

.. ATTENTION::

   It is currently not possible to define pruning on a source job.
   The source job creates snapshots, which means that extended replication downtime will fill up the source's zpool with snapshots, since pruning is directed by the corresponding active side (pull job).
   If this is a potential risk for you, consider using :ref:`push mode <job-push>`.


.. _prune-keep-not-replicated:

Policy ``not_replicated``
-------------------------

::

   jobs:
@@ -164,4 +158,57 @@ Policy ``regex``
Like all other regular expression fields in prune policies, zrepl uses Go's `regexp.Regexp <https://golang.org/pkg/regexp/#Compile>`_ Perl-compatible regular expressions (`Syntax <https://golang.org/pkg/regexp/syntax>`_).
The optional `negate` boolean field inverts the semantics: Use it if you want to keep all snapshots that *do not* match the given regex.

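Aside (illustrative only, not from the zrepl codebase): since these fields use Go's regexp package as stated above, the keep/negate semantics can be sketched as follows; the pattern and variable names are hypothetical.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical keep rule: regex "^manual_.*", with and without negate.
	re := regexp.MustCompile(`^manual_.*`)
	negate := false

	for _, snap := range []string{"manual_pre-upgrade", "zrepl_20181012_091324_000"} {
		matches := re.MatchString(snap)
		kept := matches != negate // negate inverts the match semantics
		fmt.Printf("%s: kept=%v\n", snap, kept)
	}
}
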
.. _prune-workaround-source-side-pruning:

Source-side snapshot pruning
----------------------------

A :ref:`source job <job-source>` takes snapshots on the system it runs on.
The corresponding :ref:`pull job <job-pull>` on the replication target connects to the source job and replicates the snapshots.
Afterwards, the pull job coordinates pruning on both sender (the source job side) and receiver (the pull job side).

There is no built-in way to define and execute pruning on the source side independently of the pull side.
The source job will continue taking snapshots which will not be pruned until the pull side connects.
This means that **extended replication downtime will fill up the source's zpool with snapshots**.

If the above is a conceivable situation for you, consider using :ref:`push mode <job-push>`, where pruning happens on the same side where snapshots are taken.

Workaround using ``snap`` job
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

As a workaround (see GitHub :issue:`102` for development progress), a pruning-only :ref:`snap job <job-snap>` can be defined on the source side:
The snap job is in charge of snapshot creation & destruction, whereas the source job's role is reduced to just serving snapshots.
However, since jobs are run independently, it is possible that the snap job will prune snapshots that are queued for replication / destruction by the remote pull job that connects to the source job.
Symptoms of such race conditions are spurious replication and destroy errors.

Example configuration:

::

   # source side
   jobs:
   - type: snap
     snapshotting:
       type: periodic
     pruning:
       keep:
       # source side pruning rules go here
       ...

   - type: source
     snapshotting:
       type: manual
     root_fs: ...

   # pull side
   jobs:
   - type: pull
     pruning:
       keep_sender:
       # let the source-side snap job do the pruning
       - type: regex
         regex: ".*"
       ...
       keep_receiver:
       # feel free to prune on the pull side as desired
       ...