# This config serves as an example for a local zrepl installation that
# backs up the entire zpool `system` to `backuppool/zrepl/sink`.
#
# The requirements covered by this setup are described in the zrepl documentation's
# quick-start section, which inlines this example.
#
# CUSTOMIZATIONS YOU WILL LIKELY WANT TO APPLY:
# - adjust the name of the production pool `system` in the `filesystems` filter of jobs `snapjob` and `push_to_drive`
# - adjust the name of the backup pool `backuppool` in the `backuppool_sink` job
# - adjust the occurrences of `myhostname` to the name of the system you are backing up (cannot be easily changed once you start replicating)
# - make sure the `zrepl_` prefix is not being used by any other zfs tools you might have installed (it likely isn't)
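#
# After editing, a run of `zrepl configcheck` validates the config before
# (re)starting the daemon.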
jobs:
# this job takes care of snapshot creation + pruning
- name: snapjob
  type: snap
  filesystems: {
      "system<": true,
  }
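  # (`system<` is zrepl's filter syntax for "the dataset `system` and
  # everything below it")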
  # create snapshots with prefix `zrepl_` every 15 minutes
  snapshotting:
    type: periodic
    interval: 15m
    prefix: zrepl_
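    # with zrepl's default timestamp format, the resulting snapshots are
    # named like `zrepl_20060102_150405_000` (prefix + UTC timestamp)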
  pruning:
    keep:
    # fade-out scheme for snapshots starting with `zrepl_`
    # - keep all created in the last hour
    # - then destroy snapshots such that we keep 24, each 1 hour apart
    # - then destroy snapshots such that we keep 14, each 1 day apart
    # - then destroy all older snapshots
    - type: grid
      grid: 1x1h(keep=all) | 24x1h | 14x1d
      regex: "^zrepl_.*"
    # keep all snapshots that don't have the `zrepl_` prefix
    - type: regex
      negate: true
      regex: "^zrepl_.*"

# This job pushes to the local sink defined in job `backuppool_sink`.
# We trigger replication manually from the command line / udev rules using
# `zrepl signal wakeup push_to_drive`.
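#
# A sketch of that manual flow after attaching the drive (assumes the zrepl
# daemon is already running; pool and job names are the ones in this file):
#
#   zpool import backuppool
#   zrepl signal wakeup push_to_drive
#   zrepl status    # watch until the replication finishes
#   zpool export backuppool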
- type: push
  name: push_to_drive
  connect:
    type: local
    listener_name: backuppool_sink
    client_identity: myhostname
  filesystems: {
      "system<": true
  }
  send:
    encrypted: true
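    # `encrypted: true` restricts replication to filesystems that are
    # encrypted on the sender and sends them raw, so the data stays
    # encrypted in transit and at rest on the backup pool; unencrypted
    # filesystems in the filter will error rather than replicate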
  replication:
    protection:
      initial: guarantee_resumability
      # Downgrade protection to `guarantee_incremental`, which uses zfs
      # bookmarks instead of zfs holds. Thus, when we yank out the backup
      # drive during replication:
      # - we might not be able to resume the interrupted replication step,
      #   because the partially received `to` snapshot of a `from`->`to`
      #   step may be pruned at any time
      # - but in exchange, we get back the disk space allocated by `to`
      #   when we prune it
      # - and because we still have the bookmarks created by
      #   `guarantee_incremental`, we can still replicate `from`->`to2`
      #   incrementally in the future
      incremental: guarantee_incremental
  snapshotting:
    type: manual
  pruning:
    # no-op prune rule on sender (keep all snapshots); job `snapjob`
    # takes care of pruning on the sender
    keep_sender:
    - type: regex
      regex: ".*"
    keep_receiver:
    # longer retention on the backup drive, we have more space there
    # (360x1d is roughly one year of daily snapshots)
    - type: grid
      grid: 1x1h(keep=all) | 24x1h | 360x1d
      regex: "^zrepl_.*"
    # retain all non-zrepl snapshots on the backup drive
    - type: regex
      negate: true
      regex: "^zrepl_.*"

# This job receives from job `push_to_drive` into `backuppool/zrepl/sink/myhostname`.
- type: sink
  name: backuppool_sink
  root_fs: "backuppool/zrepl/sink"
  serve:
    type: local
    listener_name: backuppool_sink
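  # note: zrepl does not create `root_fs` itself; it must exist before the
  # first replication, e.g. via `zfs create -p backuppool/zrepl/sink`
  # (the -p flag creates any missing parent datasets)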