# zrepl/cmd/sampleconf/zrepl.yml

pools:
- name: offsite_backups
  transport:
    ssh:
      host: 192.168.122.6
      user: root
      port: 22
      identity_file: /etc/zrepl/identities/offsite_backups

pushs:
- to: offsite_backups
  filter: {
    "tank/var/db*": "ok",
    "tank/usr/home*": "ok"
  }
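  # Illustration (the filter semantics are an assumption, not stated in this
  # file): with the filter above, pushes of tank/var/db, tank/var/db/mysql,
  # or tank/usr/home/alice would pass, while e.g. tank/srv would not.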

pulls:
- from: offsite_backups
  mapping: {
    # like in sinks
  }
# local replication, only allowed in pull mode
# the from name 'local' is reserved for this purpose
- from: local
  repeat:
    interval: 15m
  mapping: {
    "tank/usr/home": "mirrorpool/foo/bar"
  }
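  # Illustration (assumed semantics): every 15 minutes, tank/usr/home is
  # replicated into mirrorpool/foo/bar on the same machine.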

sinks:
# direct mapping
# 1:1 mapping of remote dataset to local dataset
# A push request is rejected if it contains any dataset that does not
# match a mapping
- from: db1
  mapping: {
    "ssdpool/var/db/postgresql9.6": "zroot/backups/db1/pg_data"
  }
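  # Illustration: a push from db1 offering exactly
  # ssdpool/var/db/postgresql9.6 is received into zroot/backups/db1/pg_data;
  # any dataset without a matching mapping causes the request to be rejected.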
# "|" non-recursive wildcard
# the remote must present excatly one dataset, mapped to the rhs
- from: cdn_master
mapping: {
"|":"tank/srv/cdn" # NOTE: | is currently an invalid character for a ZFS dataset
}
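  # Illustration (the dataset name is made up): if cdn_master offers the
  # single dataset tank/srv/www, it is received as tank/srv/cdn; offering
  # more than one dataset does not match "|".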
# "*" recursive wildcard
# the remote may present an arbitrary set of marks a recursive wildcard, i.e. map all remotes to a tree under rhs
- from: mirror1
mapping: {
"tank/foo/bar*":"zroot/backups/mirror1" # NOTE: * is currently an invalid character for a ZFS dataset
}
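  # Illustration (the resulting tree layout is an assumption): tank/foo/bar
  # would map to zroot/backups/mirror1, and tank/foo/bar/baz to
  # zroot/backups/mirror1/baz.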
# "*":"!..." acceptor script
# shell out to an accceptor that receives the remote's offered datasets
# on stdin and, foreach line of this input, returns the corresponding
# local dataset (same order) or '!<space>optional reason' on stdout
# If the acceptor scripts exits with non-zero status code, the remote's
# request will be rejected
- from: complex_host
mapping: { #
"*":"!/path/to/acceptor" # we could just wire the path to the acceptor directly to the mapping
# but let's stick with the same type for the mapping field for now'
# NOTE: * and ! are currently invalid characters for a ZFS dataset
}
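  # Sketch of one acceptor exchange per the protocol above (the dataset
  # names and the rejection reason are made up):
  #   stdin:   ssdpool/var/db
  #            ssdpool/usr/home
  #   stdout:  zroot/backups/complex_host/var/db
  #            ! home datasets are not accepted here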
# Mixing the rules
# Mixing should be possible if there is a defined precedence (direct before *)
# and non-recursive wildcards are not allowed in multi-entry mapping objects
- from: special_snowflake
  mapping: { # an explicit mapping mixed with a recursive wildcard
    "sun/usr/home": "backups/special_snowflake/homedirs",
    "sun/var/db":   "backups/special_snowflake/database",
    "*":            "backups/special_snowflake/remainingbackup"
    # NOTE: ^ alignment, should be possible, looks nicer
  }
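  # Illustration of the precedence above (sun/var/log is a made-up example):
  # sun/usr/home takes the direct mapping to
  # backups/special_snowflake/homedirs, while sun/var/log only matches "*"
  # and ends up under backups/special_snowflake/remainingbackup.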

pull_acls:
# same syntax as in sinks, but the returned mapping does not matter
- from: office_backup
  mapping: {
    "tank/usr/home": "notnull"
  }
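  # Illustration (assumed ACL semantics): office_backup is allowed to pull
  # tank/usr/home; the rhs "notnull" is just a placeholder, since only the
  # match itself is evaluated.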

prune:
- name: clean_backups
  policy: grid
  grid: 6x10min | 24x1h | 7x1d | 32x1d | 4x3mon
  dataset_filter: {
    "tank/backups/*": ok
  }
  snapshot_filter: {
    prefix: zrepl_
  }
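  # Reading of the grid spec (an assumption, not defined in this file):
  # each NxL entry is N buckets of length L, most recent first, with one
  # snapshot kept per bucket; here roughly one hour at 10min granularity,
  # a day hourly, then daily and finally quarterly buckets.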
- name: hfbak_prune # cleans up after hfbak autosnap job
  policy: grid
  grid: 1x1min(keep=all)
  dataset_filter: {
    "pool1*": ok
  }
  snapshot_filter: {
    prefix: zrepl_hfbak_
  }
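  # Illustration (assumed keep=all semantics): one 1min bucket that keeps
  # every snapshot inside it, so zrepl_hfbak_ snapshots older than a minute
  # become eligible for destruction.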

autosnap:
- name: hfbak
  prefix: zrepl_hfbak_
  interval: 1s
  dataset_filter: {
    "pool1*": ok
  }
  # prune: hfbak_prune
  # future versions may inline the retention policy here, but for now,
  # pruning has to be triggered manually (it's safe to run autosnap + prune in parallel)
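  # Illustration (the timestamp suffix format is an assumption): this job
  # creates snapshots like pool1@zrepl_hfbak_20170622T190448Z once per
  # second on every dataset matching "pool1*".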