pools:
  - name: offsite_backups
    transport:
      ssh:
        host: 192.168.122.6
        user: root
        port: 22
        identity_file: /etc/zrepl/identities/offsite_backups
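        # The identity file is an ordinary SSH private key; assuming standard
        # OpenSSH tooling, it could be generated e.g. via:
        #   ssh-keygen -t ed25519 -N '' -f /etc/zrepl/identities/offsite_backups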

pushs:
  - to: offsite_backups
    filter: {
      "tank/var/db*":"ok",
      "tank/usr/home*":"ok"
    }
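    # Given the recursive wildcard semantics described under sinks below,
    # "tank/var/db*" presumably matches tank/var/db itself as well as all
    # datasets beneath it, e.g. a hypothetical tank/var/db/postgres.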

pulls:
  - from: offsite_backups
    mapping: {
      # like in sinks
    }

  # local replication, only allowed in pull mode
  # the from name 'local' is reserved for this purpose
  - from: local
    repeat:
      interval: 15m
    mapping: {
      "tank/usr/home":"mirrorpool/foo/bar"
    }

sinks:

  # direct mapping
  # 1:1 mapping of remote dataset to local dataset
  # We will reject a push request which contains > 0 datasets that do not
  # match a mapping
  - from: db1
    mapping: {
      "ssdpool/var/db/postgresql9.6":"zroot/backups/db1/pg_data"
    }

  # "|" non-recursive wildcard
  # the remote must present exactly one dataset, which is mapped to the rhs
  - from: cdn_master
    mapping: {
      "|":"tank/srv/cdn" # NOTE: | is currently an invalid character for a ZFS dataset
    }
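    # e.g. if cdn_master offers exactly one dataset, it is received as
    # tank/srv/cdn; if it offers several, the push request is rejected.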

  # "*" recursive wildcard
  # the remote may present an arbitrary set of datasets; "*" is a recursive
  # wildcard, i.e. map all remote datasets to a tree under the rhs
  - from: mirror1
    mapping: {
      "tank/foo/bar*":"zroot/backups/mirror1" # NOTE: * is currently an invalid character for a ZFS dataset
    }
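    # e.g. a remote dataset tank/foo/bar/db would then be received somewhere
    # below zroot/backups/mirror1 (the exact layout of that tree is an
    # assumption here, e.g. zroot/backups/mirror1/db).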

  # "*":"!..." acceptor script
  # shell out to an acceptor that receives the remote's offered datasets
  # on stdin and, for each line of this input, returns the corresponding
  # local dataset (same order) or '!<space>optional reason' on stdout.
  # If the acceptor script exits with a non-zero status code, the remote's
  # request will be rejected
  - from: complex_host
    mapping: {
      "*":"!/path/to/acceptor" # we could just wire the path to the acceptor directly to the mapping
                               # but let's stick with the same type for the mapping field for now
      # NOTE: * and ! are currently invalid characters for a ZFS dataset
    }
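    # Illustrative exchange with a hypothetical acceptor script (not part of
    # this config):
    #   stdin:   ssdpool/var/db
    #            ssdpool/tmp
    #   stdout:  zroot/backups/complex_host/var/db
    #            ! temporary datasets are not backed up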

  # Mixing the rules
  # Mixing should be possible if there is a defined precedence (direct before *)
  # and non-recursive wildcards are not allowed in multi-entry mapping objects
  - from: special_snowflake
    mapping: { # an explicit mapping mixed with a recursive wildcard
      "sun/usr/home": backups/special_snowflake/homedirs,
      "sun/var/db":   backups/special_snowflake/database,
      "*":            backups/special_snowflake/remainingbackup
      # NOTE: ^ alignment, should be possible, looks nicer
    }
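    # e.g. with the rules above, sun/usr/home is matched by its direct mapping
    # (direct before *), while an unlisted dataset like sun/srv/www falls
    # through to "*" and ends up under backups/special_snowflake/remainingbackup.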

pull_acls:
  # same syntax as in sinks, but the returned mapping does not matter
  - from: office_backup
    mapping: {
      "tank/usr/home":"notnull"
    }
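    # i.e. office_backup may pull tank/usr/home; "notnull" is presumably just
    # a placeholder rhs, since only whether a dataset matches matters here.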

prune:

  - name: clean_backups
    policy: grid
    grid: 6x10min | 24x1h | 7x1d | 32x1d | 4x3mon
    dataset_filter: {
      "tank/backups/*": ok
    }
    snapshot_filter: {
      prefix: zrepl_
    }
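    # One reading of the grid, left to right (an interpretation, not verified
    # against the implementation): keep 6 snapshots at 10-minute granularity,
    # then 24 at 1-hour granularity, 7 + 32 at 1-day granularity, and 4 at
    # 3-month granularity; snapshots older than the total span are destroyed.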

  - name: hfbak_prune # cleans up after hfbak autosnap job
    policy: grid
    grid: 1x1min(keep=all)
    dataset_filter: {
      "pool1*": ok
    }
    snapshot_filter: {
      prefix: zrepl_hfbak_
    }
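    # Presumably: keep all zrepl_hfbak_ snapshots from the last minute and
    # destroy older ones, bounding the pile created by the 1s autosnap job below.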

autosnap:

  - name: hfbak
    prefix: zrepl_hfbak_
    interval: 1s
    dataset_filter: {
      "pool1*": ok
    }
    # prune: hfbak_prune
    # future versions may inline the retention policy here, but for now,
    # pruning has to be triggered manually (it's safe to run autosnap + prune in parallel)
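    # With prefix zrepl_hfbak_ and a 1s interval, snapshot names would
    # presumably look like pool1/foo@zrepl_hfbak_<timestamp>.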