alternative prototype for new config format

Christian Schwarz 2017-09-07 11:18:06 +02:00
parent 98fc59dbd5
commit b2f3645bfd
5 changed files with 85 additions and 134 deletions

View File

@ -1,36 +1,25 @@
# mirror local pool to backup pool
local:
mirror_local:
global:
snapshot_prefix: zrepl_
type: local
schedule:
type: auto # infer schedule from autosnap step
steps:
- type: autosnap
interval: 10m
dataset_filter: {
"zroot<": ok
}
- type: pull
# snapshot the filesystems matched by the left-hand-side of the mapping
# every 10m with zrepl_ as prefix
mapping: {
"zroot<":"backups/local/zroot"
"zroot/var/db<": "storage/backups/local/zroot/var/db",
"zroot/usr/home<": "storage/backups/local/zroot/usr/home",
}
snapshot_prefix: zrepl_
interval: 10m
- type: prune
# keep one hour of 10m interval snapshots of filesystems matched by
# the left-hand-side of the mapping
prune_lhs:
policy: grid
grid: 1x1h(keep=all)
dataset_filter: {
"zroot<": ok
}
- type: prune
# follow a grandfathering scheme for filesystems on the right-hand-side of the mapping
prune_rhs:
policy: grid
grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
dataset_filter: {
"backups/local/zroot<": ok
}
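Because the +/- markers and indentation of this hunk are not visible in this view, the removed old-format lines and the added new-format lines read as one block. Assuming the additions are the lines carrying the new flat job layout (mirror_local, type: local, and the top-level mapping / prune_lhs / prune_rhs keys), the new-format file would look roughly like this; the indentation and the attribution of individual lines are reconstructions, not taken verbatim from the commit:

mirror_local:
  type: local
  # snapshot the filesystems matched by the left-hand-side of the mapping
  # every 10m with zrepl_ as prefix
  mapping: {
    "zroot/var/db<":   "storage/backups/local/zroot/var/db",
    "zroot/usr/home<": "storage/backups/local/zroot/usr/home",
  }
  snapshot_prefix: zrepl_
  interval: 10m
  # keep one hour of 10m interval snapshots of filesystems matched by
  # the left-hand-side of the mapping
  prune_lhs:
    policy: grid
    grid: 1x1h(keep=all)
  # follow a grandfathering scheme for filesystems on the right-hand-side of the mapping
  prune_rhs:
    policy: grid
    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d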

View File

@ -1,34 +1,21 @@
prod1.example.com:
fullbackup_prod1:
# connect to remote using ssh / stdinserver command
type: pull
connect:
type: ssh
host: backuphost.example.com
type: ssh+stdinserver
host: prod1.example.com
user: root
port: 22
identity_file: /root/.ssh/id_ed25519
schedule:
# we cannot infer a schedule from the steps below, thus define one here
type: cron
cron: "@every 10m" # see https://godoc.org/github.com/robfig/cron
steps:
# pull datasets from remote to local
- type: pull
# pull all offered filesystems to storage/backups/zrepl/pull/prod1.example.com
mapping: {
"zroot<":"storage/backups/zrepl/prod1.example.com/zroot"
"zroot/tmp": "!"
"<":"storage/backups/zrepl/pull/prod1.example.com"
}
# prune the snapshots we pulled
- type: prune
# follow a grandfathering scheme for filesystems on the right-hand-side of the mapping
prune:
policy: grid
grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
dataset_filter: {
"storage/backups/zrepl/prod1.example.com/zroot<": ok
}
snapshot_filter: {
snapshot_prefix: zrepl_
}
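Read the same way, the added lines of this hunk appear to define the pull job that runs on the backup host. A rough sketch of the new-format file, with indentation and line attribution reconstructed (the snapshot_filter block is kept here as a guess, since this view does not show whether it was added or removed):

fullbackup_prod1:
  # connect to remote using ssh / stdinserver command
  type: pull
  connect:
    type: ssh+stdinserver
    host: prod1.example.com
    user: root
    port: 22
    identity_file: /root/.ssh/id_ed25519
  # pull all offered filesystems to storage/backups/zrepl/pull/prod1.example.com
  mapping: {
    "<": "storage/backups/zrepl/pull/prod1.example.com"
  }
  # prune the snapshots we pulled
  # follow a grandfathering scheme for filesystems on the right-hand-side of the mapping
  prune:
    policy: grid
    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
  snapshot_filter: {
    snapshot_prefix: zrepl_
  }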

View File

@ -1,33 +1,22 @@
backuphost.example.com:
fullbackup_prod1:
global:
snapshot_prefix: zrepl_
# accept connections from backups.example.com via ssh / stdinserver command
# expect remote to connect via ssh+stdinserver with fullbackup_prod1 as client_identity
type: pull-source
serve:
auth: ssh
role: source
acl: {
"zroot<": ok,
type: stdinserver
client_identity: fullbackup_prod1
# snapshot these filesystems every 10m with zrepl_ as prefix
datasets: {
"zroot/var/db<": ok,
"zroot/usr/home<": omit,
}
# periodic task list, following the given schedule
schedule:
type: auto # infer from autosnap step
steps:
- type: autosnap
snapshot_prefix: zrepl_
interval: 10m
dataset_filter: {
"zroot<": ok
}
- type: prune
# keep a one-day window of 10m-interval snapshots in case pull doesn't work (link down, etc.)
# (we cannot keep more than one day because this host would run out of disk space)
prune:
policy: grid
grid: 1x1h(keep=all)
dataset_filter: {
"zroot<": ok
}
grid: 1x1d(keep=all)
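Separating out what look like the added lines again, the source-side job on the production host would read roughly as follows; the indentation and the assignment of pull-source vs. role: source to new vs. old format are assumptions based on the new-format keys (serve, stdinserver, client_identity, datasets):

fullbackup_prod1:
  # expect remote to connect via ssh+stdinserver with fullbackup_prod1 as client_identity
  type: pull-source
  serve:
    type: stdinserver
    client_identity: fullbackup_prod1
  # snapshot these filesystems every 10m with zrepl_ as prefix
  datasets: {
    "zroot/var/db<": ok,
    "zroot/usr/home<": omit,
  }
  snapshot_prefix: zrepl_
  interval: 10m
  # keep a one-day window of 10m-interval snapshots in case pull doesn't work (link down, etc.)
  # (we cannot keep more than one day because this host would run out of disk space)
  prune:
    policy: grid
    grid: 1x1d(keep=all)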

View File

@ -1,11 +1,19 @@
prod1.example.com:
fullbackup_prod1:
global:
snapshot_prefix: zrepl_
# accept connections from prod1.example.com via ssh / stdinserver command
# expect remote to connect via ssh+stdinserver with fullbackup_prod1 as client_identity
type: push-sink
serve:
auth: ssh
role: sink
root: storage/backups/zrepl/sink/prod1.example.com
type: stdinserver
client_identity: fullbackup_prod1
# map all pushed datasets to storage/backups/zrepl/sink/prod1.example.com
mapping: {
"<":"storage/backups/zrepl/sink/prod1.example.com"
}
# follow a grandfathering scheme for filesystems on the right-hand-side of the mapping
prune:
policy: grid
grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
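The sink-side counterpart, reconstructed the same way from the lines that look like additions (push-sink, stdinserver, client_identity, and the catch-all mapping); the indentation is an assumption:

fullbackup_prod1:
  # expect remote to connect via ssh+stdinserver with fullbackup_prod1 as client_identity
  type: push-sink
  serve:
    type: stdinserver
    client_identity: fullbackup_prod1
  # map all pushed datasets to storage/backups/zrepl/sink/prod1.example.com
  mapping: {
    "<": "storage/backups/zrepl/sink/prod1.example.com"
  }
  # follow a grandfathering scheme for filesystems on the right-hand-side of the mapping
  prune:
    policy: grid
    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d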

View File

@ -1,47 +1,25 @@
# push backup to backups.example.com
backups.example.com:
fullbackup_prod1:
# connect to remote using ssh / stdinserver command
type: push
connect:
type: ssh
host: 192.168.122.128
type: ssh+stdinserver
host: prod1.example.com
user: root
port: 22
identity_file: /root/.ssh/id_ed25519
connlog_read_file: /tmp/connlog_read
connlog_write_file: /tmp/connlog_write
global:
snapshot_prefix: zrepl_
schedule:
type: auto
steps:
# take local snapshots & keep a one-day window
- type: autosnap
interval: 10m
dataset_filter: {
# snapshot these datasets every 10m with zrepl_ as prefix
datasets: {
"zroot/var/db<": ok,
"zroot/usr/home<": ok,
}
- type: prune
snapshot_prefix: zrepl_
interval: 10m
# keep a one-day window of 10m-interval snapshots in case push doesn't work (link down, etc.)
# (we cannot keep more than one day because this host would run out of disk space)
prune:
policy: grid
grid: 1x1d(keep=all)
dataset_filter: {
"zroot/var/db<": ok,
"zroot/usr/home<": ok,
}
# push datasets to remote and prune there
# if these jobs fail, we have one day until we are out of sync
- type: push
dataset_filter: {
"zroot/var/db<": ok,
"zroot/usr/home<": ok,
}
- type: prune
remote: true
policy: grid
grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
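Finally, the push job on the production host, sketched from what appear to be the added lines. The connlog_* options and the remote prune step are omitted because this view does not make clear whether they belong to the added or the removed side; the host value is reproduced as it appears in the hunk, and indentation and line attribution are reconstructions:

fullbackup_prod1:
  # connect to remote using ssh / stdinserver command
  type: push
  connect:
    type: ssh+stdinserver
    host: prod1.example.com
    user: root
    port: 22
    identity_file: /root/.ssh/id_ed25519
  # snapshot these datasets every 10m with zrepl_ as prefix
  datasets: {
    "zroot/var/db<": ok,
    "zroot/usr/home<": ok,
  }
  snapshot_prefix: zrepl_
  interval: 10m
  # keep a one-day window of 10m-interval snapshots in case push doesn't work (link down, etc.)
  # (we cannot keep more than one day because this host would run out of disk space)
  prune:
    policy: grid
    grid: 1x1d(keep=all)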