alternative prototype for new config format

Christian Schwarz 2017-09-07 11:18:06 +02:00
parent 98fc59dbd5
commit b2f3645bfd
5 changed files with 85 additions and 134 deletions

View File

@@ -1,36 +1,25 @@
-# mirror local pool to backup pool
-local:
-  global:
-    snapshot_prefix: zrepl_
-  schedule:
-    type: auto # infer schedule from autosnap step
-  steps:
-  - type: autosnap
-    interval: 10m
-    dataset_filter: {
-      "zroot<": ok
-    }
-  - type: pull
-    mapping: {
-      "zroot<":"backups/local/zroot"
-    }
-  - type: prune
-    policy: grid
-    grid: 1x1h(keep=all)
-    dataset_filter: {
-      "zroot<": ok
-    }
-  - type: prune
-    policy: grid
-    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
-    dataset_filter: {
-      "backups/local/zroot<": ok
-    }
+mirror_local:
+  type: local
+  # snapshot the filesystems matched by the left-hand-side of the mapping
+  # every 10m with zrepl_ as prefix
+  mapping: {
+    "zroot/var/db<": "storage/backups/local/zroot/var/db",
+    "zroot/usr/home<": "storage/backups/local/zroot/usr/home",
+  }
+  snapshot_prefix: zrepl_
+  interval: 10m
+  # keep one hour of 10m interval snapshots of filesystems matched by
+  # the left-hand-side of the mapping
+  prune_lhs:
+    policy: grid
+    grid: 1x1h(keep=all)
+  # follow a grandfathering scheme for filesystems on the right-hand-side of the mapping
+  prune_rhs:
+    policy: grid
+    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
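
In the new format a single job replaces the old autosnap/pull/prune step list: the left-hand sides of mapping name what gets snapshotted every interval with snapshot_prefix, prune_lhs prunes those sources, and prune_rhs prunes the replicas on the right-hand sides. A minimal sketch of that shape with a hypothetical pool layout; key names are taken from the added lines above, and the reading of the trailing "<" as dataset-plus-subtree selection is an assumption based on how these samples use it:

example_mirror:                         # hypothetical job name
  type: local
  mapping: {
    "tank/data<": "backup/tank/data",   # "<" appears to select tank/data and everything below it
  }
  snapshot_prefix: zrepl_
  interval: 10m
  prune_lhs:
    policy: grid
    grid: 1x1h(keep=all)
  prune_rhs:
    policy: grid
    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d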

View File

@@ -1,34 +1,21 @@
-prod1.example.com:
-  connect:
-    type: ssh
-    host: backuphost.example.com
-    user: root
-    port: 22
-    identity_file: /root/.ssh/id_ed25519
-  schedule:
-    # we cannot infer a schedule from steps below, thus define one here
-    type: cron
-    cron: "@every 10m" # see https://godoc.org/github.com/robfig/cron
-  steps:
-  # pull datasets from remote to local
-  - type: pull
-    mapping: {
-      "zroot<":"storage/backups/zrepl/prod1.example.com/zroot"
-      "zroot/tmp": "!"
-    }
-  # prune the snapshots we pulled
-  - type: prune
-    policy: grid
-    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
-    dataset_filter: {
-      "storage/backups/zrepl/prod1.example.com/zroot<": ok
-    }
-    snapshot_filter: {
-      snapshot_prefix: zrepl_
-    }
+fullbackup_prod1:
+  # connect to remote using ssh / stdinserver command
+  type: pull
+  connect:
+    type: ssh+stdinserver
+    host: prod1.example.com
+    user: root
+    port: 22
+    identity_file: /root/.ssh/id_ed25519
+  # pull all offered filesystems to storage/backups/zrepl/pull/prod1.example.com
+  mapping: {
+    "<":"storage/backups/zrepl/pull/prod1.example.com"
+  }
+  # follow a grandfathering scheme for filesystems on the right-hand-side of the mapping
+  prune:
+    policy: grid
+    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
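
Note how the pull job no longer enumerates remote datasets: a single catch-all mapping places everything the source offers under one local root, and the old explicit exclusion ("zroot/tmp": "!") apparently moves to the source side, which now decides what to offer (see the datasets filter in the next file). A sketch of that catch-all mapping with a hypothetical target root:

  # everything offered by the remote ends up under one local subtree
  mapping: {
    "<": "storage/backups/zrepl/pull/some-host.example.com"   # hypothetical root dataset
  }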

View File

@@ -1,33 +1,22 @@
-backuphost.example.com:
-  global:
-    snapshot_prefix: zrepl_
-  # accept connections from backups.example.com via ssh / stdinserver command
-  serve:
-    auth: ssh
-    role: source
-  acl: {
-    "zroot<": ok,
-    "zroot/usr/home<": omit,
-  }
-  # periodic task list, following given schedule
-  schedule:
-    type: auto # infer from autosnap step
-  steps:
-  - type: autosnap
-    interval: 10m
-    dataset_filter: {
-      "zroot<": ok
-    }
-  - type: prune
-    policy: grid
-    grid: 1x1h(keep=all)
-    dataset_filter: {
-      "zroot<": ok
-    }
+fullbackup_prod1:
+  # expect remote to connect via ssh+stdinserver with fullbackup_prod1 as client_identity
+  type: pull-source
+  serve:
+    type: stdinserver
+    client_identity: fullbackup_prod1
+  # snapshot these filesystems every 10m with zrepl_ as prefix
+  datasets: {
+    "zroot/var/db<": ok,
+    "zroot/usr/home<": omit,
+  }
+  snapshot_prefix: zrepl_
+  interval: 10m
+  # keep a one day window 10m interval snapshots in case pull doesn't work (link down, etc)
+  # (we cannot keep more than one day because this host will run out of disk space)
+  prune:
+    policy: grid
+    grid: 1x1d(keep=all)
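
The pulling job and the serving job appear to be tied together by the job name: the puller connects via ssh+stdinserver, and the source expects a peer identified as fullbackup_prod1. A sketch of the two halves as they pair up in these samples (values copied from the added lines above):

  # on the pulling host (type: pull)
  connect:
    type: ssh+stdinserver
    host: prod1.example.com
    user: root
    identity_file: /root/.ssh/id_ed25519

  # on the source host (type: pull-source)
  serve:
    type: stdinserver
    client_identity: fullbackup_prod1   # matches the job name on the pulling side in these samples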

View File

@@ -1,11 +1,19 @@
-prod1.example.com:
-  global:
-    snapshot_prefix: zrepl_
-  # accept connections from prod1.example.com via ssh / stdinserver command
-  serve:
-    auth: ssh
-    role: sink
-    root: storage/backups/zrepl/sink/prod1.example.com
+fullbackup_prod1:
+  # expect remote to connect via ssh+stdinserver with fullbackup_prod1 as client_identity
+  type: push-sink
+  serve:
+    type: stdinserver
+    client_identity: fullbackup_prod1
+  # map all pushed datasets to storage/backups/zrepl/sink/prod1.example.com
+  mapping: {
+    "<":"storage/backups/zrepl/sink/prod1.example.com"
+  }
+  # follow a grandfathering scheme for filesystems on the right-hand-side of the mapping
+  prune:
+    policy: grid
+    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
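
On the sink side the catch-all mapping plays the mirror-image role of the old role: sink / root: pair: whatever the client pushes is re-rooted under one subtree, and the long-term grandfathering prune now runs on the sink rather than remotely from the pushing host. A condensed sketch (paths as in the sample above; the comments are interpretive assumptions):

  mapping: {
    "<":"storage/backups/zrepl/sink/prod1.example.com"   # pushed datasets re-rooted here
  }
  prune:
    policy: grid
    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d          # long-term retention lives on the sink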

View File

@@ -1,47 +1,25 @@
-# push backup to backups.example.com
-backups.example.com:
-  connect:
-    type: ssh
-    host: 192.168.122.128
-    user: root
-    port: 22
-    identity_file: /root/.ssh/id_ed25519
-    connlog_read_file: /tmp/connlog_read
-    connlog_write_file: /tmp/connlog_write
-  global:
-    snapshot_prefix: zrepl_
-  schedule:
-    type: auto
-  steps:
-  # take local snapshots & keep window of one day
-  - type: autosnap
-    interval: 10m
-    dataset_filter: {
-      "zroot/var/db<": ok,
-      "zroot/usr/home<": ok,
-    }
-  - type: prune
-    policy: grid
-    grid: 1x1d(keep=all)
-    dataset_filter: {
-      "zroot/var/db<": ok,
-      "zroot/usr/home<": ok,
-    }
-  # push datasets to remote and prune there
-  # if these jobs fail, we have one day until we are out of sync
-  - type: push
-    dataset_filter: {
-      "zroot/var/db<": ok,
-      "zroot/usr/home<": ok,
-    }
-  - type: prune
-    remote: true
-    policy: grid
-    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
+fullbackup_prod1:
+  # connect to remote using ssh / stdinserver command
+  type: push
+  connect:
+    type: ssh+stdinserver
+    host: prod1.example.com
+    user: root
+    port: 22
+    identity_file: /root/.ssh/id_ed25519
+  # snapshot these datasets every 10m with zrepl_ as prefix
+  datasets: {
+    "zroot/var/db<": ok,
+    "zroot/usr/home<": ok,
+  }
+  snapshot_prefix: zrepl_
+  interval: 10m
+  # keep a one day window 10m interval snapshots in case push doesn't work (link down, etc)
+  # (we cannot keep more than one day because this host will run out of disk space)
+  prune:
+    policy: grid
+    grid: 1x1d(keep=all)
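
The push job thus keeps only a short local safety window (1x1d(keep=all)) while the sink's grid in the previous file does the long-term thinning; the old explicit "- type: prune / remote: true" step disappears. Reading the retention string the way the surrounding samples suggest is an assumption on my part, not something this commit spells out, but it breaks down roughly as:

  prune:
    policy: grid
    # 1x1h(keep=all)  -> for the most recent hour, keep every snapshot
    # 24x1h           -> then 24 one-hour slots (presumably one snapshot kept per slot)
    # 35x1d           -> then 35 one-day slots
    # 6x30d           -> then 6 thirty-day slots
    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d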