diff --git a/cmd/sampleconf/localbackup/host1.yml b/cmd/sampleconf/localbackup/host1.yml
index b1b6b15..bbd8c2d 100644
--- a/cmd/sampleconf/localbackup/host1.yml
+++ b/cmd/sampleconf/localbackup/host1.yml
@@ -1,36 +1,25 @@
-# mirror local pool to backup pool
-local:
+mirror_local:
 
-  global:
-    snapshot_prefix: zrepl_
+  type: local
 
-  schedule:
-    type: auto # infer schedule from autosnap step
+  # snapshot the filesystems matched by the left-hand-side of the mapping
+  # every 10m with zrepl_ as prefix
+  mapping: {
+    "zroot/var/db<": "storage/backups/local/zroot/var/db",
+    "zroot/usr/home<": "storage/backups/local/zroot/usr/home",
+  }
+  snapshot_prefix: zrepl_
+  interval: 10m
 
-  steps:
+  # keep one hour of 10m interval snapshots of filesystems matched by
+  # the left-hand-side of the mapping
+  prune_lhs:
+    policy: grid
+    grid: 1x1h(keep=all)
 
-  - type: autosnap
-    interval: 10m
-    dataset_filter: {
-      "zroot<": ok
-    }
+  # follow a grandfathering scheme for filesystems on the right-hand-side of the mapping
+  prune_rhs:
+    policy: grid
+    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
 
-  - type: pull
-    mapping: {
-      "zroot<":"backups/local/zroot"
-    }
-
-  - type: prune
-    policy: grid
-    grid: 1x1h(keep=all)
-    dataset_filter: {
-      "zroot<": ok
-    }
-
-  - type: prune
-    policy: grid
-    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
-    dataset_filter: {
-      "backups/local/zroot<": ok
-    }
 
diff --git a/cmd/sampleconf/pullbackup/backuphost.yml b/cmd/sampleconf/pullbackup/backuphost.yml
index ec7bc07..b667d46 100644
--- a/cmd/sampleconf/pullbackup/backuphost.yml
+++ b/cmd/sampleconf/pullbackup/backuphost.yml
@@ -1,34 +1,21 @@
-prod1.example.com:
+fullbackup_prod1:
 
+  # connect to remote using ssh / stdinserver command
+  type: pull
   connect:
-    type: ssh
-    host: backuphost.example.com
+    type: ssh+stdinserver
+    host: prod1.example.com
     user: root
     port: 22
     identity_file: /root/.ssh/id_ed25519
 
-  schedule:
-    # we cannot infer a schedule from steps below, thus define one here
-    type: cron
-    cron: "@every 10m" # see https://godoc.org/github.com/robfig/cron
+  # pull all offered filesystems to storage/backups/zrepl/pull/prod1.example.com
+  mapping: {
+    "<":"storage/backups/zrepl/pull/prod1.example.com"
+  }
 
-  steps:
-
-  # pull datasets from remote to local
-  - type: pull
-    mapping: {
-      "zroot<":"storage/backups/zrepl/prod1.example.com/zroot"
-      "zroot/tmp": "!"
-    }
-
-  # prune the snaphots we pulled
-  - type: prune
-    policy: grid
-    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
-    dataset_filter: {
-      "storage/backups/zrepl/prod1.example.com/zroot<": ok
-    }
-    snapshot_filter: {
-      snapshot_prefix: zrepl_
-    }
+  # follow a grandfathering scheme for filesystems on the right-hand-side of the mapping
+  prune:
+    policy: grid
+    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
 
diff --git a/cmd/sampleconf/pullbackup/productionhost.yml b/cmd/sampleconf/pullbackup/productionhost.yml
index c7fbe79..96d6577 100644
--- a/cmd/sampleconf/pullbackup/productionhost.yml
+++ b/cmd/sampleconf/pullbackup/productionhost.yml
@@ -1,33 +1,22 @@
-backuphost.example.com:
+fullbackup_prod1:
 
-  global:
-    snapshot_prefix: zrepl_
-
-  # accept connections from backups.example.com via ssh / stdinserver command
+  # expect remote to connect via ssh+stdinserver with fullbackup_prod1 as client_identity
+  type: pull-source
   serve:
-    auth: ssh
-    role: source
-    acl: {
-      "zroot<": ok,
-      "zroot/usr/home<": omit,
-    }
+    type: stdinserver
+    client_identity: fullbackup_prod1
 
-  # periodic task list, following given schedule
-  schedule:
-    type: auto # infer from autosnap step
-  steps:
-
-  - type: autosnap
-    interval: 10m
-    dataset_filter: {
-      "zroot<": ok
-    }
-
-  - type: prune
-    policy: grid
-    grid: 1x1h(keep=all)
-    dataset_filter: {
-      "zroot<": ok
-    }
+  # snapshot these filesystems every 10m with zrepl_ as prefix
+  datasets: {
+    "zroot/var/db<": ok,
+    "zroot/usr/home<": omit,
+  }
+  snapshot_prefix: zrepl_
+  interval: 10m
 
+  # keep a one day window of 10m interval snapshots in case pull doesn't work (link down, etc)
+  # (we cannot keep more than one day because this host will run out of disk space)
+  prune:
+    policy: grid
+    grid: 1x1d(keep=all)
 
diff --git a/cmd/sampleconf/pushbackup/backuphost.yml b/cmd/sampleconf/pushbackup/backuphost.yml
index 7499d6e..68aec02 100644
--- a/cmd/sampleconf/pushbackup/backuphost.yml
+++ b/cmd/sampleconf/pushbackup/backuphost.yml
@@ -1,11 +1,19 @@
-prod1.example.com:
+fullbackup_prod1:
 
-  global:
-    snapshot_prefix: zrepl_
-
-  # accept connections from prod1.example.com via ssh / stdinserver command
+  # expect remote to connect via ssh+stdinserver with fullbackup_prod1 as client_identity
+  type: push-sink
   serve:
-    auth: ssh
-    role: sink
-    root: storage/backups/zrepl/sink/prod1.example.com
+    type: stdinserver
+    client_identity: fullbackup_prod1
+
+  # map all pushed datasets to storage/backups/zrepl/sink/prod1.example.com
+  mapping: {
+    "<":"storage/backups/zrepl/sink/prod1.example.com"
+  }
+
+  # follow a grandfathering scheme for filesystems on the right-hand-side of the mapping
+  prune:
+    policy: grid
+    grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d
+
 
diff --git a/cmd/sampleconf/pushbackup/productionhost.yml b/cmd/sampleconf/pushbackup/productionhost.yml
index 43d3d25..7722d58 100644
--- a/cmd/sampleconf/pushbackup/productionhost.yml
+++ b/cmd/sampleconf/pushbackup/productionhost.yml
@@ -1,47 +1,25 @@
-# push backup to backups.example.com
-backups.example.com:
+fullbackup_prod1:
 
+  # connect to remote using ssh / stdinserver command
+  type: push
   connect:
-    type: ssh
-    host: 192.168.122.128
-    user: root
-    port: 22
-    identity_file: /root/.ssh/id_ed25519
-    connlog_read_file: /tmp/connlog_read
-    connlog_write_file: /tmp/connlog_write
+    type: ssh+stdinserver
+    host: prod1.example.com
+    user: root
+    port: 22
+    identity_file: /root/.ssh/id_ed25519
 
-  global:
-    snapshot_prefix: zrepl_
+  # snapshot these datasets every 10m with zrepl_ as prefix
+  datasets: {
+    "zroot/var/db<": ok,
"zroot/usr/home<": ok, + } + snapshot_prefix: zrepl_ + interval: 10m - schedule: - type: auto - - steps: - - # take local snapshots & keep window of one day - - type: autosnap - interval: 10m - dataset_filter: { - "zroot/var/db<": ok, - "zroot/usr/home<": ok, - } - - type: prune - policy: grid - grid: 1x1d(keep=all) - dataset_filter: { - "zroot/var/db<": ok, - "zroot/usr/home<": ok, - } - - # push datasets to remote and prune there - # if these jobs fail, we have one day until we are out of sync - - type: push - dataset_filter: { - "zroot/var/db<": ok, - "zroot/usr/home<": ok, - } - - type: prune - remote: true - policy: grid - grid: 1x1h(keep=all) | 24x1h | 35x1d | 6x30d + # keep a one day window 10m interval snapshots in case push doesn't work (link down, etc) + # (we cannot keep more than one day because this host will run out of disk space) + prune: + policy: grid + grid: 1x1d(keep=all)