config: source job: rename field 'datasets' to 'filesystems'

While filesystems is also not the right term (since it excludes ZVOLs),
we want to stay consistent with comments & terminology used in docs.

BREAK CONFIG

fixes #17
This commit is contained in:
Christian Schwarz 2017-10-05 13:39:05 +02:00
parent b95260f4b5
commit 3e647c14c0
7 changed files with 12 additions and 12 deletions

View File

@ -13,7 +13,7 @@ import (
type SourceJob struct { type SourceJob struct {
Name string Name string
Serve AuthenticatedChannelListenerFactory Serve AuthenticatedChannelListenerFactory
Datasets *DatasetMapFilter Filesystems *DatasetMapFilter
SnapshotPrefix string SnapshotPrefix string
Interval time.Duration Interval time.Duration
Prune PrunePolicy Prune PrunePolicy
@ -24,7 +24,7 @@ func parseSourceJob(c JobParsingContext, name string, i map[string]interface{})
var asMap struct { var asMap struct {
Serve map[string]interface{} Serve map[string]interface{}
Datasets map[string]string Filesystems map[string]string
SnapshotPrefix string `mapstructure:"snapshot_prefix"` SnapshotPrefix string `mapstructure:"snapshot_prefix"`
Interval string Interval string
Prune map[string]interface{} Prune map[string]interface{}
@ -42,7 +42,7 @@ func parseSourceJob(c JobParsingContext, name string, i map[string]interface{})
return return
} }
if j.Datasets, err = parseDatasetMapFilter(asMap.Datasets, true); err != nil { if j.Filesystems, err = parseDatasetMapFilter(asMap.Filesystems, true); err != nil {
return return
} }
@ -77,7 +77,7 @@ func (j *SourceJob) JobStart(ctx context.Context) {
log := ctx.Value(contextKeyLog).(Logger) log := ctx.Value(contextKeyLog).(Logger)
defer log.Info("exiting") defer log.Info("exiting")
a := IntervalAutosnap{DatasetFilter: j.Datasets, Prefix: j.SnapshotPrefix, SnapshotInterval: j.Interval} a := IntervalAutosnap{DatasetFilter: j.Filesystems, Prefix: j.SnapshotPrefix, SnapshotInterval: j.Interval}
p, err := j.Pruner(PrunePolicySideDefault, false) p, err := j.Pruner(PrunePolicySideDefault, false)
if err != nil { if err != nil {
log.WithError(err).Error("error creating pruner") log.WithError(err).Error("error creating pruner")
@ -111,7 +111,7 @@ func (j *SourceJob) Pruner(side PrunePolicySide, dryRun bool) (p Pruner, err err
p = Pruner{ p = Pruner{
time.Now(), time.Now(),
dryRun, dryRun,
j.Datasets, j.Filesystems,
j.SnapshotPrefix, j.SnapshotPrefix,
j.Prune, j.Prune,
} }
@ -158,7 +158,7 @@ outer:
} }
// construct connection handler // construct connection handler
handler := NewHandler(log, j.Datasets, &PrefixSnapshotFilter{j.SnapshotPrefix}) handler := NewHandler(log, j.Filesystems, &PrefixSnapshotFilter{j.SnapshotPrefix})
// handle connection // handle connection
rpcServer := rpc.NewServer(rwc) rpcServer := rpc.NewServer(rwc)

View File

@ -28,7 +28,7 @@ jobs:
client_identity: fullbackup_prod1 client_identity: fullbackup_prod1
# snapshot these filesystems every 10m with zrepl_ as prefix # snapshot these filesystems every 10m with zrepl_ as prefix
datasets: { filesystems: {
"zroot/var/db<": "ok", "zroot/var/db<": "ok",
"zroot/usr/home<": "ok", "zroot/usr/home<": "ok",
"zroot/var/tmp": "!", #don't backup /tmp "zroot/var/tmp": "!", #don't backup /tmp

View File

@ -11,7 +11,7 @@ jobs:
identity_file: /root/.ssh/id_ed25519 identity_file: /root/.ssh/id_ed25519
# snapshot these datasets every 10m with zrepl_ as prefix # snapshot these datasets every 10m with zrepl_ as prefix
datasets: { filesystems: {
"zroot/var/db<": "ok", "zroot/var/db<": "ok",
"zroot/usr/home<": "!", "zroot/usr/home<": "!",
} }

View File

@ -22,7 +22,7 @@ jobs:
serve: serve:
type: stdinserver type: stdinserver
client_identity: debian2 client_identity: debian2
datasets: { filesystems: {
"pool1/db<": ok "pool1/db<": ok
} }
snapshot_prefix: zrepl_ snapshot_prefix: zrepl_

View File

@ -109,7 +109,7 @@ func doTestDatasetMapFilter(cmd *cobra.Command, args []string) {
case *PullJob: case *PullJob:
mf = j.Mapping mf = j.Mapping
case *SourceJob: case *SourceJob:
mf = j.Datasets mf = j.Filesystems
case *LocalJob: case *LocalJob:
mf = j.Mapping mf = j.Mapping
default: default:

View File

@ -94,7 +94,7 @@ jobs:
- name: pull_backup - name: pull_backup
type: source type: source
... ...
datasets: { filesystems: {
"zroot/var/db": "ok", "zroot/var/db": "ok",
"zroot/usr/home<": "ok", "zroot/usr/home<": "ok",
"zroot/usr/home/paranoid": "!", "zroot/usr/home/paranoid": "!",

View File

@ -102,7 +102,7 @@ jobs:
serve: serve:
type: stdinserver type: stdinserver
client_identity: backup-srv.example.com client_identity: backup-srv.example.com
datasets: { filesystems: {
"zroot/var/db": "ok", "zroot/var/db": "ok",
"zroot/usr/home<": "ok", "zroot/usr/home<": "ok",
"zroot/usr/home/paranoid": "!", "zroot/usr/home/paranoid": "!",