diff --git a/config/config.go b/config/config.go
index 7a49d65..62d2cb3 100644
--- a/config/config.go
+++ b/config/config.go
@@ -44,13 +44,13 @@ type PushJob struct {
 
 type PullJob struct {
 	ActiveJob `yaml:",inline"`
-	RootDataset string        `yaml:"root_dataset"`
-	Interval    time.Duration `yaml:"interval,positive"`
+	RootFS   string        `yaml:"root_fs"`
+	Interval time.Duration `yaml:"interval,positive"`
 }
 
 type SinkJob struct {
 	PassiveJob `yaml:",inline"`
-	RootDataset string `yaml:"root_dataset"`
+	RootFS string `yaml:"root_fs"`
 }
 
 type SourceJob struct {
diff --git a/config/config_global_test.go b/config/config_global_test.go
index 50948b7..f73219e 100644
--- a/config/config_global_test.go
+++ b/config/config_global_test.go
@@ -18,7 +18,7 @@ jobs:
     clients: {
       "10.0.0.1":"foo"
     }
-  root_dataset: zoot/foo
+  root_fs: zoot/foo
 `
 	_, err := ParseConfigBytes([]byte(jobdef))
 	require.NoError(t, err)
diff --git a/config/config_rpc_test.go b/config/config_rpc_test.go
index f74581d..f02311e 100644
--- a/config/config_rpc_test.go
+++ b/config/config_rpc_test.go
@@ -16,7 +16,7 @@ jobs:
     address: "server1.foo.bar:8888"
     rpc:
       timeout: 20s # different form default, should merge
-  root_dataset: "pool2/backup_servers"
+  root_fs: "pool2/backup_servers"
   interval: 10m
   pruning:
     keep_sender:
@@ -32,7 +32,7 @@ jobs:
     address: "server1.foo.bar:8888"
     rpc:
       tx_chunk_size: 0xabcd # different from default, should merge
-  root_dataset: "pool2/backup_servers"
+  root_fs: "pool2/backup_servers"
   interval: 10m
   pruning:
     keep_sender:
@@ -43,7 +43,7 @@ jobs:
 
 - type: sink
   name: "laptop_sink"
-  root_dataset: "pool2/backup_laptops"
+  root_fs: "pool2/backup_laptops"
   serve:
     type: tcp
     listen: "192.168.122.189:8888"
@@ -55,7 +55,7 @@ jobs:
 
 - type: sink
   name: "other_sink"
-  root_dataset: "pool2/backup_laptops"
+  root_fs: "pool2/backup_laptops"
   serve:
     type: tcp
     listen: "192.168.122.189:8888"
diff --git a/config/samples/local.yml b/config/samples/local.yml
index 7c886ba..fbf7861 100644
--- a/config/samples/local.yml
+++ b/config/samples/local.yml
@@ -2,7 +2,7 @@ jobs:
 
 - type: sink
   name: "local_sink"
-  root_dataset: "storage/zrepl/sink"
+  root_fs: "storage/zrepl/sink"
   serve:
     type: local
     listener_name: localsink
diff --git a/config/samples/pull.yml b/config/samples/pull.yml
index 4eb5113..9a32a43 100644
--- a/config/samples/pull.yml
+++ b/config/samples/pull.yml
@@ -8,7 +8,7 @@ jobs:
     cert: "/certs/cert.crt"
     key: "/certs/key.pem"
     server_cn: "server1"
-  root_dataset: "pool2/backup_servers"
+  root_fs: "pool2/backup_servers"
   interval: 10m
   pruning:
     keep_sender:
diff --git a/config/samples/pull_ssh.yml b/config/samples/pull_ssh.yml
index be5229b..cbe8559 100644
--- a/config/samples/pull_ssh.yml
+++ b/config/samples/pull_ssh.yml
@@ -10,7 +10,7 @@ jobs:
     identity_file: /etc/zrepl/ssh/identity
     options: # optional, default [], `-o` arguments passed to ssh
     - "Compression=on"
-  root_dataset: "pool2/backup_servers"
+  root_fs: "pool2/backup_servers"
   interval: 10m
   pruning:
     keep_sender:
diff --git a/config/samples/sink.yml b/config/samples/sink.yml
index 01990da..917a3cf 100644
--- a/config/samples/sink.yml
+++ b/config/samples/sink.yml
@@ -1,7 +1,7 @@
 jobs:
 - type: sink
   name: "laptop_sink"
-  root_dataset: "pool2/backup_laptops"
+  root_fs: "pool2/backup_laptops"
   serve:
     type: tls
     listen: "192.168.122.189:8888"
diff --git a/daemon/job/active.go b/daemon/job/active.go
index 15af588..edf24e9 100644
--- a/daemon/job/active.go
+++ b/daemon/job/active.go
@@ -94,13 +94,13 @@ func modePushFromConfig(g *config.Global, in *config.PushJob) (*modePush, error)
 }
 
 type modePull struct {
-	rootDataset *zfs.DatasetPath
+	rootFS      *zfs.DatasetPath
 	interval    time.Duration
 }
 
 func (m *modePull) SenderReceiver(client *streamrpc.Client) (replication.Sender, replication.Receiver, error) {
 	sender := endpoint.NewRemote(client)
-	receiver, err := endpoint.NewReceiver(m.rootDataset)
+	receiver, err := endpoint.NewReceiver(m.rootFS)
 	return sender, receiver, err
 }
@@ -133,12 +133,12 @@ func modePullFromConfig(g *config.Global, in *config.PullJob) (m *modePull, err
 	}
 	m.interval = in.Interval
 
-	m.rootDataset, err = zfs.NewDatasetPath(in.RootDataset)
+	m.rootFS, err = zfs.NewDatasetPath(in.RootFS)
 	if err != nil {
-		return nil, errors.New("root dataset is not a valid zfs filesystem path")
+		return nil, errors.New("RootFS is not a valid zfs filesystem path")
 	}
-	if m.rootDataset.Length() <= 0 {
-		return nil, errors.New("root dataset must not be empty") // duplicates error check of receiver
+	if m.rootFS.Length() <= 0 {
+		return nil, errors.New("RootFS must not be empty") // duplicates error check of receiver
 	}
 
 	return m, nil
diff --git a/daemon/job/passive.go b/daemon/job/passive.go
index 02ac9b6..ac843b3 100644
--- a/daemon/job/passive.go
+++ b/daemon/job/passive.go
@@ -60,7 +60,7 @@ func (m *modeSink) RunPeriodic(_ context.Context) {}
 
 func modeSinkFromConfig(g *config.Global, in *config.SinkJob) (m *modeSink, err error) {
 	m = &modeSink{}
-	m.rootDataset, err = zfs.NewDatasetPath(in.RootDataset)
+	m.rootDataset, err = zfs.NewDatasetPath(in.RootFS)
 	if err != nil {
 		return nil, errors.New("root dataset is not a valid zfs filesystem path")
 	}
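
For reference, a job definition using the renamed key would look like the sketch below; it simply mirrors the updated config/samples/sink.yml hunk above, so the job name, pool path, and listen address are the sample's placeholder values, not requirements:

jobs:
- type: sink
  name: "laptop_sink"
  root_fs: "pool2/backup_laptops"   # formerly root_dataset
  serve:
    type: tls
    listen: "192.168.122.189:8888"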