rename root_dataset to root_fs for receiving-side jobs

This commit is contained in:
Christian Schwarz 2018-10-11 18:00:23 +02:00
parent 0c3a694470
commit 125b561df3
9 changed files with 19 additions and 19 deletions

View File

@ -44,13 +44,13 @@ type PushJob struct {
type PullJob struct { type PullJob struct {
ActiveJob `yaml:",inline"` ActiveJob `yaml:",inline"`
RootDataset string `yaml:"root_dataset"` RootFS string `yaml:"root_fs"`
Interval time.Duration `yaml:"interval,positive"` Interval time.Duration `yaml:"interval,positive"`
} }
type SinkJob struct { type SinkJob struct {
PassiveJob `yaml:",inline"` PassiveJob `yaml:",inline"`
RootDataset string `yaml:"root_dataset"` RootFS string `yaml:"root_fs"`
} }
type SourceJob struct { type SourceJob struct {

View File

@ -18,7 +18,7 @@ jobs:
clients: { clients: {
"10.0.0.1":"foo" "10.0.0.1":"foo"
} }
root_dataset: zoot/foo root_fs: zoot/foo
` `
_, err := ParseConfigBytes([]byte(jobdef)) _, err := ParseConfigBytes([]byte(jobdef))
require.NoError(t, err) require.NoError(t, err)

View File

@ -16,7 +16,7 @@ jobs:
address: "server1.foo.bar:8888" address: "server1.foo.bar:8888"
rpc: rpc:
timeout: 20s # different from default, should merge timeout: 20s # different from default, should merge
root_dataset: "pool2/backup_servers" root_fs: "pool2/backup_servers"
interval: 10m interval: 10m
pruning: pruning:
keep_sender: keep_sender:
@ -32,7 +32,7 @@ jobs:
address: "server1.foo.bar:8888" address: "server1.foo.bar:8888"
rpc: rpc:
tx_chunk_size: 0xabcd # different from default, should merge tx_chunk_size: 0xabcd # different from default, should merge
root_dataset: "pool2/backup_servers" root_fs: "pool2/backup_servers"
interval: 10m interval: 10m
pruning: pruning:
keep_sender: keep_sender:
@ -43,7 +43,7 @@ jobs:
- type: sink - type: sink
name: "laptop_sink" name: "laptop_sink"
root_dataset: "pool2/backup_laptops" root_fs: "pool2/backup_laptops"
serve: serve:
type: tcp type: tcp
listen: "192.168.122.189:8888" listen: "192.168.122.189:8888"
@ -55,7 +55,7 @@ jobs:
- type: sink - type: sink
name: "other_sink" name: "other_sink"
root_dataset: "pool2/backup_laptops" root_fs: "pool2/backup_laptops"
serve: serve:
type: tcp type: tcp
listen: "192.168.122.189:8888" listen: "192.168.122.189:8888"

View File

@ -2,7 +2,7 @@
jobs: jobs:
- type: sink - type: sink
name: "local_sink" name: "local_sink"
root_dataset: "storage/zrepl/sink" root_fs: "storage/zrepl/sink"
serve: serve:
type: local type: local
listener_name: localsink listener_name: localsink

View File

@ -8,7 +8,7 @@ jobs:
cert: "/certs/cert.crt" cert: "/certs/cert.crt"
key: "/certs/key.pem" key: "/certs/key.pem"
server_cn: "server1" server_cn: "server1"
root_dataset: "pool2/backup_servers" root_fs: "pool2/backup_servers"
interval: 10m interval: 10m
pruning: pruning:
keep_sender: keep_sender:

View File

@ -10,7 +10,7 @@ jobs:
identity_file: /etc/zrepl/ssh/identity identity_file: /etc/zrepl/ssh/identity
options: # optional, default [], `-o` arguments passed to ssh options: # optional, default [], `-o` arguments passed to ssh
- "Compression=on" - "Compression=on"
root_dataset: "pool2/backup_servers" root_fs: "pool2/backup_servers"
interval: 10m interval: 10m
pruning: pruning:
keep_sender: keep_sender:

View File

@ -1,7 +1,7 @@
jobs: jobs:
- type: sink - type: sink
name: "laptop_sink" name: "laptop_sink"
root_dataset: "pool2/backup_laptops" root_fs: "pool2/backup_laptops"
serve: serve:
type: tls type: tls
listen: "192.168.122.189:8888" listen: "192.168.122.189:8888"

View File

@ -94,13 +94,13 @@ func modePushFromConfig(g *config.Global, in *config.PushJob) (*modePush, error)
} }
type modePull struct { type modePull struct {
rootDataset *zfs.DatasetPath rootFS *zfs.DatasetPath
interval time.Duration interval time.Duration
} }
func (m *modePull) SenderReceiver(client *streamrpc.Client) (replication.Sender, replication.Receiver, error) { func (m *modePull) SenderReceiver(client *streamrpc.Client) (replication.Sender, replication.Receiver, error) {
sender := endpoint.NewRemote(client) sender := endpoint.NewRemote(client)
receiver, err := endpoint.NewReceiver(m.rootDataset) receiver, err := endpoint.NewReceiver(m.rootFS)
return sender, receiver, err return sender, receiver, err
} }
@ -133,12 +133,12 @@ func modePullFromConfig(g *config.Global, in *config.PullJob) (m *modePull, err
} }
m.interval = in.Interval m.interval = in.Interval
m.rootDataset, err = zfs.NewDatasetPath(in.RootDataset) m.rootFS, err = zfs.NewDatasetPath(in.RootFS)
if err != nil { if err != nil {
return nil, errors.New("root dataset is not a valid zfs filesystem path") return nil, errors.New("RootFS is not a valid zfs filesystem path")
} }
if m.rootDataset.Length() <= 0 { if m.rootFS.Length() <= 0 {
return nil, errors.New("root dataset must not be empty") // duplicates error check of receiver return nil, errors.New("RootFS must not be empty") // duplicates error check of receiver
} }
return m, nil return m, nil

View File

@ -60,7 +60,7 @@ func (m *modeSink) RunPeriodic(_ context.Context) {}
func modeSinkFromConfig(g *config.Global, in *config.SinkJob) (m *modeSink, err error) { func modeSinkFromConfig(g *config.Global, in *config.SinkJob) (m *modeSink, err error) {
m = &modeSink{} m = &modeSink{}
m.rootDataset, err = zfs.NewDatasetPath(in.RootDataset) m.rootDataset, err = zfs.NewDatasetPath(in.RootFS)
if err != nil { if err != nil {
return nil, errors.New("root dataset is not a valid zfs filesystem path") return nil, errors.New("root dataset is not a valid zfs filesystem path")
} }