Mirror of https://github.com/zrepl/zrepl.git (synced 2024-11-22 00:13:52 +01:00)
rename root_dataset to root_fs for receiving-side jobs
This commit is contained in:
  parent 0c3a694470
  commit 125b561df3
@@ -44,13 +44,13 @@ type PushJob struct {
 
 type PullJob struct {
     ActiveJob `yaml:",inline"`
-    RootDataset string `yaml:"root_dataset"`
+    RootFS string `yaml:"root_fs"`
     Interval time.Duration `yaml:"interval,positive"`
 }
 
 type SinkJob struct {
     PassiveJob `yaml:",inline"`
-    RootDataset string `yaml:"root_dataset"`
+    RootFS string `yaml:"root_fs"`
 }
 
 type SourceJob struct {
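As a side note on the struct tags above: the yaml:"root_fs" tag is what maps the renamed config key onto the RootFS field. A minimal sketch of that mapping, using the stock gopkg.in/yaml.v2 package purely for illustration (zrepl parses configs through its own config package, so the import and the trimmed-down sinkJob type here are assumptions, not part of this commit):

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// sinkJob is a trimmed-down stand-in for config.SinkJob after the rename.
type sinkJob struct {
	RootFS string `yaml:"root_fs"`
}

func main() {
	doc := []byte(`root_fs: "pool2/backup_laptops"`)
	var j sinkJob
	if err := yaml.Unmarshal(doc, &j); err != nil {
		panic(err)
	}
	fmt.Println(j.RootFS) // prints: pool2/backup_laptops
}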
@@ -18,7 +18,7 @@ jobs:
 clients: {
 "10.0.0.1":"foo"
 }
-root_dataset: zoot/foo
+root_fs: zoot/foo
 `
     _, err := ParseConfigBytes([]byte(jobdef))
     require.NoError(t, err)
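The parse-only test above only checks that the new key is accepted. A hypothetical companion test could check that the old key is now rejected; this only holds if zrepl's config parser errors on unknown fields, which is an assumption here, as are the job name and the exact nesting of the serve section (it relies on the surrounding test file's imports of ParseConfigBytes and require):

func TestRootDatasetKeyIsGone(t *testing.T) {
	// Hypothetical fixture; assumes the parser rejects unknown keys.
	jobdef := `
jobs:
- type: sink
  name: "legacy_sink"
  root_dataset: "storage/zrepl/sink"
  serve:
    type: local
    listener_name: localsink
`
	_, err := ParseConfigBytes([]byte(jobdef))
	require.Error(t, err, "root_dataset was renamed to root_fs")
}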
@@ -16,7 +16,7 @@ jobs:
 address: "server1.foo.bar:8888"
 rpc:
 timeout: 20s # different from default, should merge
-root_dataset: "pool2/backup_servers"
+root_fs: "pool2/backup_servers"
 interval: 10m
 pruning:
 keep_sender:
@@ -32,7 +32,7 @@ jobs:
 address: "server1.foo.bar:8888"
 rpc:
 tx_chunk_size: 0xabcd # different from default, should merge
-root_dataset: "pool2/backup_servers"
+root_fs: "pool2/backup_servers"
 interval: 10m
 pruning:
 keep_sender:
@@ -43,7 +43,7 @@ jobs:
 
 - type: sink
 name: "laptop_sink"
-root_dataset: "pool2/backup_laptops"
+root_fs: "pool2/backup_laptops"
 serve:
 type: tcp
 listen: "192.168.122.189:8888"
@@ -55,7 +55,7 @@ jobs:
 
 - type: sink
 name: "other_sink"
-root_dataset: "pool2/backup_laptops"
+root_fs: "pool2/backup_laptops"
 serve:
 type: tcp
 listen: "192.168.122.189:8888"
@@ -2,7 +2,7 @@
 jobs:
 - type: sink
 name: "local_sink"
-root_dataset: "storage/zrepl/sink"
+root_fs: "storage/zrepl/sink"
 serve:
 type: local
 listener_name: localsink
@@ -8,7 +8,7 @@ jobs:
 cert: "/certs/cert.crt"
 key: "/certs/key.pem"
 server_cn: "server1"
-root_dataset: "pool2/backup_servers"
+root_fs: "pool2/backup_servers"
 interval: 10m
 pruning:
 keep_sender:
@@ -10,7 +10,7 @@ jobs:
 identity_file: /etc/zrepl/ssh/identity
 options: # optional, default [], `-o` arguments passed to ssh
 - "Compression=on"
-root_dataset: "pool2/backup_servers"
+root_fs: "pool2/backup_servers"
 interval: 10m
 pruning:
 keep_sender:
@@ -1,7 +1,7 @@
 jobs:
 - type: sink
 name: "laptop_sink"
-root_dataset: "pool2/backup_laptops"
+root_fs: "pool2/backup_laptops"
 serve:
 type: tls
 listen: "192.168.122.189:8888"
@@ -94,13 +94,13 @@ func modePushFromConfig(g *config.Global, in *config.PushJob) (*modePush, error)
 }
 
 type modePull struct {
-    rootDataset *zfs.DatasetPath
+    rootFS *zfs.DatasetPath
     interval time.Duration
 }
 
 func (m *modePull) SenderReceiver(client *streamrpc.Client) (replication.Sender, replication.Receiver, error) {
     sender := endpoint.NewRemote(client)
-    receiver, err := endpoint.NewReceiver(m.rootDataset)
+    receiver, err := endpoint.NewReceiver(m.rootFS)
     return sender, receiver, err
 }
@@ -133,12 +133,12 @@ func modePullFromConfig(g *config.Global, in *config.PullJob) (m *modePull, err
     }
     m.interval = in.Interval
 
-    m.rootDataset, err = zfs.NewDatasetPath(in.RootDataset)
+    m.rootFS, err = zfs.NewDatasetPath(in.RootFS)
     if err != nil {
-        return nil, errors.New("root dataset is not a valid zfs filesystem path")
+        return nil, errors.New("RootFS is not a valid zfs filesystem path")
     }
-    if m.rootDataset.Length() <= 0 {
-        return nil, errors.New("root dataset must not be empty") // duplicates error check of receiver
+    if m.rootFS.Length() <= 0 {
+        return nil, errors.New("RootFS must not be empty") // duplicates error check of receiver
     }
 
     return m, nil
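The NewDatasetPath / Length() validation above now appears almost verbatim in both modePullFromConfig and modeSinkFromConfig (next hunk). A hedged sketch of how that shared check could be expressed as one helper; the zfs.NewDatasetPath and DatasetPath.Length signatures are taken from the hunks themselves, while the helper name is hypothetical and not part of this commit:

// parseRootFS is a hypothetical helper, not part of this commit. It captures
// the validation pattern used by the receiving-side job modes: the configured
// root_fs must be a syntactically valid, non-empty ZFS dataset path.
func parseRootFS(rootFS string) (*zfs.DatasetPath, error) {
	p, err := zfs.NewDatasetPath(rootFS)
	if err != nil {
		return nil, errors.New("RootFS is not a valid zfs filesystem path")
	}
	if p.Length() <= 0 {
		return nil, errors.New("RootFS must not be empty") // duplicates error check of receiver
	}
	return p, nil
}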
@@ -60,7 +60,7 @@ func (m *modeSink) RunPeriodic(_ context.Context) {}
 
 func modeSinkFromConfig(g *config.Global, in *config.SinkJob) (m *modeSink, err error) {
     m = &modeSink{}
-    m.rootDataset, err = zfs.NewDatasetPath(in.RootDataset)
+    m.rootDataset, err = zfs.NewDatasetPath(in.RootFS)
     if err != nil {
         return nil, errors.New("root dataset is not a valid zfs filesystem path")
     }