config: no-field for replication anymore

It's closer to the original config and we don't want users to specify
'filesystems' and similar multiple times in a single job definition.
Christian Schwarz 2018-09-04 14:44:45 -07:00
parent be57d6ce8e
commit 754b253043
12 changed files with 108 additions and 137 deletions
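
In effect, the nested replication: section is gone: its fields (connect, serve, filesystems, root_dataset, interval) now sit directly in the job definition. A before/after sketch of the push job, condensed from the sample config updated in this commit:

Before:

    jobs:
    - type: push
      name: "push"
      replication:
        connect:
          type: tcp
          address: "backup-server.foo.bar:8888"
        filesystems: {
          "<": true,
          "tmp": false
        }

After:

    jobs:
    - type: push
      name: "push"
      filesystems: {
        "<": true,
        "tmp": false
      }
      connect:
        type: tcp
        address: "backup-server.foo.bar:8888"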


@@ -24,7 +24,8 @@ type JobEnum struct {
 type PushJob struct {
     Type string `yaml:"type"`
     Name string `yaml:"name"`
-    Replication PushReplication `yaml:"replication"`
+    Connect ConnectEnum `yaml:"connect"`
+    Filesystems FilesystemsFilter `yaml:"filesystems"`
     Snapshotting Snapshotting `yaml:"snapshotting"`
     Pruning PruningSenderReceiver `yaml:"pruning"`
     Debug JobDebugSettings `yaml:"debug,optional"`
@@ -33,28 +34,26 @@ type PushJob struct {
 type SinkJob struct {
     Type string `yaml:"type"`
     Name string `yaml:"name"`
-    Replication SinkReplication `yaml:"replication"`
+    RootDataset string `yaml:"root_dataset"`
+    Serve ServeEnum `yaml:"serve"`
     Debug JobDebugSettings `yaml:"debug,optional"`
 }
 type PullJob struct {
     Type string `yaml:"type"`
     Name string `yaml:"name"`
-    Replication PullReplication `yaml:"replication"`
-    Pruning PruningSenderReceiver `yaml:"pruning"`
-    Debug JobDebugSettings `yaml:"debug,optional"`
-}
-type PullReplication struct {
     Connect ConnectEnum `yaml:"connect"`
     RootDataset string `yaml:"root_dataset"`
     Interval time.Duration `yaml:"interval,positive"`
+    Pruning PruningSenderReceiver `yaml:"pruning"`
+    Debug JobDebugSettings `yaml:"debug,optional"`
 }
 type SourceJob struct {
     Type string `yaml:"type"`
     Name string `yaml:"name"`
-    Replication SourceReplication `yaml:"replication"`
+    Serve ServeEnum `yaml:"serve"`
+    Filesystems FilesystemsFilter `yaml:"filesystems"`
     Snapshotting Snapshotting `yaml:"snapshotting"`
     Pruning PruningLocal `yaml:"pruning"`
     Debug JobDebugSettings `yaml:"debug,optional"`
@@ -63,31 +62,14 @@ type SourceJob struct {
 type LocalJob struct {
     Type string `yaml:"type"`
     Name string `yaml:"name"`
-    Replication LocalReplication `yaml:"replication"`
+    Filesystems FilesystemsFilter `yaml:"filesystems"`
+    RootDataset string `yaml:"root_dataset"`
     Snapshotting Snapshotting `yaml:"snapshotting"`
     Pruning PruningSenderReceiver `yaml:"pruning"`
     Debug JobDebugSettings `yaml:"debug,optional"`
 }
-type PushReplication struct {
-    Connect ConnectEnum `yaml:"connect"`
-    Filesystems map[string]bool `yaml:"filesystems"`
-}
-type SinkReplication struct {
-    RootDataset string `yaml:"root_dataset"`
-    Serve ServeEnum `yaml:"serve"`
-}
-type SourceReplication struct {
-    Serve ServeEnum `yaml:"serve"`
-    Filesystems map[string]bool `yaml:"filesystems"`
-}
-type LocalReplication struct {
-    Filesystems map[string]bool `yaml:"filesystems"`
-    RootDataset string `yaml:"root_dataset"`
-}
+type FilesystemsFilter map[string]bool
 type Snapshotting struct {
     SnapshotPrefix string `yaml:"snapshot_prefix"`


@@ -18,16 +18,15 @@ jobs:
   type: push
   # snapshot the filesystems matched by the left-hand-side of the mapping
   # every 10m with zrepl_ as prefix
-  replication:
-    connect:
-      type: tcp
-      address: localhost:2342
-    filesystems: {
-      "pool1/var/db<": true,
-      "pool1/usr/home<": true,
-      "pool1/usr/home/paranoid": false, #don't backup paranoid user
-      "pool1/poudriere/ports<": false #don't backup the ports trees
-    }
+  connect:
+    type: tcp
+    address: localhost:2342
+  filesystems: {
+    "pool1/var/db<": true,
+    "pool1/usr/home<": true,
+    "pool1/usr/home/paranoid": false, #don't backup paranoid user
+    "pool1/poudriere/ports<": false #don't backup the ports trees
+  }
   snapshotting:
     snapshot_prefix: zrepl_
     interval: 10m


@@ -11,14 +11,13 @@ func TestRPC(t *testing.T) {
 jobs:
 - name: pull_servers
   type: pull
-  replication:
-    connect:
-      type: tcp
-      address: "server1.foo.bar:8888"
-      rpc:
-        timeout: 20s # different form default, should merge
-    root_dataset: "pool2/backup_servers"
-    interval: 10m
+  connect:
+    type: tcp
+    address: "server1.foo.bar:8888"
+    rpc:
+      timeout: 20s # different form default, should merge
+  root_dataset: "pool2/backup_servers"
+  interval: 10m
   pruning:
     keep_sender:
     - type: not_replicated
@@ -28,14 +27,13 @@ jobs:
 - name: pull_servers2
   type: pull
-  replication:
-    connect:
-      type: tcp
-      address: "server1.foo.bar:8888"
-      rpc:
-        tx_chunk_size: 0xabcd # different from default, should merge
-    root_dataset: "pool2/backup_servers"
-    interval: 10m
+  connect:
+    type: tcp
+    address: "server1.foo.bar:8888"
+    rpc:
+      tx_chunk_size: 0xabcd # different from default, should merge
+  root_dataset: "pool2/backup_servers"
+  interval: 10m
   pruning:
     keep_sender:
     - type: not_replicated
@@ -45,22 +43,21 @@ jobs:
 - type: sink
   name: "laptop_sink"
-  replication:
-    root_dataset: "pool2/backup_laptops"
-    serve:
-      type: tcp
-      listen: "192.168.122.189:8888"
-      clients: {
-        "10.23.42.23":"client1"
-      }
-      rpc:
-        rx_structured_max: 0x2342
+  root_dataset: "pool2/backup_laptops"
+  serve:
+    type: tcp
+    listen: "192.168.122.189:8888"
+    clients: {
+      "10.23.42.23":"client1"
+    }
+    rpc:
+      rx_structured_max: 0x2342
 `)
-    assert.Equal(t, 20*time.Second, conf.Jobs[0].Ret.(*PullJob).Replication.Connect.Ret.(*TCPConnect).RPC.Timeout)
-    assert.Equal(t, uint32(0xabcd), conf.Jobs[1].Ret.(*PullJob).Replication.Connect.Ret.(*TCPConnect).RPC.TxChunkSize)
-    assert.Equal(t, uint32(0x2342), conf.Jobs[2].Ret.(*SinkJob).Replication.Serve.Ret.(*TCPServe).RPC.RxStructuredMaxLen)
+    assert.Equal(t, 20*time.Second, conf.Jobs[0].Ret.(*PullJob).Connect.Ret.(*TCPConnect).RPC.Timeout)
+    assert.Equal(t, uint32(0xabcd), conf.Jobs[1].Ret.(*PullJob).Connect.Ret.(*TCPConnect).RPC.TxChunkSize)
+    assert.Equal(t, uint32(0x2342), conf.Jobs[2].Ret.(*SinkJob).Serve.Ret.(*TCPServe).RPC.RxStructuredMaxLen)
     defConf := RPCConfig{}
     Default(&defConf)
     assert.Equal(t, defConf.Timeout, conf.Global.RPC.Timeout)


@@ -4,15 +4,14 @@ jobs:
   type: local
   # snapshot the filesystems matched by the left-hand-side of the mapping
   # every 10m with zrepl_ as prefix
-  replication:
-    filesystems: {
-      "pool1/var/db<": true,
-      "pool1/usr/home<": true,
-      "pool1/usr/home/paranoid": false, #don't backup paranoid user
-      "pool1/poudriere/ports<": false #don't backup the ports trees
-    }
-    # TODO FIXME enforce that the tree under root_dataset and the trees allowed (true) by filesystems are non-overlapping
-    root_dataset: "pool2/backups/pool1"
+  filesystems: {
+    "pool1/var/db<": true,
+    "pool1/usr/home<": true,
+    "pool1/usr/home/paranoid": false, #don't backup paranoid user
+    "pool1/poudriere/ports<": false #don't backup the ports trees
+  }
+  # TODO FIXME enforce that the tree under root_dataset and the trees allowed (true) by filesystems are non-overlapping
+  root_dataset: "pool2/backups/pool1"
   snapshotting:
     snapshot_prefix: zrepl_
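
The TODO FIXME in this hunk flags a real hazard in the flattened local job: if the tree under root_dataset is itself matched (true) by filesystems, the job would replicate its own backups. A hypothetical config such a check would have to reject (dataset names invented for illustration):

    jobs:
    - type: local
      filesystems: {
        "pool1<": true              # replicate everything under pool1 ...
      }
      root_dataset: "pool1/backups" # ... into a dataset that filesystems also matches: overlapping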


@@ -1,16 +1,15 @@
 jobs:
 - name: pull_servers
   type: pull
-  replication:
-    connect:
-      type: tls
-      address: "server1.foo.bar:8888"
-      ca: "/certs/ca.crt"
-      cert: "/certs/cert.crt"
-      key: "/certs/key.pem"
-      server_cn: "server1"
-    root_dataset: "pool2/backup_servers"
-    interval: 10m
+  connect:
+    type: tls
+    address: "server1.foo.bar:8888"
+    ca: "/certs/ca.crt"
+    cert: "/certs/cert.crt"
+    key: "/certs/key.pem"
+    server_cn: "server1"
+  root_dataset: "pool2/backup_servers"
+  interval: 10m
   pruning:
     keep_sender:
     - type: not_replicated


@@ -2,17 +2,16 @@ jobs:
 - name: pull_servers
   type: pull
-  replication:
-    connect:
-      type: ssh+stdinserver
-      host: app-srv.example.com
-      user: root
-      port: 22
-      identity_file: /etc/zrepl/ssh/identity
-      options: # optional, default [], `-o` arguments passed to ssh
-      - "Compression=on"
-    root_dataset: "pool2/backup_servers"
-    interval: 10m
+  connect:
+    type: ssh+stdinserver
+    host: app-srv.example.com
+    user: root
+    port: 22
+    identity_file: /etc/zrepl/ssh/identity
+    options: # optional, default [], `-o` arguments passed to ssh
+    - "Compression=on"
+  root_dataset: "pool2/backup_servers"
+  interval: 10m
   pruning:
     keep_sender:
     - type: not_replicated


@@ -1,14 +1,13 @@
 jobs:
 - type: push
   name: "push"
-  replication:
-    connect:
-      type: tcp
-      address: "backup-server.foo.bar:8888"
-    filesystems: {
-      "<": true,
-      "tmp": false
-    }
+  filesystems: {
+    "<": true,
+    "tmp": false
+  }
+  connect:
+    type: tcp
+    address: "backup-server.foo.bar:8888"
   snapshotting:
     snapshot_prefix: zrepl_
     interval: 10m


@@ -1,15 +1,14 @@
 jobs:
 - type: sink
   name: "laptop_sink"
-  replication:
-    root_dataset: "pool2/backup_laptops"
-    serve:
-      type: tls
-      listen: "192.168.122.189:8888"
-      ca: "ca.pem"
-      cert: "cert.pem"
-      key: "key.pem"
-      client_cn: "laptop1"
+  root_dataset: "pool2/backup_laptops"
+  serve:
+    type: tls
+    listen: "192.168.122.189:8888"
+    ca: "ca.pem"
+    cert: "cert.pem"
+    key: "key.pem"
+    client_cn: "laptop1"
 global:
   logging:
   - type: "tcp"

View File

@@ -1,17 +1,16 @@
 jobs:
 - name: pull_source
   type: source
-  replication:
-    serve:
-      type: tcp
-      listen: "0.0.0.0:8888"
-      clients: {
-        "192.168.122.123" : "client1"
-      }
-    filesystems: {
-      "<": true,
-      "secret": false
-    }
+  serve:
+    type: tcp
+    listen: "0.0.0.0:8888"
+    clients: {
+      "192.168.122.123" : "client1"
+    }
+  filesystems: {
+    "<": true,
+    "secret": false
+  }
   snapshotting:
     snapshot_prefix: zrepl_
     interval: 10m


@@ -1,14 +1,13 @@
 jobs:
 - name: pull_source
   type: source
-  replication:
-    serve:
-      type: stdinserver
-      client_identity: "client1"
-    filesystems: {
-      "<": true,
-      "secret": false
-    }
+  serve:
+    type: stdinserver
+    client_identity: "client1"
+  filesystems: {
+    "<": true,
+    "secret": false
+  }
   snapshotting:
     snapshot_prefix: zrepl_
     interval: 10m


@@ -29,12 +29,12 @@ func PushFromConfig(g *config.Global, in *config.PushJob) (j *Push, err error) {
     j = &Push{}
     j.name = in.Name
-    j.clientFactory, err = connecter.FromConfig(g, in.Replication.Connect)
+    j.clientFactory, err = connecter.FromConfig(g, in.Connect)
     if err != nil {
         return nil, errors.Wrap(err, "cannot build client")
     }
-    if j.fsfilter, err = filters.DatasetMapFilterFromConfig(in.Replication.Filesystems); err != nil {
+    if j.fsfilter, err = filters.DatasetMapFilterFromConfig(in.Filesystems); err != nil {
         return nil, errors.Wrap(err, "cannnot build filesystem filter")
     }


@@ -25,12 +25,12 @@ func SinkFromConfig(g *config.Global, in *config.SinkJob) (s *Sink, err error) {
     // FIXME multi client support
     s = &Sink{name: in.Name}
-    if s.l, s.rpcConf, err = serve.FromConfig(g, in.Replication.Serve); err != nil {
+    if s.l, s.rpcConf, err = serve.FromConfig(g, in.Serve); err != nil {
         return nil, errors.Wrap(err, "cannot build server")
     }
     fsmap := filters.NewDatasetMapFilter(1, false) // FIXME multi-client support
-    if err := fsmap.Add("<", in.Replication.RootDataset); err != nil {
+    if err := fsmap.Add("<", in.RootDataset); err != nil {
         return nil, errors.Wrap(err, "unexpected error: cannot build filesystem mapping")
     }
     s.fsmap = fsmap