Implement config parser.

This commit is contained in:
Christian Schwarz 2017-04-26 18:36:01 +02:00
parent 40f3b530e1
commit 00231ecb73
4 changed files with 305 additions and 0 deletions

224
cmd/config.go Normal file
View File

@ -0,0 +1,224 @@
package main
import (
yaml "gopkg.in/yaml.v2"
"io/ioutil"
"github.com/mitchellh/mapstructure"
"github.com/zrepl/zrepl/zfs"
"errors"
"strings"
)
type Pool struct {
Name string
Url string
}
type Push struct {
To string
Datasets []zfs.DatasetPath
}
type Pull struct {
From string
Mapping zfs.DatasetMapping
}
type Sink struct {
From string
Mapping zfs.DatasetMapping
}
type Config struct {
Pools []Pool
Pushs []Push
Pulls []Pull
Sinks []Sink
}
func ParseConfig(path string) (config Config, err error) {
c := make(map[string]interface{}, 0)
var bytes []byte
if bytes, err = ioutil.ReadFile(path); err != nil {
return
}
if err = yaml.Unmarshal(bytes, &c); err != nil {
return
}
return parseMain(c)
}
func parseMain(root map[string]interface{}) (c Config, err error) {
if c.Pools, err = parsePools(root["pools"]); err != nil {
return
}
if c.Pushs, err = parsePushs(root["pushs"]); err != nil {
return
}
if c.Pulls, err = parsePulls(root["pulls"]); err != nil {
return
}
if c.Sinks, err = parseSinks(root["sinks"]); err != nil {
return
}
return
}
func parsePools(v interface{}) (p []Pool, err error) {
p = make([]Pool, 0)
err = mapstructure.Decode(v, &p)
return
}
func parsePushs(v interface{}) (p []Push, err error) {
asList := make([]struct{
To string
Datasets []string
}, 0)
if err = mapstructure.Decode(v, &asList); err != nil {
return
}
p = make([]Push, len(asList))
for _, e := range asList {
push := Push{
To: e.To,
Datasets: make([]zfs.DatasetPath, len(e.Datasets)),
}
for i, ds := range e.Datasets {
if push.Datasets[i], err = zfs.NewDatasetPath(ds); err != nil {
return
}
}
p = append(p, push)
}
return
}
func parsePulls(v interface{}) (p []Pull, err error) {
asList := make([]struct{
From string
Mapping map[string]string
}, 0)
if err = mapstructure.Decode(v, &asList); err != nil {
return
}
p = make([]Pull, len(asList))
for _, e := range asList {
pull := Pull{
From: e.From,
}
if pull.Mapping, err = parseComboMapping(e.Mapping); err != nil {
return
}
p = append(p, pull)
}
return
}
func parseSinks(v interface{}) (s []Sink, err error) {
var asList []interface{}
var ok bool
if asList, ok = v.([]interface{}); !ok {
return nil, errors.New("expected list")
}
s = make([]Sink, len(asList))
for _, i := range asList {
var sink Sink
if sink, err = parseSink(i); err != nil {
return
}
s = append(s, sink)
}
return
}
func parseSink(v interface{}) (s Sink, err error) {
t := struct {
From string
Mapping map[string]string
}{}
if err = mapstructure.Decode(v, &t); err != nil {
return
}
s.From = t.From
s.Mapping, err = parseComboMapping(t.Mapping)
return
}
func parseComboMapping(m map[string]string) (c zfs.ComboMapping, err error) {
c.Mappings = make([]zfs.DatasetMapping, len(m))
for lhs,rhs := range m {
if lhs[0] == '|' {
if len(m) != 1 {
err = errors.New("non-recursive mapping must be the only mapping for a sink")
}
m := zfs.DirectMapping{
Source: nil,
}
if m.Target, err = zfs.NewDatasetPath(rhs); err != nil {
return
}
c.Mappings = append(c.Mappings, m)
} else if lhs[0] == '*' {
m := zfs.ExecMapping{}
fields := strings.Fields(strings.TrimPrefix(rhs, "!"))
if len(fields) < 1 {
err = errors.New("ExecMapping without acceptor path")
return
}
m.Name = fields[0]
m.Args = fields[1:]
c.Mappings = append(c.Mappings, m)
} else if strings.HasSuffix(lhs, "*") {
m := zfs.GlobMapping{}
m.PrefixPath, err = zfs.NewDatasetPath(strings.TrimSuffix(lhs, "*"))
if err != nil {
return
}
if m.TargetRoot, err = zfs.NewDatasetPath(rhs); err != nil {
return
}
c.Mappings = append(c.Mappings, m)
}
}
return
}

71
cmd/sampleconf/zrepl.yml Normal file
View File

@ -0,0 +1,71 @@
pools:
- name: offsite_backups
url: ssh://db2@backups1/db2/
- name:
- name: local_mirror
url: local://mirrorpool/mirrors/tank
pushs:
- to: offsite_backups
datasets:
- tank/var/db
- tank/usr/home
- to: local_mirror
datasets:
- tank
pulls:
- from: offsite
mapping: {
# like in sinks
}
sinks:
# direct mapping
# 1:1 mapping of remote dataset to local dataset
# We will reject a push request which contains > 0 datasets that do not
# match a mapping
- from: db1
mapping: {
"ssdpool/var/db/postgresql9.6":"zroot/backups/db1/pg_data"
}
# "|" non-recursive wildcard
# the remote must present excatly one dataset, mapped to the rhs
- from: cdn_master
mapping: {
"|":"tank/srv/cdn" # NOTE: | is currently an invalid character for a ZFS dataset
}
# "*" recursive wildcard
# the remote may present an arbitrary set of marks a recursive wildcard, i.e. map all remotes to a tree under rhs
- from: mirror1
mapping: {
"tank/foo/bar*":"zroot/backups/mirror1" # NOTE: * is currently an invalid character for a ZFS dataset
}
# "*":"!..." acceptor script
# shell out to an accceptor that receives the remote's offered datasets
# on stdin and, foreach line of this input, returns the corresponding
# local dataset (same order) or '!<space>optional reason' on stdout
# If the acceptor scripts exits with non-zero status code, the remote's
# request will be rejected
- from: complex_host
mapping: { #
"*":"!/path/to/acceptor" # we could just wire the path to the acceptor directly to the mapping
# but let's stick with the same type for the mapping field for now'
# NOTE: * and ! are currently invalid characters for a ZFS dataset
}
# Mixing the rules
# Mixing should be possible if there is a defined precedence (direct before *)
# and non-recursive wildcards are not allowed in multi-entry mapping objects
- from: special_snowflake
mapping: { # an explicit mapping mixed with a recursive wildcard
"sun/usr/home": backups/special_snowflake/homedirs,
"sun/var/db": backups/special_snowflake/database,
"*": backups/special_snowflake/remainingbackup
# NOTE: ^ alignment, should be possible, looks nicer
}

View File

@ -64,6 +64,11 @@ type DirectMapping struct {
} }
func (m DirectMapping) Map(source DatasetPath) (target DatasetPath, err error) { func (m DirectMapping) Map(source DatasetPath) (target DatasetPath, err error) {
if m.Source == nil {
return m.Target, nil
}
if len(m.Source) != len(source) { if len(m.Source) != len(source) {
return nil, NoMatchError return nil, NoMatchError
} }

View File

@ -35,6 +35,11 @@ func (p DatasetPath) ToString() string {
return strings.Join(p, "/") return strings.Join(p, "/")
} }
func NewDatasetPath(s string) (p DatasetPath, err error) {
// TODO validation
return toDatasetPath(s), nil
}
func toDatasetPath(s string) DatasetPath { func toDatasetPath(s string) DatasetPath {
return strings.Split(s, "/") return strings.Split(s, "/")
} }