2017-04-26 18:36:01 +02:00
|
|
|
package main
|
|
|
|
|
|
|
|
import (
|
2017-04-26 20:25:53 +02:00
|
|
|
"errors"
|
2017-04-29 19:07:47 +02:00
|
|
|
"fmt"
|
2017-04-29 20:10:09 +02:00
|
|
|
"github.com/jinzhu/copier"
|
2017-04-26 18:36:01 +02:00
|
|
|
"github.com/mitchellh/mapstructure"
|
2017-06-09 21:00:28 +02:00
|
|
|
"github.com/zrepl/zrepl/jobrun"
|
2017-04-29 20:10:09 +02:00
|
|
|
"github.com/zrepl/zrepl/rpc"
|
|
|
|
"github.com/zrepl/zrepl/sshbytestream"
|
2017-05-12 21:11:09 +02:00
|
|
|
. "github.com/zrepl/zrepl/util"
|
2017-04-26 18:36:01 +02:00
|
|
|
"github.com/zrepl/zrepl/zfs"
|
2017-04-26 20:25:53 +02:00
|
|
|
yaml "gopkg.in/yaml.v2"
|
2017-04-29 20:10:09 +02:00
|
|
|
"io"
|
2017-04-26 20:25:53 +02:00
|
|
|
"io/ioutil"
|
2017-06-22 19:04:48 +02:00
|
|
|
"regexp"
|
|
|
|
"strconv"
|
2017-04-26 18:36:01 +02:00
|
|
|
"strings"
|
2017-06-09 21:00:28 +02:00
|
|
|
"time"
|
2017-04-26 18:36:01 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// Pool is a named replication peer, reachable via its Transport.
type Pool struct {
	// Name identifies the pool in push/pull job definitions.
	Name string
	// Transport is used to open an RPC connection to the pool.
	Transport Transport
}
|
2017-04-29 20:10:09 +02:00
|
|
|
|
|
|
|
// Transport abstracts how an RPC session to a (possibly local)
// zrepl peer is established.
type Transport interface {
	// Connect opens an RPC session; rpcLog receives RPC-level log output.
	Connect(rpcLog Logger) (rpc.RPCRequester, error)
}
|
|
|
|
// LocalTransport serves RPC requests in-process instead of over a
// byte stream. Handler must be set (see SetHandler) before Connect
// is called.
type LocalTransport struct {
	Handler rpc.RPCHandler
}
|
|
|
|
// SSHTransport connects to a remote zrepl instance over SSH; its
// fields are copied into an sshbytestream.SSHTransport in Connect.
type SSHTransport struct {
	Host                 string
	User                 string
	Port                 uint16
	IdentityFile         string   `mapstructure:"identity_file"`
	TransportOpenCommand []string `mapstructure:"transport_open_command"`
	SSHCommand           string   `mapstructure:"ssh_command"`
	Options              []string
	// ConnLogReadFile / ConnLogWriteFile optionally record the raw
	// connection bytes for debugging (see Connect).
	ConnLogReadFile  string `mapstructure:"connlog_read_file"`
	ConnLogWriteFile string `mapstructure:"connlog_write_file"`
}
|
|
|
|
|
2017-04-26 18:36:01 +02:00
|
|
|
// Push describes a job that replicates local datasets selected by
// Filter to the remote pool To.
type Push struct {
	To                *Pool
	Filter            zfs.DatasetMapping
	InitialReplPolicy rpc.InitialReplPolicy
	RepeatStrategy    jobrun.RepeatStrategy
}
|
|
|
|
// Pull describes a job that replicates datasets from the remote pool
// From to local targets according to Mapping.
type Pull struct {
	From              *Pool
	Mapping           zfs.DatasetMapping
	InitialReplPolicy rpc.InitialReplPolicy
	RepeatStrategy    jobrun.RepeatStrategy
}
|
2017-06-09 21:00:28 +02:00
|
|
|
|
2017-04-30 17:55:11 +02:00
|
|
|
// ClientMapping associates a client identity (From) with a dataset
// mapping; used for the 'sinks' and 'pull_acls' config sections.
type ClientMapping struct {
	From    string
	Mapping zfs.DatasetMapping
}
|
|
|
|
|
2017-06-22 19:04:48 +02:00
|
|
|
// Prune describes a snapshot pruning job: versions passing both
// DatasetFilter and SnapshotFilter are subject to RetentionPolicy.
type Prune struct {
	Name           string
	DatasetFilter  zfs.DatasetMapping
	SnapshotFilter zfs.FilesystemVersionFilter
	RetentionPolicy *RetentionGrid // TODO abstract interface to support future policies?
}
|
|
|
|
|
2017-04-26 18:36:01 +02:00
|
|
|
// Config is the root of the parsed zrepl configuration file.
type Config struct {
	Pools    []Pool
	Pushs    []Push
	Pulls    []Pull
	Sinks    []ClientMapping
	PullACLs []ClientMapping
	Prunes   []Prune
}
|
|
|
|
|
|
|
|
func ParseConfig(path string) (config Config, err error) {
|
|
|
|
|
|
|
|
c := make(map[string]interface{}, 0)
|
|
|
|
|
|
|
|
var bytes []byte
|
|
|
|
|
|
|
|
if bytes, err = ioutil.ReadFile(path); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = yaml.Unmarshal(bytes, &c); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
return parseMain(c)
|
|
|
|
}
|
|
|
|
|
|
|
|
func parseMain(root map[string]interface{}) (c Config, err error) {
|
|
|
|
if c.Pools, err = parsePools(root["pools"]); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2017-04-29 19:07:47 +02:00
|
|
|
|
|
|
|
poolLookup := func(name string) (*Pool, error) {
|
|
|
|
for _, pool := range c.Pools {
|
|
|
|
if pool.Name == name {
|
|
|
|
return &pool, nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return nil, errors.New(fmt.Sprintf("pool '%s' not defined", name))
|
|
|
|
}
|
|
|
|
|
|
|
|
if c.Pushs, err = parsePushs(root["pushs"], poolLookup); err != nil {
|
2017-04-26 18:36:01 +02:00
|
|
|
return
|
|
|
|
}
|
2017-04-29 19:07:47 +02:00
|
|
|
if c.Pulls, err = parsePulls(root["pulls"], poolLookup); err != nil {
|
2017-04-26 18:36:01 +02:00
|
|
|
return
|
|
|
|
}
|
2017-04-30 17:55:11 +02:00
|
|
|
if c.Sinks, err = parseClientMappings(root["sinks"]); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if c.PullACLs, err = parseClientMappings(root["pull_acls"]); err != nil {
|
2017-04-26 18:36:01 +02:00
|
|
|
return
|
|
|
|
}
|
2017-06-22 19:04:48 +02:00
|
|
|
if c.Prunes, err = parsePrunes(root["prune"]); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2017-04-26 18:36:01 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-04-29 20:10:09 +02:00
|
|
|
func parsePools(v interface{}) (pools []Pool, err error) {
|
|
|
|
|
|
|
|
asList := make([]struct {
|
|
|
|
Name string
|
|
|
|
Transport map[string]interface{}
|
|
|
|
}, 0)
|
|
|
|
if err = mapstructure.Decode(v, &asList); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
pools = make([]Pool, len(asList))
|
|
|
|
for i, p := range asList {
|
2017-04-30 17:55:11 +02:00
|
|
|
|
2017-05-20 17:08:18 +02:00
|
|
|
if p.Name == rpc.LOCAL_TRANSPORT_IDENTITY {
|
|
|
|
err = errors.New(fmt.Sprintf("pool name '%s' reserved for local pulls", rpc.LOCAL_TRANSPORT_IDENTITY))
|
2017-04-30 17:55:11 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-04-29 20:10:09 +02:00
|
|
|
var transport Transport
|
|
|
|
if transport, err = parseTransport(p.Transport); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
pools[i] = Pool{
|
|
|
|
Name: p.Name,
|
|
|
|
Transport: transport,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-04-26 18:36:01 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-04-29 20:10:09 +02:00
|
|
|
func parseTransport(it map[string]interface{}) (t Transport, err error) {
|
|
|
|
|
|
|
|
if len(it) != 1 {
|
|
|
|
err = errors.New("ambiguous transport type")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
for key, val := range it {
|
|
|
|
switch key {
|
|
|
|
case "ssh":
|
|
|
|
t := SSHTransport{}
|
|
|
|
if err = mapstructure.Decode(val, &t); err != nil {
|
|
|
|
err = errors.New(fmt.Sprintf("could not parse ssh transport: %s", err))
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return t, nil
|
|
|
|
default:
|
|
|
|
return nil, errors.New(fmt.Sprintf("unknown transport type '%s'\n", key))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return // unreachable
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2017-04-29 19:07:47 +02:00
|
|
|
// poolLookup resolves a pool name to its parsed *Pool definition.
type poolLookup func(name string) (*Pool, error)
|
|
|
|
|
|
|
|
func parsePushs(v interface{}, pl poolLookup) (p []Push, err error) {
|
2017-04-26 18:36:01 +02:00
|
|
|
|
2017-04-26 20:25:53 +02:00
|
|
|
asList := make([]struct {
|
2017-05-07 12:00:34 +02:00
|
|
|
To string
|
2017-05-16 16:57:24 +02:00
|
|
|
Filter map[string]string
|
2017-05-07 12:00:34 +02:00
|
|
|
InitialReplPolicy string
|
2017-06-09 21:00:28 +02:00
|
|
|
Repeat map[string]string
|
2017-04-26 18:36:01 +02:00
|
|
|
}, 0)
|
|
|
|
|
|
|
|
if err = mapstructure.Decode(v, &asList); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
p = make([]Push, len(asList))
|
|
|
|
|
2017-04-29 18:26:43 +02:00
|
|
|
for i, e := range asList {
|
2017-04-29 19:07:47 +02:00
|
|
|
|
|
|
|
var toPool *Pool
|
|
|
|
if toPool, err = pl(e.To); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2017-04-26 18:36:01 +02:00
|
|
|
push := Push{
|
2017-05-16 16:57:24 +02:00
|
|
|
To: toPool,
|
2017-04-26 18:36:01 +02:00
|
|
|
}
|
|
|
|
|
2017-05-16 16:57:24 +02:00
|
|
|
if push.Filter, err = parseComboMapping(e.Filter); err != nil {
|
|
|
|
return
|
2017-04-26 18:36:01 +02:00
|
|
|
}
|
|
|
|
|
2017-05-20 17:08:18 +02:00
|
|
|
if push.InitialReplPolicy, err = parseInitialReplPolicy(e.InitialReplPolicy, rpc.DEFAULT_INITIAL_REPL_POLICY); err != nil {
|
2017-05-07 12:00:34 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-06-09 21:00:28 +02:00
|
|
|
if push.RepeatStrategy, err = parseRepeatStrategy(e.Repeat); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-04-29 18:26:43 +02:00
|
|
|
p[i] = push
|
2017-04-26 18:36:01 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-04-29 19:07:47 +02:00
|
|
|
func parsePulls(v interface{}, pl poolLookup) (p []Pull, err error) {
|
2017-04-26 18:36:01 +02:00
|
|
|
|
2017-04-26 20:25:53 +02:00
|
|
|
asList := make([]struct {
|
2017-05-07 12:00:34 +02:00
|
|
|
From string
|
|
|
|
Mapping map[string]string
|
|
|
|
InitialReplPolicy string
|
2017-06-09 21:00:28 +02:00
|
|
|
Repeat map[string]string
|
2017-04-26 18:36:01 +02:00
|
|
|
}, 0)
|
|
|
|
|
|
|
|
if err = mapstructure.Decode(v, &asList); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
p = make([]Pull, len(asList))
|
|
|
|
|
2017-04-29 18:26:43 +02:00
|
|
|
for i, e := range asList {
|
2017-04-29 19:07:47 +02:00
|
|
|
|
|
|
|
var fromPool *Pool
|
2017-04-30 16:11:33 +02:00
|
|
|
|
2017-05-20 17:08:18 +02:00
|
|
|
if e.From == rpc.LOCAL_TRANSPORT_IDENTITY {
|
2017-04-30 16:11:33 +02:00
|
|
|
fromPool = &Pool{
|
|
|
|
Name: "local",
|
|
|
|
Transport: LocalTransport{},
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
if fromPool, err = pl(e.From); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2017-04-29 19:07:47 +02:00
|
|
|
}
|
2017-04-30 17:56:11 +02:00
|
|
|
|
2017-04-26 18:36:01 +02:00
|
|
|
pull := Pull{
|
2017-04-29 19:07:47 +02:00
|
|
|
From: fromPool,
|
2017-04-26 18:36:01 +02:00
|
|
|
}
|
|
|
|
if pull.Mapping, err = parseComboMapping(e.Mapping); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2017-05-20 17:08:18 +02:00
|
|
|
if pull.InitialReplPolicy, err = parseInitialReplPolicy(e.InitialReplPolicy, rpc.DEFAULT_INITIAL_REPL_POLICY); err != nil {
|
2017-05-07 12:00:34 +02:00
|
|
|
return
|
|
|
|
}
|
2017-06-09 21:00:28 +02:00
|
|
|
if pull.RepeatStrategy, err = parseRepeatStrategy(e.Repeat); err != nil {
|
|
|
|
return
|
|
|
|
}
|
2017-05-07 12:00:34 +02:00
|
|
|
|
2017-04-29 18:26:43 +02:00
|
|
|
p[i] = pull
|
2017-04-26 18:36:01 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-05-20 17:08:18 +02:00
|
|
|
func parseInitialReplPolicy(v interface{}, defaultPolicy rpc.InitialReplPolicy) (p rpc.InitialReplPolicy, err error) {
|
2017-05-07 12:00:34 +02:00
|
|
|
s, ok := v.(string)
|
|
|
|
if !ok {
|
|
|
|
goto err
|
|
|
|
}
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case s == "":
|
|
|
|
p = defaultPolicy
|
|
|
|
case s == "most_recent":
|
2017-05-20 17:08:18 +02:00
|
|
|
p = rpc.InitialReplPolicyMostRecent
|
2017-05-07 12:00:34 +02:00
|
|
|
case s == "all":
|
2017-05-20 17:08:18 +02:00
|
|
|
p = rpc.InitialReplPolicyAll
|
2017-05-07 12:00:34 +02:00
|
|
|
default:
|
|
|
|
goto err
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
|
|
|
|
err:
|
|
|
|
err = errors.New(fmt.Sprintf("expected InitialReplPolicy, got %#v", v))
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-06-09 21:00:28 +02:00
|
|
|
func parseRepeatStrategy(r map[string]string) (s jobrun.RepeatStrategy, err error) {
|
|
|
|
|
|
|
|
if r == nil {
|
|
|
|
return jobrun.NoRepeatStrategy{}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
if repeatStr, ok := r["interval"]; ok {
|
|
|
|
d, err := time.ParseDuration(repeatStr)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
s = &jobrun.PeriodicRepeatStrategy{d}
|
|
|
|
return s, err
|
|
|
|
} else {
|
|
|
|
return nil, fmt.Errorf("attribute 'interval' not found but required in repeat specification")
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
2017-04-30 17:55:11 +02:00
|
|
|
// expectList asserts that v is a generic YAML list; it returns an
// error (and a nil slice) for any other type.
func expectList(v interface{}) (asList []interface{}, err error) {
	asList, ok := v.([]interface{})
	if !ok {
		err = errors.New("expected list")
	}
	return
}
|
|
|
|
|
|
|
|
func parseClientMappings(v interface{}) (cm []ClientMapping, err error) {
|
|
|
|
|
|
|
|
var asList []interface{}
|
|
|
|
if asList, err = expectList(v); err != nil {
|
|
|
|
return
|
2017-04-26 18:36:01 +02:00
|
|
|
}
|
|
|
|
|
2017-04-30 17:55:11 +02:00
|
|
|
cm = make([]ClientMapping, len(asList))
|
2017-04-26 18:36:01 +02:00
|
|
|
|
2017-04-30 17:55:11 +02:00
|
|
|
for i, e := range asList {
|
|
|
|
var m ClientMapping
|
|
|
|
if m, err = parseClientMapping(e); err != nil {
|
2017-04-26 18:36:01 +02:00
|
|
|
return
|
|
|
|
}
|
2017-04-30 17:55:11 +02:00
|
|
|
cm[i] = m
|
2017-04-26 18:36:01 +02:00
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-04-30 17:55:11 +02:00
|
|
|
func parseClientMapping(v interface{}) (s ClientMapping, err error) {
|
2017-04-26 18:36:01 +02:00
|
|
|
t := struct {
|
2017-04-26 20:25:53 +02:00
|
|
|
From string
|
2017-04-26 18:36:01 +02:00
|
|
|
Mapping map[string]string
|
|
|
|
}{}
|
|
|
|
if err = mapstructure.Decode(v, &t); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
s.From = t.From
|
|
|
|
s.Mapping, err = parseComboMapping(t.Mapping)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func parseComboMapping(m map[string]string) (c zfs.ComboMapping, err error) {
|
|
|
|
|
2017-04-30 17:56:11 +02:00
|
|
|
c.Mappings = make([]zfs.DatasetMapping, 0, len(m))
|
2017-04-26 18:36:01 +02:00
|
|
|
|
2017-04-26 20:25:53 +02:00
|
|
|
for lhs, rhs := range m {
|
2017-04-26 18:36:01 +02:00
|
|
|
|
2017-05-01 20:07:57 +02:00
|
|
|
if lhs == "*" && strings.HasPrefix(rhs, "!") {
|
2017-04-26 18:36:01 +02:00
|
|
|
|
|
|
|
m := zfs.ExecMapping{}
|
|
|
|
fields := strings.Fields(strings.TrimPrefix(rhs, "!"))
|
|
|
|
if len(fields) < 1 {
|
|
|
|
err = errors.New("ExecMapping without acceptor path")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
m.Name = fields[0]
|
|
|
|
m.Args = fields[1:]
|
|
|
|
|
|
|
|
c.Mappings = append(c.Mappings, m)
|
|
|
|
|
|
|
|
} else if strings.HasSuffix(lhs, "*") {
|
|
|
|
|
|
|
|
m := zfs.GlobMapping{}
|
|
|
|
|
|
|
|
m.PrefixPath, err = zfs.NewDatasetPath(strings.TrimSuffix(lhs, "*"))
|
|
|
|
if err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if m.TargetRoot, err = zfs.NewDatasetPath(rhs); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
c.Mappings = append(c.Mappings, m)
|
|
|
|
|
2017-05-01 20:07:57 +02:00
|
|
|
} else {
|
|
|
|
|
|
|
|
m := zfs.DirectMapping{}
|
|
|
|
|
|
|
|
if lhs == "|" {
|
|
|
|
m.Source = nil
|
|
|
|
} else {
|
|
|
|
if m.Source, err = zfs.NewDatasetPath(lhs); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if m.Target, err = zfs.NewDatasetPath(rhs); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
c.Mappings = append(c.Mappings, m)
|
|
|
|
|
2017-04-26 18:36:01 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
|
|
|
|
}
|
2017-04-29 20:10:09 +02:00
|
|
|
|
2017-05-16 16:57:24 +02:00
|
|
|
// Connect establishes the SSH byte-stream connection described by t
// and sets up an RPC session over it.
func (t SSHTransport) Connect(rpcLog Logger) (r rpc.RPCRequester, err error) {
	var stream io.ReadWriteCloser
	var rpcTransport sshbytestream.SSHTransport
	// Copy same-named fields (Host, User, Port, ...) from the config
	// struct into the sshbytestream transport struct.
	if err = copier.Copy(&rpcTransport, t); err != nil {
		return
	}
	if stream, err = sshbytestream.Outgoing(rpcTransport); err != nil {
		return
	}
	// Wrap the raw connection with a logger that records read/write
	// bytes to the configured files (behavior for empty paths is
	// defined by NewReadWriteCloserLogger — presumably a no-op; verify).
	stream, err = NewReadWriteCloserLogger(stream, t.ConnLogReadFile, t.ConnLogWriteFile)
	if err != nil {
		return
	}
	return rpc.ConnectByteStreamRPC(stream, rpcLog)
}
|
|
|
|
|
2017-05-16 16:57:24 +02:00
|
|
|
// Connect returns an in-process RPC requester backed by t.Handler.
// Panics if SetHandler was never called — that is a programming
// error, not a runtime condition.
func (t LocalTransport) Connect(rpcLog Logger) (r rpc.RPCRequester, err error) {
	if t.Handler == nil {
		panic("local transport with uninitialized handler")
	}
	return rpc.ConnectLocalRPC(t.Handler), nil
}
|
|
|
|
|
|
|
|
// SetHandler installs the in-process RPC handler used by Connect.
func (t *LocalTransport) SetHandler(handler rpc.RPCHandler) {
	t.Handler = handler
}
|
2017-06-22 19:04:48 +02:00
|
|
|
|
|
|
|
func parsePrunes(m interface{}) (rets []Prune, err error) {
|
|
|
|
|
|
|
|
asList := make([]map[string]interface{}, 0)
|
|
|
|
if err = mapstructure.Decode(m, &asList); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
rets = make([]Prune, len(asList))
|
|
|
|
|
|
|
|
for i, e := range asList {
|
|
|
|
if rets[i], err = parsePrune(e); err != nil {
|
|
|
|
err = fmt.Errorf("cannot parse prune job #%d: %s", i+1, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
func parsePrune(e map[string]interface{}) (prune Prune, err error) {
|
|
|
|
// Only support grid policy for now
|
|
|
|
policyName, ok := e["policy"]
|
|
|
|
if !ok || policyName != "grid" {
|
|
|
|
err = fmt.Errorf("prune job with unimplemented policy '%s'", policyName)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
var i struct {
|
|
|
|
Name string
|
|
|
|
Grid string
|
|
|
|
DatasetFilter map[string]string `mapstructure:"dataset_filter"`
|
|
|
|
SnapshotFilter map[string]string `mapstructure:"snapshot_filter"`
|
|
|
|
}
|
|
|
|
|
|
|
|
if err = mapstructure.Decode(e, &i); err != nil {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
prune.Name = i.Name
|
|
|
|
|
|
|
|
// Parse grid policy
|
|
|
|
intervals, err := parseRetentionGridIntervalsString(i.Grid)
|
|
|
|
if err != nil {
|
|
|
|
err = fmt.Errorf("cannot parse retention grid: %s", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
// Assert intervals are of increasing length (not necessarily required, but indicates config mistake)
|
|
|
|
lastDuration := time.Duration(0)
|
|
|
|
for i := range intervals {
|
|
|
|
if intervals[i].Length < lastDuration {
|
|
|
|
err = fmt.Errorf("retention grid interval length must be monotonically increasing:"+
|
|
|
|
"interval %d is shorter than %d", i+1, i)
|
|
|
|
return
|
|
|
|
} else {
|
|
|
|
lastDuration = intervals[i].Length
|
|
|
|
}
|
|
|
|
}
|
|
|
|
prune.RetentionPolicy = NewRetentionGrid(intervals)
|
|
|
|
|
|
|
|
// Parse filters
|
|
|
|
if prune.DatasetFilter, err = parseComboMapping(i.DatasetFilter); err != nil {
|
|
|
|
err = fmt.Errorf("cannot parse dataset filter: %s", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
if prune.SnapshotFilter, err = parseSnapshotFilter(i.SnapshotFilter); err != nil {
|
|
|
|
err = fmt.Errorf("cannot parse snapshot filter: %s", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// Matches one retention grid interval spec of the form
// "<times> x <factor><unit> (<params>)", e.g. "24x1h" or
// "7x1d(keep=all)". Units: s, min, h, d, w, mon. Groups:
// 1=times, 2=factor, 3=unit, 4=parenthesized params, 5=param body.
var retentionStringIntervalRegex *regexp.Regexp = regexp.MustCompile(`^\s*(\d+)\s*x\s*(\d+)\s*(s|min|h|d|w|mon)\s*\s*(\((.*)\))?\s*$`)
|
|
|
|
|
|
|
|
func parseRetentionGridIntervalString(e string) (intervals []RetentionInterval, err error) {
|
|
|
|
|
|
|
|
comps := retentionStringIntervalRegex.FindStringSubmatch(e)
|
|
|
|
if comps == nil {
|
|
|
|
err = fmt.Errorf("retention string does not match expected format")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
times, err := strconv.Atoi(comps[1])
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
} else if times <= 0 {
|
|
|
|
return nil, fmt.Errorf("contains factor <= 0")
|
|
|
|
}
|
|
|
|
|
|
|
|
durationFactor, err := strconv.ParseInt(comps[2], 10, 64)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
var durationUnit time.Duration
|
|
|
|
switch comps[3] {
|
|
|
|
case "s":
|
|
|
|
durationUnit = time.Second
|
|
|
|
case "min":
|
|
|
|
durationUnit = time.Minute
|
|
|
|
case "h":
|
|
|
|
durationUnit = time.Hour
|
|
|
|
case "d":
|
|
|
|
durationUnit = 24 * time.Hour
|
|
|
|
case "w":
|
|
|
|
durationUnit = 24 * 7 * time.Hour
|
|
|
|
case "mon":
|
|
|
|
durationUnit = 32 * 24 * 7 * time.Hour
|
|
|
|
default:
|
|
|
|
err = fmt.Errorf("contains unknown time unit '%s'", comps[3])
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
keepCount := 1
|
|
|
|
if comps[4] != "" {
|
|
|
|
// Decompose key=value, comma separated
|
|
|
|
// For now, only keep_count is supported
|
|
|
|
re := regexp.MustCompile(`^\s*keep=(.+)\s*$`)
|
|
|
|
res := re.FindStringSubmatch(comps[5])
|
|
|
|
if res == nil || len(res) != 2 {
|
|
|
|
err = fmt.Errorf("interval parameter contains unknown parameters")
|
|
|
|
}
|
|
|
|
if res[1] == "all" {
|
|
|
|
keepCount = RetentionGridKeepCountAll
|
|
|
|
} else {
|
|
|
|
keepCount, err = strconv.Atoi(res[1])
|
|
|
|
if err != nil {
|
|
|
|
err = fmt.Errorf("cannot parse keep_count value")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
intervals = make([]RetentionInterval, times)
|
|
|
|
for i := range intervals {
|
|
|
|
intervals[i] = RetentionInterval{
|
|
|
|
Length: time.Duration(durationFactor) * durationUnit, // TODO is this conversion fututre-proof?
|
|
|
|
KeepCount: keepCount,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
func parseRetentionGridIntervalsString(s string) (intervals []RetentionInterval, err error) {
|
|
|
|
|
|
|
|
ges := strings.Split(s, "|")
|
|
|
|
intervals = make([]RetentionInterval, 0, 7*len(ges))
|
|
|
|
|
|
|
|
for intervalIdx, e := range ges {
|
|
|
|
parsed, err := parseRetentionGridIntervalString(e)
|
|
|
|
if err != nil {
|
|
|
|
return nil, fmt.Errorf("cannot parse interval %d of %d: %s: %s", intervalIdx+1, len(ges), err, strings.TrimSpace(e))
|
|
|
|
}
|
|
|
|
intervals = append(intervals, parsed...)
|
|
|
|
}
|
|
|
|
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
// prefixSnapshotFilter accepts filesystem versions of type
// zfs.Snapshot whose name starts with a fixed prefix.
type prefixSnapshotFilter struct {
	prefix string
}
|
|
|
|
|
|
|
|
// Filter implements zfs.FilesystemVersionFilter: accept only
// versions of type zfs.Snapshot whose name carries f.prefix.
func (f prefixSnapshotFilter) Filter(fsv zfs.FilesystemVersion) (accept bool, err error) {
	return fsv.Type == zfs.Snapshot && strings.HasPrefix(fsv.Name, f.prefix), nil
}
|
|
|
|
|
|
|
|
func parseSnapshotFilter(fm map[string]string) (snapFilter zfs.FilesystemVersionFilter, err error) {
|
|
|
|
prefix, ok := fm["prefix"]
|
|
|
|
if !ok {
|
|
|
|
err = fmt.Errorf("unsupported snapshot filter")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
snapFilter = prefixSnapshotFilter{prefix}
|
|
|
|
return
|
|
|
|
}
|