2017-08-05 21:15:37 +02:00
|
|
|
package cmd
|
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
|
|
|
"strings"
|
|
|
|
|
2017-09-10 16:13:05 +02:00
|
|
|
"github.com/mitchellh/mapstructure"
|
2017-09-13 22:55:10 +02:00
|
|
|
"github.com/pkg/errors"
|
|
|
|
"github.com/zrepl/zrepl/zfs"
|
2017-08-05 21:15:37 +02:00
|
|
|
)
|
|
|
|
|
|
|
|
// DatasetMapFilter acts as either a dataset mapping (source path -> target
// path) or a dataset filter (path -> pass/omit), depending on filterMode.
type DatasetMapFilter struct {
	// pattern entries, in insertion order; matching picks the most specific one
	entries []datasetMapFilterEntry

	// if set, only valid filter entries can be added using Add()
	// and Map() will always return an error
	filterMode bool
}
|
|
|
|
|
|
|
|
// datasetMapFilterEntry is a single pattern entry of a DatasetMapFilter.
type datasetMapFilterEntry struct {
	// the dataset path prefix this entry matches on (trailing '<' stripped)
	path *zfs.DatasetPath

	// the mapping. since this datastructure acts as both mapping and filter
	// we have to convert it to the desired rep dynamically
	mapping string

	// true if the original pattern ended in '<', i.e. matches the whole subtree
	subtreeMatch bool
}
|
|
|
|
|
2017-09-16 19:42:42 +02:00
|
|
|
func NewDatasetMapFilter(capacity int, filterMode bool) *DatasetMapFilter {
|
2017-09-10 16:13:05 +02:00
|
|
|
return &DatasetMapFilter{
|
2017-09-02 12:22:34 +02:00
|
|
|
entries: make([]datasetMapFilterEntry, 0, capacity),
|
2017-09-16 19:42:42 +02:00
|
|
|
filterMode: filterMode,
|
2017-08-05 21:15:37 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func (m *DatasetMapFilter) Add(pathPattern, mapping string) (err error) {
|
|
|
|
|
2017-09-16 19:42:42 +02:00
|
|
|
if m.filterMode {
|
|
|
|
if _, err = m.parseDatasetFilterResult(mapping); err != nil {
|
2017-08-05 21:15:37 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// assert path glob adheres to spec
|
|
|
|
const SUBTREE_PATTERN string = "<"
|
|
|
|
patternCount := strings.Count(pathPattern, SUBTREE_PATTERN)
|
|
|
|
switch {
|
|
|
|
case patternCount > 1:
|
|
|
|
case patternCount == 1 && !strings.HasSuffix(pathPattern, SUBTREE_PATTERN):
|
|
|
|
err = fmt.Errorf("pattern invalid: only one '<' at end of string allowed")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
pathStr := strings.TrimSuffix(pathPattern, SUBTREE_PATTERN)
|
2017-08-06 13:04:29 +02:00
|
|
|
path, err := zfs.NewDatasetPath(pathStr)
|
2017-08-05 21:15:37 +02:00
|
|
|
if err != nil {
|
2017-08-06 13:04:29 +02:00
|
|
|
return fmt.Errorf("pattern is not a dataset path: %s", err)
|
2017-08-05 21:15:37 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
entry := datasetMapFilterEntry{
|
|
|
|
path: path,
|
|
|
|
mapping: mapping,
|
|
|
|
subtreeMatch: patternCount > 0,
|
|
|
|
}
|
|
|
|
m.entries = append(m.entries, entry)
|
|
|
|
return
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// find the most specific prefix mapping we have
|
|
|
|
//
|
|
|
|
// longer prefix wins over shorter prefix, direct wins over glob
|
2017-08-06 13:04:29 +02:00
|
|
|
func (m DatasetMapFilter) mostSpecificPrefixMapping(path *zfs.DatasetPath) (idx int, found bool) {
|
2017-08-05 21:15:37 +02:00
|
|
|
lcp, lcp_entry_idx := -1, -1
|
|
|
|
direct_idx := -1
|
|
|
|
for e := range m.entries {
|
|
|
|
entry := m.entries[e]
|
|
|
|
ep := m.entries[e].path
|
|
|
|
lep := ep.Length()
|
|
|
|
|
|
|
|
switch {
|
|
|
|
case !entry.subtreeMatch && ep.Equal(path):
|
|
|
|
direct_idx = e
|
|
|
|
continue
|
|
|
|
case entry.subtreeMatch && path.HasPrefix(ep) && lep > lcp:
|
|
|
|
lcp = lep
|
|
|
|
lcp_entry_idx = e
|
|
|
|
default:
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if lcp_entry_idx >= 0 || direct_idx >= 0 {
|
|
|
|
found = true
|
|
|
|
switch {
|
|
|
|
case direct_idx >= 0:
|
|
|
|
idx = direct_idx
|
|
|
|
case lcp_entry_idx >= 0:
|
|
|
|
idx = lcp_entry_idx
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
// Returns target == nil if there is no mapping
|
2017-08-06 13:04:29 +02:00
|
|
|
func (m DatasetMapFilter) Map(source *zfs.DatasetPath) (target *zfs.DatasetPath, err error) {
|
2017-08-05 21:15:37 +02:00
|
|
|
|
2017-09-16 19:42:42 +02:00
|
|
|
if m.filterMode {
|
2017-08-05 21:15:37 +02:00
|
|
|
err = fmt.Errorf("using a filter for mapping simply does not work")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
mi, hasMapping := m.mostSpecificPrefixMapping(source)
|
|
|
|
if !hasMapping {
|
2017-09-02 12:40:22 +02:00
|
|
|
return nil, nil
|
2017-08-05 21:15:37 +02:00
|
|
|
}
|
|
|
|
me := m.entries[mi]
|
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
if me.mapping == "" {
|
|
|
|
// Special case treatment: 'foo/bar<' => ''
|
|
|
|
if !me.subtreeMatch {
|
|
|
|
return nil, fmt.Errorf("mapping to '' must be a subtree match")
|
|
|
|
}
|
|
|
|
// ok...
|
|
|
|
} else {
|
|
|
|
if strings.HasPrefix("!", me.mapping) {
|
|
|
|
// reject mapping
|
|
|
|
return nil, nil
|
|
|
|
}
|
2017-09-16 19:42:42 +02:00
|
|
|
}
|
|
|
|
|
2017-08-05 21:15:37 +02:00
|
|
|
target, err = zfs.NewDatasetPath(me.mapping)
|
|
|
|
if err != nil {
|
|
|
|
err = fmt.Errorf("mapping target is not a dataset path: %s", err)
|
|
|
|
return
|
|
|
|
}
|
2017-10-05 18:55:02 +02:00
|
|
|
if me.subtreeMatch {
|
2017-10-05 19:58:43 +02:00
|
|
|
// strip common prefix ('<' wildcards are no special case here)
|
2017-08-05 21:15:37 +02:00
|
|
|
extendComps := source.Copy()
|
2017-10-05 19:58:43 +02:00
|
|
|
extendComps.TrimPrefix(me.path)
|
2017-08-05 21:15:37 +02:00
|
|
|
target.Extend(extendComps)
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-08-06 13:04:29 +02:00
|
|
|
func (m DatasetMapFilter) Filter(p *zfs.DatasetPath) (pass bool, err error) {
|
2017-09-16 19:42:42 +02:00
|
|
|
|
|
|
|
if !m.filterMode {
|
|
|
|
err = fmt.Errorf("using a mapping as a filter does not work")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-08-05 21:15:37 +02:00
|
|
|
mi, hasMapping := m.mostSpecificPrefixMapping(p)
|
|
|
|
if !hasMapping {
|
|
|
|
pass = false
|
|
|
|
return
|
|
|
|
}
|
|
|
|
me := m.entries[mi]
|
2017-09-16 19:42:42 +02:00
|
|
|
pass, err = m.parseDatasetFilterResult(me.mapping)
|
2017-08-05 21:15:37 +02:00
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-09-13 22:55:10 +02:00
|
|
|
// Construct a new filter-only DatasetMapFilter from a mapping
|
|
|
|
// The new filter allows excactly those paths that were not forbidden by the mapping.
|
|
|
|
func (m DatasetMapFilter) InvertedFilter() (inv *DatasetMapFilter, err error) {
|
|
|
|
|
2017-09-16 19:42:42 +02:00
|
|
|
if m.filterMode {
|
2017-09-13 22:55:10 +02:00
|
|
|
err = errors.Errorf("can only invert mappings")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
inv = &DatasetMapFilter{
|
|
|
|
make([]datasetMapFilterEntry, len(m.entries)),
|
|
|
|
true,
|
|
|
|
}
|
|
|
|
|
|
|
|
for i, e := range m.entries {
|
|
|
|
inv.entries[i].path, err = zfs.NewDatasetPath(e.mapping)
|
|
|
|
if err != nil {
|
|
|
|
err = errors.Wrapf(err, "mapping cannot be inverted: '%s' is not a dataset path: %s", e.mapping)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
inv.entries[i].mapping = MapFilterResultOk
|
|
|
|
inv.entries[i].subtreeMatch = e.subtreeMatch
|
|
|
|
}
|
|
|
|
|
|
|
|
return inv, nil
|
|
|
|
}
|
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
// FIXME investigate whether we can support more...
|
|
|
|
func (m DatasetMapFilter) Invert() (inv *DatasetMapFilter, err error) {
|
|
|
|
|
|
|
|
if m.filterMode {
|
|
|
|
err = errors.Errorf("can only invert mappings")
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
if len(m.entries) != 1 {
|
|
|
|
return nil, errors.Errorf("inversion of complicated mappings is not implemented") // FIXME
|
|
|
|
}
|
|
|
|
|
|
|
|
e := m.entries[0]
|
|
|
|
|
|
|
|
inv = &DatasetMapFilter{
|
|
|
|
make([]datasetMapFilterEntry, len(m.entries)),
|
|
|
|
false,
|
|
|
|
}
|
|
|
|
mp, err := zfs.NewDatasetPath(e.mapping)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
|
|
|
|
inv.entries[0] = datasetMapFilterEntry{
|
|
|
|
path: mp,
|
|
|
|
mapping: e.path.ToString(),
|
|
|
|
subtreeMatch: e.subtreeMatch,
|
|
|
|
}
|
|
|
|
|
|
|
|
return inv, nil
|
|
|
|
}
|
|
|
|
|
2017-09-16 19:42:42 +02:00
|
|
|
// Creates a new DatasetMapFilter in filter mode from a mapping
|
|
|
|
// All accepting mapping results are mapped to accepting filter results
|
|
|
|
// All rejecting mapping results are mapped to rejecting filter results
|
|
|
|
func (m DatasetMapFilter) AsFilter() (f *DatasetMapFilter) {
|
|
|
|
|
|
|
|
f = &DatasetMapFilter{
|
|
|
|
make([]datasetMapFilterEntry, len(m.entries)),
|
|
|
|
true,
|
|
|
|
}
|
|
|
|
|
|
|
|
for i, e := range m.entries {
|
|
|
|
var newe datasetMapFilterEntry = e
|
|
|
|
if strings.HasPrefix(newe.mapping, "!") {
|
|
|
|
newe.mapping = MapFilterResultOmit
|
|
|
|
} else {
|
|
|
|
newe.mapping = MapFilterResultOk
|
|
|
|
}
|
|
|
|
f.entries[i] = newe
|
|
|
|
}
|
|
|
|
|
|
|
|
return f
|
|
|
|
}
|
|
|
|
|
2017-09-13 22:55:10 +02:00
|
|
|
// Valid filter results stored in datasetMapFilterEntry.mapping when the
// DatasetMapFilter is in filter mode (see parseDatasetFilterResult).
const (
	// accept the path
	MapFilterResultOk string = "ok"
	// reject the path
	MapFilterResultOmit string = "!"
)
|
|
|
|
|
2017-08-05 21:15:37 +02:00
|
|
|
// Parse a dataset filter result
|
2017-09-16 19:42:42 +02:00
|
|
|
func (m DatasetMapFilter) parseDatasetFilterResult(result string) (pass bool, err error) {
|
2017-08-05 21:15:37 +02:00
|
|
|
l := strings.ToLower(result)
|
2017-09-16 19:42:42 +02:00
|
|
|
if l == MapFilterResultOk {
|
|
|
|
return true, nil
|
2017-08-05 21:15:37 +02:00
|
|
|
}
|
2017-09-16 19:42:42 +02:00
|
|
|
if l == MapFilterResultOmit {
|
|
|
|
return false, nil
|
|
|
|
}
|
|
|
|
return false, fmt.Errorf("'%s' is not a valid filter result", result)
|
2017-08-05 21:15:37 +02:00
|
|
|
}
|
2017-09-10 16:13:05 +02:00
|
|
|
|
2017-09-16 19:42:42 +02:00
|
|
|
func parseDatasetMapFilter(mi interface{}, filterMode bool) (f *DatasetMapFilter, err error) {
|
2017-09-10 16:13:05 +02:00
|
|
|
|
|
|
|
var m map[string]string
|
|
|
|
if err = mapstructure.Decode(mi, &m); err != nil {
|
|
|
|
err = fmt.Errorf("maps / filters must be specified as map[string]string: %s", err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
2017-09-16 19:42:42 +02:00
|
|
|
f = NewDatasetMapFilter(len(m), filterMode)
|
2017-09-10 16:13:05 +02:00
|
|
|
for pathPattern, mapping := range m {
|
|
|
|
if err = f.Add(pathPattern, mapping); err != nil {
|
|
|
|
err = fmt.Errorf("invalid mapping entry ['%s':'%s']: %s", pathPattern, mapping, err)
|
|
|
|
return
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return
|
|
|
|
}
|