Make zfs.DatasetPath json.Marshaler and json.Unmarshaler

Had to resort to using pointers to zfs.DatasetPath everywhere, so that the
json.Marshaler / json.Unmarshaler pointer-receiver methods apply. We should
find a better solution for that.
This commit is contained in:
Christian Schwarz 2017-08-06 13:04:29 +02:00
parent 2ce07c9342
commit cba083cadf
11 changed files with 75 additions and 57 deletions

View File

@ -17,7 +17,7 @@ type DatasetMapFilter struct {
} }
type datasetMapFilterEntry struct { type datasetMapFilterEntry struct {
path zfs.DatasetPath path *zfs.DatasetPath
// the mapping. since this datastructure acts as both mapping and filter // the mapping. since this datastructure acts as both mapping and filter
// we have to convert it to the desired rep dynamically // we have to convert it to the desired rep dynamically
mapping string mapping string
@ -50,12 +50,10 @@ func (m *DatasetMapFilter) Add(pathPattern, mapping string) (err error) {
return return
} }
var path zfs.DatasetPath
pathStr := strings.TrimSuffix(pathPattern, SUBTREE_PATTERN) pathStr := strings.TrimSuffix(pathPattern, SUBTREE_PATTERN)
path, err = zfs.NewDatasetPath(pathStr) path, err := zfs.NewDatasetPath(pathStr)
if err != nil { if err != nil {
err = fmt.Errorf("pattern is not a dataset path: %s", err) return fmt.Errorf("pattern is not a dataset path: %s", err)
return
} }
entry := datasetMapFilterEntry{ entry := datasetMapFilterEntry{
@ -71,7 +69,7 @@ func (m *DatasetMapFilter) Add(pathPattern, mapping string) (err error) {
// find the most specific prefix mapping we have // find the most specific prefix mapping we have
// //
// longer prefix wins over shorter prefix, direct wins over glob // longer prefix wins over shorter prefix, direct wins over glob
func (m DatasetMapFilter) mostSpecificPrefixMapping(path zfs.DatasetPath) (idx int, found bool) { func (m DatasetMapFilter) mostSpecificPrefixMapping(path *zfs.DatasetPath) (idx int, found bool) {
lcp, lcp_entry_idx := -1, -1 lcp, lcp_entry_idx := -1, -1
direct_idx := -1 direct_idx := -1
for e := range m.entries { for e := range m.entries {
@ -103,7 +101,7 @@ func (m DatasetMapFilter) mostSpecificPrefixMapping(path zfs.DatasetPath) (idx i
return return
} }
func (m DatasetMapFilter) Map(source zfs.DatasetPath) (target zfs.DatasetPath, err error) { func (m DatasetMapFilter) Map(source *zfs.DatasetPath) (target *zfs.DatasetPath, err error) {
if m.filterOnly { if m.filterOnly {
err = fmt.Errorf("using a filter for mapping simply does not work") err = fmt.Errorf("using a filter for mapping simply does not work")
@ -136,7 +134,7 @@ func (m DatasetMapFilter) Map(source zfs.DatasetPath) (target zfs.DatasetPath, e
return return
} }
func (m DatasetMapFilter) Filter(p zfs.DatasetPath) (pass bool, err error) { func (m DatasetMapFilter) Filter(p *zfs.DatasetPath) (pass bool, err error) {
mi, hasMapping := m.mostSpecificPrefixMapping(p) mi, hasMapping := m.mostSpecificPrefixMapping(p)
if !hasMapping { if !hasMapping {
pass = false pass = false

View File

@ -8,7 +8,7 @@ import (
) )
type DatasetMapping interface { type DatasetMapping interface {
Map(source zfs.DatasetPath) (target zfs.DatasetPath, err error) Map(source *zfs.DatasetPath) (target *zfs.DatasetPath, err error)
} }
type Handler struct { type Handler struct {
@ -17,7 +17,7 @@ type Handler struct {
SinkMappingFunc func(clientIdentity string) (mapping DatasetMapping, err error) SinkMappingFunc func(clientIdentity string) (mapping DatasetMapping, err error)
} }
func (h Handler) HandleFilesystemRequest(r rpc.FilesystemRequest) (roots []zfs.DatasetPath, err error) { func (h Handler) HandleFilesystemRequest(r rpc.FilesystemRequest) (roots []*zfs.DatasetPath, err error) {
h.Logger.Printf("handling fsr: %#v", r) h.Logger.Printf("handling fsr: %#v", r)
@ -120,7 +120,7 @@ func (h Handler) HandlePullMeRequest(r rpc.PullMeRequest, clientIdentity string,
return return
} }
func (h Handler) pullACLCheck(p zfs.DatasetPath) (err error) { func (h Handler) pullACLCheck(p *zfs.DatasetPath) (err error) {
var allowed bool var allowed bool
allowed, err = h.PullACL.Filter(p) allowed, err = h.PullACL.Filter(p)
if err != nil { if err != nil {

View File

@ -143,7 +143,7 @@ func cmdRun(cmd *cobra.Command, args []string) {
type localPullACL struct{} type localPullACL struct{}
func (a localPullACL) Filter(p zfs.DatasetPath) (pass bool, err error) { func (a localPullACL) Filter(p *zfs.DatasetPath) (pass bool, err error) {
return true, nil return true, nil
} }
@ -250,15 +250,15 @@ func doPull(pull PullContext) (err error) {
log := pull.Log log := pull.Log
fsr := rpc.FilesystemRequest{} fsr := rpc.FilesystemRequest{}
var remoteFilesystems []zfs.DatasetPath var remoteFilesystems []*zfs.DatasetPath
if remoteFilesystems, err = remote.FilesystemRequest(fsr); err != nil { if remoteFilesystems, err = remote.FilesystemRequest(fsr); err != nil {
return return
} }
// build mapping (local->RemoteLocalMapping) + traversal datastructure // build mapping (local->RemoteLocalMapping) + traversal datastructure
type RemoteLocalMapping struct { type RemoteLocalMapping struct {
Remote zfs.DatasetPath Remote *zfs.DatasetPath
Local zfs.DatasetPath Local *zfs.DatasetPath
} }
replMapping := make(map[string]RemoteLocalMapping, len(remoteFilesystems)) replMapping := make(map[string]RemoteLocalMapping, len(remoteFilesystems))
localTraversal := zfs.NewDatasetPathForest() localTraversal := zfs.NewDatasetPathForest()
@ -267,7 +267,7 @@ func doPull(pull PullContext) (err error) {
log.Printf("mapping using %#v\n", pull.Mapping) log.Printf("mapping using %#v\n", pull.Mapping)
for fs := range remoteFilesystems { for fs := range remoteFilesystems {
var err error var err error
var localFs zfs.DatasetPath var localFs *zfs.DatasetPath
localFs, err = pull.Mapping.Map(remoteFilesystems[fs]) localFs, err = pull.Mapping.Map(remoteFilesystems[fs])
if err != nil { if err != nil {
if err != NoMatchError { if err != NoMatchError {

View File

@ -14,7 +14,7 @@ import (
) )
type RPCRequester interface { type RPCRequester interface {
FilesystemRequest(r FilesystemRequest) (roots []zfs.DatasetPath, err error) FilesystemRequest(r FilesystemRequest) (roots []*zfs.DatasetPath, err error)
FilesystemVersionsRequest(r FilesystemVersionsRequest) (versions []zfs.FilesystemVersion, err error) FilesystemVersionsRequest(r FilesystemVersionsRequest) (versions []zfs.FilesystemVersion, err error)
InitialTransferRequest(r InitialTransferRequest) (io.Reader, error) InitialTransferRequest(r InitialTransferRequest) (io.Reader, error)
IncrementalTransferRequest(r IncrementalTransferRequest) (io.Reader, error) IncrementalTransferRequest(r IncrementalTransferRequest) (io.Reader, error)
@ -24,7 +24,7 @@ type RPCRequester interface {
} }
type RPCHandler interface { type RPCHandler interface {
HandleFilesystemRequest(r FilesystemRequest) (roots []zfs.DatasetPath, err error) HandleFilesystemRequest(r FilesystemRequest) (roots []*zfs.DatasetPath, err error)
// returned versions ordered by birthtime, oldest first // returned versions ordered by birthtime, oldest first
HandleFilesystemVersionsRequest(r FilesystemVersionsRequest) (versions []zfs.FilesystemVersion, err error) HandleFilesystemVersionsRequest(r FilesystemVersionsRequest) (versions []zfs.FilesystemVersion, err error)
@ -451,13 +451,13 @@ func (c ByteStreamRPC) ProtocolVersionRequest() (err error) {
return c.sendRequestReceiveHeader(b, ROK) return c.sendRequestReceiveHeader(b, ROK)
} }
func (c ByteStreamRPC) FilesystemRequest(r FilesystemRequest) (roots []zfs.DatasetPath, err error) { func (c ByteStreamRPC) FilesystemRequest(r FilesystemRequest) (roots []*zfs.DatasetPath, err error) {
if err = c.sendRequestReceiveHeader(r, RFilesystems); err != nil { if err = c.sendRequestReceiveHeader(r, RFilesystems); err != nil {
return return
} }
roots = make([]zfs.DatasetPath, 0) roots = make([]*zfs.DatasetPath, 0)
if err = readChunkedJSON(c.conn, &roots); err != nil { if err = readChunkedJSON(c.conn, &roots); err != nil {
return return
@ -520,7 +520,7 @@ func ConnectLocalRPC(handler RPCHandler) RPCRequester {
return LocalRPC{handler} return LocalRPC{handler}
} }
func (c LocalRPC) FilesystemRequest(r FilesystemRequest) (roots []zfs.DatasetPath, err error) { func (c LocalRPC) FilesystemRequest(r FilesystemRequest) (roots []*zfs.DatasetPath, err error) {
return c.handler.HandleFilesystemRequest(r) return c.handler.HandleFilesystemRequest(r)
} }

View File

@ -1,7 +1,14 @@
package rpc package rpc
import "io" import (
import "github.com/zrepl/zrepl/zfs" "encoding/json"
"io"
"github.com/zrepl/zrepl/zfs"
)
var _ json.Marshaler = &zfs.DatasetPath{}
var _ json.Unmarshaler = &zfs.DatasetPath{}
type RequestId [16]byte type RequestId [16]byte
type RequestType uint8 type RequestType uint8
@ -26,11 +33,11 @@ type FilesystemRequest struct {
} }
type FilesystemVersionsRequest struct { type FilesystemVersionsRequest struct {
Filesystem zfs.DatasetPath Filesystem *zfs.DatasetPath
} }
type InitialTransferRequest struct { type InitialTransferRequest struct {
Filesystem zfs.DatasetPath Filesystem *zfs.DatasetPath
FilesystemVersion zfs.FilesystemVersion FilesystemVersion zfs.FilesystemVersion
} }
@ -39,7 +46,7 @@ func (r InitialTransferRequest) Respond(snapshotReader io.Reader) {
} }
type IncrementalTransferRequest struct { type IncrementalTransferRequest struct {
Filesystem zfs.DatasetPath Filesystem *zfs.DatasetPath
From zfs.FilesystemVersion From zfs.FilesystemVersion
To zfs.FilesystemVersion To zfs.FilesystemVersion
} }

View File

@ -10,7 +10,7 @@ func NewDatasetPathForest() *DatasetPathForest {
} }
} }
func (f *DatasetPathForest) Add(p DatasetPath) { func (f *DatasetPathForest) Add(p *DatasetPath) {
if len(p.comps) <= 0 { if len(p.comps) <= 0 {
panic("dataset path too short. must have length > 0") panic("dataset path too short. must have length > 0")
} }
@ -30,7 +30,7 @@ func (f *DatasetPathForest) Add(p DatasetPath) {
} }
type DatasetPathVisit struct { type DatasetPathVisit struct {
Path DatasetPath Path *DatasetPath
// If true, the dataset referenced by Path was not in the list of datasets to traverse // If true, the dataset referenced by Path was not in the list of datasets to traverse
FilledIn bool FilledIn bool
} }
@ -93,7 +93,7 @@ func (t *datasetPathTree) WalkTopDown(parent []string, visitor DatasetPathsVisit
this := append(parent, t.Component) this := append(parent, t.Component)
thisVisit := DatasetPathVisit{ thisVisit := DatasetPathVisit{
DatasetPath{this}, &DatasetPath{this},
t.FilledIn, t.FilledIn,
} }
visitChildTree := visitor(thisVisit) visitChildTree := visitor(thisVisit)

View File

@ -34,7 +34,7 @@ func makeVisitRecorder() (v DatasetPathsVisitor, rec *visitRecorder) {
return return
} }
func buildForest(paths []DatasetPath) (f *DatasetPathForest) { func buildForest(paths []*DatasetPath) (f *DatasetPathForest) {
f = NewDatasetPathForest() f = NewDatasetPathForest()
for _, p := range paths { for _, p := range paths {
f.Add(p) f.Add(p)
@ -44,7 +44,7 @@ func buildForest(paths []DatasetPath) (f *DatasetPathForest) {
func TestDatasetPathForestWalkTopDown(t *testing.T) { func TestDatasetPathForestWalkTopDown(t *testing.T) {
paths := []DatasetPath{ paths := []*DatasetPath{
toDatasetPath("pool1"), toDatasetPath("pool1"),
toDatasetPath("pool1/foo/bar"), toDatasetPath("pool1/foo/bar"),
toDatasetPath("pool1/foo/bar/looloo"), toDatasetPath("pool1/foo/bar/looloo"),
@ -70,7 +70,7 @@ func TestDatasetPathForestWalkTopDown(t *testing.T) {
func TestDatasetPathWalkTopDownWorksUnordered(t *testing.T) { func TestDatasetPathWalkTopDownWorksUnordered(t *testing.T) {
paths := []DatasetPath{ paths := []*DatasetPath{
toDatasetPath("pool1"), toDatasetPath("pool1"),
toDatasetPath("pool1/foo/bar/looloo"), toDatasetPath("pool1/foo/bar/looloo"),
toDatasetPath("pool1/foo/bar"), toDatasetPath("pool1/foo/bar"),

View File

@ -208,13 +208,13 @@ func ZFSListFilesystemState() (localState map[string]FilesystemState, err error)
// move. // move.
// //
// TODO better solution available? // TODO better solution available?
func PlaceholderPropertyValue(p DatasetPath) string { func PlaceholderPropertyValue(p *DatasetPath) string {
ps := []byte(p.ToString()) ps := []byte(p.ToString())
sum := sha512.Sum512_256(ps) sum := sha512.Sum512_256(ps)
return hex.EncodeToString(sum[:]) return hex.EncodeToString(sum[:])
} }
func IsPlaceholder(p DatasetPath, placeholderPropertyValue string) (isPlaceholder bool, err error) { func IsPlaceholder(p *DatasetPath, placeholderPropertyValue string) (isPlaceholder bool, err error) {
expected := PlaceholderPropertyValue(p) expected := PlaceholderPropertyValue(p)
isPlaceholder = expected == placeholderPropertyValue isPlaceholder = expected == placeholderPropertyValue
if !isPlaceholder { if !isPlaceholder {
@ -223,7 +223,7 @@ func IsPlaceholder(p DatasetPath, placeholderPropertyValue string) (isPlaceholde
return return
} }
func ZFSCreatePlaceholderFilesystem(p DatasetPath) (err error) { func ZFSCreatePlaceholderFilesystem(p *DatasetPath) (err error) {
v := PlaceholderPropertyValue(p) v := PlaceholderPropertyValue(p)
cmd := exec.Command(ZFS_BINARY, "create", cmd := exec.Command(ZFS_BINARY, "create",
"-o", fmt.Sprintf("%s=%s", ZREPL_PLACEHOLDER_PROPERTY_NAME, v), "-o", fmt.Sprintf("%s=%s", ZREPL_PLACEHOLDER_PROPERTY_NAME, v),

View File

@ -3,10 +3,10 @@ package zfs
import "fmt" import "fmt"
type DatasetFilter interface { type DatasetFilter interface {
Filter(p DatasetPath) (pass bool, err error) Filter(p *DatasetPath) (pass bool, err error)
} }
func ZFSListMapping(filter DatasetFilter) (datasets []DatasetPath, err error) { func ZFSListMapping(filter DatasetFilter) (datasets []*DatasetPath, err error) {
if filter == nil { if filter == nil {
panic("filter must not be nil") panic("filter must not be nil")
@ -15,11 +15,11 @@ func ZFSListMapping(filter DatasetFilter) (datasets []DatasetPath, err error) {
var lines [][]string var lines [][]string
lines, err = ZFSList([]string{"name"}, "-r", "-t", "filesystem,volume") lines, err = ZFSList([]string{"name"}, "-r", "-t", "filesystem,volume")
datasets = make([]DatasetPath, 0, len(lines)) datasets = make([]*DatasetPath, 0, len(lines))
for _, line := range lines { for _, line := range lines {
var path DatasetPath var path *DatasetPath
if path, err = NewDatasetPath(line[0]); err != nil { if path, err = NewDatasetPath(line[0]); err != nil {
return return
} }

View File

@ -45,7 +45,7 @@ type FilesystemVersion struct {
Creation time.Time Creation time.Time
} }
func (v FilesystemVersion) ToAbsPath(p DatasetPath) string { func (v FilesystemVersion) ToAbsPath(p *DatasetPath) string {
var b bytes.Buffer var b bytes.Buffer
b.WriteString(p.ToString()) b.WriteString(p.ToString())
b.WriteString(v.Type.DelimiterChar()) b.WriteString(v.Type.DelimiterChar())
@ -57,7 +57,7 @@ type FilesystemVersionFilter interface {
Filter(fsv FilesystemVersion) (accept bool, err error) Filter(fsv FilesystemVersion) (accept bool, err error)
} }
func ZFSListFilesystemVersions(fs DatasetPath, filter FilesystemVersionFilter) (res []FilesystemVersion, err error) { func ZFSListFilesystemVersions(fs *DatasetPath, filter FilesystemVersionFilter) (res []FilesystemVersion, err error) {
var fieldLines [][]string var fieldLines [][]string
fieldLines, err = ZFSList( fieldLines, err = ZFSList(
[]string{"name", "guid", "createtxg", "creation"}, []string{"name", "guid", "createtxg", "creation"},
@ -125,7 +125,7 @@ func ZFSListFilesystemVersions(fs DatasetPath, filter FilesystemVersionFilter) (
return return
} }
func ZFSDestroyFilesystemVersion(filesystem DatasetPath, version FilesystemVersion) (err error) { func ZFSDestroyFilesystemVersion(filesystem *DatasetPath, version FilesystemVersion) (err error) {
datasetPath := version.ToAbsPath(filesystem) datasetPath := version.ToAbsPath(filesystem)

View File

@ -3,31 +3,33 @@ package zfs
import ( import (
"bufio" "bufio"
"bytes" "bytes"
"encoding/json"
"errors" "errors"
"fmt" "fmt"
"github.com/zrepl/zrepl/util"
"io" "io"
"os/exec" "os/exec"
"strings" "strings"
"github.com/zrepl/zrepl/util"
) )
type DatasetPath struct { type DatasetPath struct {
comps []string comps []string
} }
func (p DatasetPath) ToString() string { func (p *DatasetPath) ToString() string {
return strings.Join(p.comps, "/") return strings.Join(p.comps, "/")
} }
func (p DatasetPath) Empty() bool { func (p *DatasetPath) Empty() bool {
return len(p.comps) == 0 return len(p.comps) == 0
} }
func (p *DatasetPath) Extend(extend DatasetPath) { func (p *DatasetPath) Extend(extend *DatasetPath) {
p.comps = append(p.comps, extend.comps...) p.comps = append(p.comps, extend.comps...)
} }
func (p DatasetPath) HasPrefix(prefix DatasetPath) bool { func (p *DatasetPath) HasPrefix(prefix *DatasetPath) bool {
if len(prefix.comps) > len(p.comps) { if len(prefix.comps) > len(p.comps) {
return false return false
} }
@ -39,7 +41,7 @@ func (p DatasetPath) HasPrefix(prefix DatasetPath) bool {
return true return true
} }
func (p *DatasetPath) TrimPrefix(prefix DatasetPath) { func (p *DatasetPath) TrimPrefix(prefix *DatasetPath) {
if !p.HasPrefix(prefix) { if !p.HasPrefix(prefix) {
return return
} }
@ -64,7 +66,7 @@ func (p *DatasetPath) TrimNPrefixComps(n int) {
} }
func (p DatasetPath) Equal(q DatasetPath) bool { func (p DatasetPath) Equal(q *DatasetPath) bool {
if len(p.comps) != len(q.comps) { if len(p.comps) != len(q.comps) {
return false return false
} }
@ -76,17 +78,28 @@ func (p DatasetPath) Equal(q DatasetPath) bool {
return true return true
} }
func (p DatasetPath) Length() int { func (p *DatasetPath) Length() int {
return len(p.comps) return len(p.comps)
} }
func (p DatasetPath) Copy() (c DatasetPath) { func (p *DatasetPath) Copy() (c *DatasetPath) {
c = &DatasetPath{}
c.comps = make([]string, len(p.comps)) c.comps = make([]string, len(p.comps))
copy(c.comps, p.comps) copy(c.comps, p.comps)
return return
} }
func NewDatasetPath(s string) (p DatasetPath, err error) { func (p *DatasetPath) MarshalJSON() ([]byte, error) {
return json.Marshal(p.comps)
}
func (p *DatasetPath) UnmarshalJSON(b []byte) error {
p.comps = make([]string, 0)
return json.Unmarshal(b, &p.comps)
}
func NewDatasetPath(s string) (p *DatasetPath, err error) {
p = &DatasetPath{}
if s == "" { if s == "" {
p.comps = make([]string, 0) p.comps = make([]string, 0)
return p, nil // the empty dataset path return p, nil // the empty dataset path
@ -104,7 +117,7 @@ func NewDatasetPath(s string) (p DatasetPath, err error) {
return return
} }
func toDatasetPath(s string) DatasetPath { func toDatasetPath(s string) *DatasetPath {
p, err := NewDatasetPath(s) p, err := NewDatasetPath(s)
if err != nil { if err != nil {
panic(err) panic(err)
@ -172,7 +185,7 @@ func ZFSList(properties []string, zfsArgs ...string) (res [][]string, err error)
return return
} }
func ZFSSend(fs DatasetPath, from, to *FilesystemVersion) (stream io.Reader, err error) { func ZFSSend(fs *DatasetPath, from, to *FilesystemVersion) (stream io.Reader, err error) {
args := make([]string, 0) args := make([]string, 0)
args = append(args, "send") args = append(args, "send")
@ -188,7 +201,7 @@ func ZFSSend(fs DatasetPath, from, to *FilesystemVersion) (stream io.Reader, err
return return
} }
func ZFSRecv(fs DatasetPath, stream io.Reader, additionalArgs ...string) (err error) { func ZFSRecv(fs *DatasetPath, stream io.Reader, additionalArgs ...string) (err error) {
args := make([]string, 0) args := make([]string, 0)
args = append(args, "recv") args = append(args, "recv")
@ -226,7 +239,7 @@ func ZFSRecv(fs DatasetPath, stream io.Reader, additionalArgs ...string) (err er
return nil return nil
} }
func ZFSSet(fs DatasetPath, prop, val string) (err error) { func ZFSSet(fs *DatasetPath, prop, val string) (err error) {
if strings.ContainsRune(prop, '=') { if strings.ContainsRune(prop, '=') {
panic("prop contains rune '=' which is the delimiter between property name and value") panic("prop contains rune '=' which is the delimiter between property name and value")
@ -273,7 +286,7 @@ func ZFSDestroy(dataset string) (err error) {
} }
func ZFSSnapshot(fs DatasetPath, name string, recursive bool) (err error) { func ZFSSnapshot(fs *DatasetPath, name string, recursive bool) (err error) {
snapname := fmt.Sprintf("%s@%s", fs.ToString(), name) snapname := fmt.Sprintf("%s@%s", fs.ToString(), name)
cmd := exec.Command(ZFS_BINARY, "snapshot", snapname) cmd := exec.Command(ZFS_BINARY, "snapshot", snapname)