Mirror of https://github.com/zrepl/zrepl.git (synced 2025-04-14 14:38:25 +02:00)
Make zfs.DatasetPath json.Marshaler and json.Unmarshaler
Had to resort to using pointers to zfs.DatasetPath everywhere... Should find a better solution for that.
This commit is contained in:
parent 2ce07c9342
commit cba083cadf
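The pointer change follows from how encoding/json dispatches to custom (un)marshalers: UnmarshalJSON has to mutate its receiver, so both methods live on *DatasetPath, and only a *DatasetPath satisfies json.Unmarshaler. A minimal, self-contained sketch of that pattern (the Path type and its field are illustrative, not zrepl's actual code):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Path is a stand-in for zfs.DatasetPath: a dataset name split into components.
type Path struct {
	comps []string
}

// MarshalJSON encodes the path as a JSON array of its components.
func (p *Path) MarshalJSON() ([]byte, error) {
	return json.Marshal(p.comps)
}

// UnmarshalJSON needs a pointer receiver because it mutates p.
func (p *Path) UnmarshalJSON(b []byte) error {
	p.comps = make([]string, 0)
	return json.Unmarshal(b, &p.comps)
}

func main() {
	in := &Path{comps: strings.Split("pool1/foo/bar", "/")}
	b, err := json.Marshal(in) // -> ["pool1","foo","bar"]
	if err != nil {
		panic(err)
	}
	out := &Path{}
	if err := json.Unmarshal(b, out); err != nil { // fills out.comps via UnmarshalJSON
		panic(err)
	}
	fmt.Println(string(b), strings.Join(out.comps, "/"))
}
```

Because both methods use pointer receivers, a plain DatasetPath value implements neither interface, which is why the code below now passes *zfs.DatasetPath everywhere.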
@@ -17,7 +17,7 @@ type DatasetMapFilter struct {
 }
 
 type datasetMapFilterEntry struct {
-    path zfs.DatasetPath
+    path *zfs.DatasetPath
     // the mapping. since this datastructure acts as both mapping and filter
     // we have to convert it to the desired rep dynamically
     mapping string
@@ -50,12 +50,10 @@ func (m *DatasetMapFilter) Add(pathPattern, mapping string) (err error) {
         return
     }
 
-    var path zfs.DatasetPath
     pathStr := strings.TrimSuffix(pathPattern, SUBTREE_PATTERN)
-    path, err = zfs.NewDatasetPath(pathStr)
+    path, err := zfs.NewDatasetPath(pathStr)
     if err != nil {
-        err = fmt.Errorf("pattern is not a dataset path: %s", err)
-        return
+        return fmt.Errorf("pattern is not a dataset path: %s", err)
     }
 
     entry := datasetMapFilterEntry{
@@ -71,7 +69,7 @@ func (m *DatasetMapFilter) Add(pathPattern, mapping string) (err error) {
 // find the most specific prefix mapping we have
 //
 // longer prefix wins over shorter prefix, direct wins over glob
-func (m DatasetMapFilter) mostSpecificPrefixMapping(path zfs.DatasetPath) (idx int, found bool) {
+func (m DatasetMapFilter) mostSpecificPrefixMapping(path *zfs.DatasetPath) (idx int, found bool) {
     lcp, lcp_entry_idx := -1, -1
     direct_idx := -1
     for e := range m.entries {
@@ -103,7 +101,7 @@ func (m DatasetMapFilter) mostSpecificPrefixMapping(path zfs.DatasetPath) (idx i
     return
 }
 
-func (m DatasetMapFilter) Map(source zfs.DatasetPath) (target zfs.DatasetPath, err error) {
+func (m DatasetMapFilter) Map(source *zfs.DatasetPath) (target *zfs.DatasetPath, err error) {
 
     if m.filterOnly {
         err = fmt.Errorf("using a filter for mapping simply does not work")
@@ -136,7 +134,7 @@ func (m DatasetMapFilter) Map(source zfs.DatasetPath) (target zfs.DatasetPath, e
     return
 }
 
-func (m DatasetMapFilter) Filter(p zfs.DatasetPath) (pass bool, err error) {
+func (m DatasetMapFilter) Filter(p *zfs.DatasetPath) (pass bool, err error) {
     mi, hasMapping := m.mostSpecificPrefixMapping(p)
     if !hasMapping {
         pass = false
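The comment above ("longer prefix wins over shorter prefix, direct wins over glob") describes a longest-prefix match over dataset path components. A rough, hypothetical sketch of that selection rule (entry, mostSpecific and the subtree flag are illustrative names, not zrepl's internals):

```go
package main

import (
	"fmt"
	"strings"
)

// entry pairs a path prefix with a mapping; subtree marks a glob-style
// "<prefix>/<" entry as opposed to a direct match.
type entry struct {
	prefix  []string
	subtree bool
}

// mostSpecific returns the index of the entry whose prefix matches the most
// leading components of path; at equal length a direct match beats a glob.
func mostSpecific(entries []entry, path []string) (idx int, found bool) {
	best := -1
	for i, e := range entries {
		if len(e.prefix) > len(path) {
			continue
		}
		match := true
		for j := range e.prefix {
			if e.prefix[j] != path[j] {
				match = false
				break
			}
		}
		if !match {
			continue
		}
		if best == -1 || len(e.prefix) > len(entries[best].prefix) ||
			(len(e.prefix) == len(entries[best].prefix) && !e.subtree && entries[best].subtree) {
			best = i
		}
	}
	return best, best != -1
}

func main() {
	entries := []entry{
		{prefix: strings.Split("pool1", "/"), subtree: true},
		{prefix: strings.Split("pool1/foo", "/"), subtree: true},
	}
	idx, ok := mostSpecific(entries, strings.Split("pool1/foo/bar", "/"))
	fmt.Println(idx, ok) // 1 true: the longer prefix wins
}
```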
@@ -8,7 +8,7 @@ import (
 )
 
 type DatasetMapping interface {
-    Map(source zfs.DatasetPath) (target zfs.DatasetPath, err error)
+    Map(source *zfs.DatasetPath) (target *zfs.DatasetPath, err error)
 }
 
 type Handler struct {
@@ -17,7 +17,7 @@ type Handler struct {
     SinkMappingFunc func(clientIdentity string) (mapping DatasetMapping, err error)
 }
 
-func (h Handler) HandleFilesystemRequest(r rpc.FilesystemRequest) (roots []zfs.DatasetPath, err error) {
+func (h Handler) HandleFilesystemRequest(r rpc.FilesystemRequest) (roots []*zfs.DatasetPath, err error) {
 
     h.Logger.Printf("handling fsr: %#v", r)
 
@@ -120,7 +120,7 @@ func (h Handler) HandlePullMeRequest(r rpc.PullMeRequest, clientIdentity string,
     return
 }
 
-func (h Handler) pullACLCheck(p zfs.DatasetPath) (err error) {
+func (h Handler) pullACLCheck(p *zfs.DatasetPath) (err error) {
     var allowed bool
     allowed, err = h.PullACL.Filter(p)
     if err != nil {

@@ -143,7 +143,7 @@ func cmdRun(cmd *cobra.Command, args []string) {
 
 type localPullACL struct{}
 
-func (a localPullACL) Filter(p zfs.DatasetPath) (pass bool, err error) {
+func (a localPullACL) Filter(p *zfs.DatasetPath) (pass bool, err error) {
     return true, nil
 }
 
@@ -250,15 +250,15 @@ func doPull(pull PullContext) (err error) {
     log := pull.Log
 
     fsr := rpc.FilesystemRequest{}
-    var remoteFilesystems []zfs.DatasetPath
+    var remoteFilesystems []*zfs.DatasetPath
     if remoteFilesystems, err = remote.FilesystemRequest(fsr); err != nil {
         return
     }
 
     // build mapping (local->RemoteLocalMapping) + traversal datastructure
     type RemoteLocalMapping struct {
-        Remote zfs.DatasetPath
-        Local zfs.DatasetPath
+        Remote *zfs.DatasetPath
+        Local *zfs.DatasetPath
     }
     replMapping := make(map[string]RemoteLocalMapping, len(remoteFilesystems))
     localTraversal := zfs.NewDatasetPathForest()
@@ -267,7 +267,7 @@ func doPull(pull PullContext) (err error) {
     log.Printf("mapping using %#v\n", pull.Mapping)
     for fs := range remoteFilesystems {
         var err error
-        var localFs zfs.DatasetPath
+        var localFs *zfs.DatasetPath
         localFs, err = pull.Mapping.Map(remoteFilesystems[fs])
         if err != nil {
             if err != NoMatchError {
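The doPull hunks above map each remote filesystem to a local target and skip the ones the mapping does not cover. A hedged, simplified sketch of that loop using plain strings instead of zrepl's types (Mapper, prefixMapper and errNoMatch are made-up stand-ins for DatasetMapping and NoMatchError):

```go
package main

import (
	"errors"
	"fmt"
)

// Mapper mirrors the shape of the DatasetMapping interface in the diff above.
type Mapper interface {
	Map(source string) (target string, err error)
}

var errNoMatch = errors.New("no match") // stand-in for zrepl's NoMatchError

type prefixMapper struct{ from, to string }

func (m prefixMapper) Map(source string) (string, error) {
	if len(source) < len(m.from) || source[:len(m.from)] != m.from {
		return "", errNoMatch
	}
	return m.to + source[len(m.from):], nil
}

func main() {
	remote := []string{"zroot/var/db", "zroot/home", "tank/unrelated"}
	m := prefixMapper{from: "zroot", to: "storage/backups/host1"}

	// Build the remote->local association, skipping filesystems the mapping
	// does not cover, analogous to the NoMatchError handling in doPull.
	repl := make(map[string]string, len(remote))
	for _, r := range remote {
		local, err := m.Map(r)
		if err != nil {
			if errors.Is(err, errNoMatch) {
				continue
			}
			panic(err)
		}
		repl[local] = r
	}
	fmt.Println(repl)
}
```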
rpc/rpc.go (10 changed lines)
@@ -14,7 +14,7 @@ import (
 )
 
 type RPCRequester interface {
-    FilesystemRequest(r FilesystemRequest) (roots []zfs.DatasetPath, err error)
+    FilesystemRequest(r FilesystemRequest) (roots []*zfs.DatasetPath, err error)
     FilesystemVersionsRequest(r FilesystemVersionsRequest) (versions []zfs.FilesystemVersion, err error)
     InitialTransferRequest(r InitialTransferRequest) (io.Reader, error)
     IncrementalTransferRequest(r IncrementalTransferRequest) (io.Reader, error)
@@ -24,7 +24,7 @@ type RPCRequester interface {
 }
 
 type RPCHandler interface {
-    HandleFilesystemRequest(r FilesystemRequest) (roots []zfs.DatasetPath, err error)
+    HandleFilesystemRequest(r FilesystemRequest) (roots []*zfs.DatasetPath, err error)
 
     // returned versions ordered by birthtime, oldest first
     HandleFilesystemVersionsRequest(r FilesystemVersionsRequest) (versions []zfs.FilesystemVersion, err error)
@@ -451,13 +451,13 @@ func (c ByteStreamRPC) ProtocolVersionRequest() (err error) {
     return c.sendRequestReceiveHeader(b, ROK)
 }
 
-func (c ByteStreamRPC) FilesystemRequest(r FilesystemRequest) (roots []zfs.DatasetPath, err error) {
+func (c ByteStreamRPC) FilesystemRequest(r FilesystemRequest) (roots []*zfs.DatasetPath, err error) {
 
     if err = c.sendRequestReceiveHeader(r, RFilesystems); err != nil {
         return
     }
 
-    roots = make([]zfs.DatasetPath, 0)
+    roots = make([]*zfs.DatasetPath, 0)
 
     if err = readChunkedJSON(c.conn, &roots); err != nil {
         return
@@ -520,7 +520,7 @@ func ConnectLocalRPC(handler RPCHandler) RPCRequester {
     return LocalRPC{handler}
 }
 
-func (c LocalRPC) FilesystemRequest(r FilesystemRequest) (roots []zfs.DatasetPath, err error) {
+func (c LocalRPC) FilesystemRequest(r FilesystemRequest) (roots []*zfs.DatasetPath, err error) {
     return c.handler.HandleFilesystemRequest(r)
 }
 
@@ -1,7 +1,14 @@
 package rpc
 
-import "io"
-import "github.com/zrepl/zrepl/zfs"
+import (
+    "encoding/json"
+    "io"
+
+    "github.com/zrepl/zrepl/zfs"
+)
+
+var _ json.Marshaler = &zfs.DatasetPath{}
+var _ json.Unmarshaler = &zfs.DatasetPath{}
 
 type RequestId [16]byte
 type RequestType uint8
@@ -26,11 +33,11 @@ type FilesystemRequest struct {
 }
 
 type FilesystemVersionsRequest struct {
-    Filesystem zfs.DatasetPath
+    Filesystem *zfs.DatasetPath
 }
 
 type InitialTransferRequest struct {
-    Filesystem zfs.DatasetPath
+    Filesystem *zfs.DatasetPath
     FilesystemVersion zfs.FilesystemVersion
 }
 
@@ -39,7 +46,7 @@ func (r InitialTransferRequest) Respond(snapshotReader io.Reader) {
 }
 
 type IncrementalTransferRequest struct {
-    Filesystem zfs.DatasetPath
+    Filesystem *zfs.DatasetPath
     From zfs.FilesystemVersion
     To zfs.FilesystemVersion
 }
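The two `var _` declarations added above are compile-time interface assertions: they cost nothing at runtime, but the build fails if *zfs.DatasetPath ever stops implementing json.Marshaler or json.Unmarshaler. The same idiom in isolation (Thing is a made-up type for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Thing is a hypothetical type with custom JSON behaviour.
type Thing struct{ v int }

func (t *Thing) MarshalJSON() ([]byte, error) { return json.Marshal(t.v) }
func (t *Thing) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &t.v) }

// Compile-time assertions: if *Thing loses either method, the build breaks here.
var _ json.Marshaler = &Thing{}
var _ json.Unmarshaler = &Thing{}

func main() {
	b, _ := json.Marshal(&Thing{v: 42})
	fmt.Println(string(b)) // 42
}
```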
@@ -10,7 +10,7 @@ func NewDatasetPathForest() *DatasetPathForest {
     }
 }
 
-func (f *DatasetPathForest) Add(p DatasetPath) {
+func (f *DatasetPathForest) Add(p *DatasetPath) {
     if len(p.comps) <= 0 {
         panic("dataset path too short. must have length > 0")
     }
@@ -30,7 +30,7 @@ func (f *DatasetPathForest) Add(p DatasetPath) {
 }
 
 type DatasetPathVisit struct {
-    Path DatasetPath
+    Path *DatasetPath
     // If true, the dataset referenced by Path was not in the list of datasets to traverse
     FilledIn bool
 }
@@ -93,7 +93,7 @@ func (t *datasetPathTree) WalkTopDown(parent []string, visitor DatasetPathsVisit
     this := append(parent, t.Component)
 
     thisVisit := DatasetPathVisit{
-        DatasetPath{this},
+        &DatasetPath{this},
         t.FilledIn,
     }
     visitChildTree := visitor(thisVisit)
@@ -34,7 +34,7 @@ func makeVisitRecorder() (v DatasetPathsVisitor, rec *visitRecorder) {
     return
 }
 
-func buildForest(paths []DatasetPath) (f *DatasetPathForest) {
+func buildForest(paths []*DatasetPath) (f *DatasetPathForest) {
     f = NewDatasetPathForest()
     for _, p := range paths {
         f.Add(p)
@@ -44,7 +44,7 @@ func buildForest(paths []DatasetPath) (f *DatasetPathForest) {
 
 func TestDatasetPathForestWalkTopDown(t *testing.T) {
 
-    paths := []DatasetPath{
+    paths := []*DatasetPath{
         toDatasetPath("pool1"),
         toDatasetPath("pool1/foo/bar"),
         toDatasetPath("pool1/foo/bar/looloo"),
@@ -70,7 +70,7 @@ func TestDatasetPathForestWalkTopDown(t *testing.T) {
 
 func TestDatasetPathWalkTopDownWorksUnordered(t *testing.T) {
 
-    paths := []DatasetPath{
+    paths := []*DatasetPath{
         toDatasetPath("pool1"),
         toDatasetPath("pool1/foo/bar/looloo"),
         toDatasetPath("pool1/foo/bar"),
@@ -208,13 +208,13 @@ func ZFSListFilesystemState() (localState map[string]FilesystemState, err error)
 // move.
 //
 // TODO better solution available?
-func PlaceholderPropertyValue(p DatasetPath) string {
+func PlaceholderPropertyValue(p *DatasetPath) string {
     ps := []byte(p.ToString())
     sum := sha512.Sum512_256(ps)
     return hex.EncodeToString(sum[:])
 }
 
-func IsPlaceholder(p DatasetPath, placeholderPropertyValue string) (isPlaceholder bool, err error) {
+func IsPlaceholder(p *DatasetPath, placeholderPropertyValue string) (isPlaceholder bool, err error) {
     expected := PlaceholderPropertyValue(p)
     isPlaceholder = expected == placeholderPropertyValue
     if !isPlaceholder {
@@ -223,7 +223,7 @@ func IsPlaceholder(p DatasetPath, placeholderPropertyValue string) (isPlaceholde
     return
 }
 
-func ZFSCreatePlaceholderFilesystem(p DatasetPath) (err error) {
+func ZFSCreatePlaceholderFilesystem(p *DatasetPath) (err error) {
     v := PlaceholderPropertyValue(p)
     cmd := exec.Command(ZFS_BINARY, "create",
         "-o", fmt.Sprintf("%s=%s", ZREPL_PLACEHOLDER_PROPERTY_NAME, v),
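PlaceholderPropertyValue above derives the placeholder marker from the dataset path by hashing the name with SHA-512/256 and hex-encoding the digest. A standalone sketch of the same computation (the sample path is made up):

```go
package main

import (
	"crypto/sha512"
	"encoding/hex"
	"fmt"
)

// placeholderValue reproduces the hashing scheme shown in the diff:
// SHA-512/256 over the dataset name, hex-encoded.
func placeholderValue(datasetPath string) string {
	sum := sha512.Sum512_256([]byte(datasetPath))
	return hex.EncodeToString(sum[:])
}

func main() {
	v := placeholderValue("storage/backups/host1") // sample path, not from the repo
	fmt.Println(v)
	// A dataset counts as a placeholder if its zrepl property equals this value.
}
```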
@@ -3,10 +3,10 @@ package zfs
 import "fmt"
 
 type DatasetFilter interface {
-    Filter(p DatasetPath) (pass bool, err error)
+    Filter(p *DatasetPath) (pass bool, err error)
 }
 
-func ZFSListMapping(filter DatasetFilter) (datasets []DatasetPath, err error) {
+func ZFSListMapping(filter DatasetFilter) (datasets []*DatasetPath, err error) {
 
     if filter == nil {
         panic("filter must not be nil")
@@ -15,11 +15,11 @@ func ZFSListMapping(filter DatasetFilter) (datasets []DatasetPath, err error) {
     var lines [][]string
     lines, err = ZFSList([]string{"name"}, "-r", "-t", "filesystem,volume")
 
-    datasets = make([]DatasetPath, 0, len(lines))
+    datasets = make([]*DatasetPath, 0, len(lines))
 
     for _, line := range lines {
 
-        var path DatasetPath
+        var path *DatasetPath
         if path, err = NewDatasetPath(line[0]); err != nil {
             return
         }
@@ -45,7 +45,7 @@ type FilesystemVersion struct {
     Creation time.Time
 }
 
-func (v FilesystemVersion) ToAbsPath(p DatasetPath) string {
+func (v FilesystemVersion) ToAbsPath(p *DatasetPath) string {
     var b bytes.Buffer
     b.WriteString(p.ToString())
     b.WriteString(v.Type.DelimiterChar())
@@ -57,7 +57,7 @@ type FilesystemVersionFilter interface {
     Filter(fsv FilesystemVersion) (accept bool, err error)
 }
 
-func ZFSListFilesystemVersions(fs DatasetPath, filter FilesystemVersionFilter) (res []FilesystemVersion, err error) {
+func ZFSListFilesystemVersions(fs *DatasetPath, filter FilesystemVersionFilter) (res []FilesystemVersion, err error) {
     var fieldLines [][]string
     fieldLines, err = ZFSList(
         []string{"name", "guid", "createtxg", "creation"},
@@ -125,7 +125,7 @@ func ZFSListFilesystemVersions(fs DatasetPath, filter FilesystemVersionFilter) (
     return
 }
 
-func ZFSDestroyFilesystemVersion(filesystem DatasetPath, version FilesystemVersion) (err error) {
+func ZFSDestroyFilesystemVersion(filesystem *DatasetPath, version FilesystemVersion) (err error) {
 
     datasetPath := version.ToAbsPath(filesystem)
 
zfs/zfs.go (43 changed lines)
@@ -3,31 +3,33 @@ package zfs
 import (
     "bufio"
     "bytes"
+    "encoding/json"
     "errors"
     "fmt"
-    "github.com/zrepl/zrepl/util"
     "io"
     "os/exec"
     "strings"
+
+    "github.com/zrepl/zrepl/util"
 )
 
 type DatasetPath struct {
     comps []string
 }
 
-func (p DatasetPath) ToString() string {
+func (p *DatasetPath) ToString() string {
     return strings.Join(p.comps, "/")
 }
 
-func (p DatasetPath) Empty() bool {
+func (p *DatasetPath) Empty() bool {
     return len(p.comps) == 0
 }
 
-func (p *DatasetPath) Extend(extend DatasetPath) {
+func (p *DatasetPath) Extend(extend *DatasetPath) {
     p.comps = append(p.comps, extend.comps...)
 }
 
-func (p DatasetPath) HasPrefix(prefix DatasetPath) bool {
+func (p *DatasetPath) HasPrefix(prefix *DatasetPath) bool {
     if len(prefix.comps) > len(p.comps) {
         return false
     }
@@ -39,7 +41,7 @@ func (p DatasetPath) HasPrefix(prefix DatasetPath) bool {
     return true
 }
 
-func (p *DatasetPath) TrimPrefix(prefix DatasetPath) {
+func (p *DatasetPath) TrimPrefix(prefix *DatasetPath) {
     if !p.HasPrefix(prefix) {
         return
     }
@@ -64,7 +66,7 @@ func (p *DatasetPath) TrimNPrefixComps(n int) {
 
 }
 
-func (p DatasetPath) Equal(q DatasetPath) bool {
+func (p DatasetPath) Equal(q *DatasetPath) bool {
     if len(p.comps) != len(q.comps) {
         return false
     }
@@ -76,17 +78,28 @@ func (p DatasetPath) Equal(q DatasetPath) bool {
     return true
 }
 
-func (p DatasetPath) Length() int {
+func (p *DatasetPath) Length() int {
     return len(p.comps)
 }
 
-func (p DatasetPath) Copy() (c DatasetPath) {
+func (p *DatasetPath) Copy() (c *DatasetPath) {
+    c = &DatasetPath{}
     c.comps = make([]string, len(p.comps))
     copy(c.comps, p.comps)
     return
 }
 
-func NewDatasetPath(s string) (p DatasetPath, err error) {
+func (p *DatasetPath) MarshalJSON() ([]byte, error) {
+    return json.Marshal(p.comps)
+}
+
+func (p *DatasetPath) UnmarshalJSON(b []byte) error {
+    p.comps = make([]string, 0)
+    return json.Unmarshal(b, &p.comps)
+}
+
+func NewDatasetPath(s string) (p *DatasetPath, err error) {
+    p = &DatasetPath{}
     if s == "" {
         p.comps = make([]string, 0)
         return p, nil // the empty dataset path
@@ -104,7 +117,7 @@ func NewDatasetPath(s string) (p DatasetPath, err error) {
     return
 }
 
-func toDatasetPath(s string) DatasetPath {
+func toDatasetPath(s string) *DatasetPath {
     p, err := NewDatasetPath(s)
     if err != nil {
         panic(err)
@@ -172,7 +185,7 @@ func ZFSList(properties []string, zfsArgs ...string) (res [][]string, err error)
     return
 }
 
-func ZFSSend(fs DatasetPath, from, to *FilesystemVersion) (stream io.Reader, err error) {
+func ZFSSend(fs *DatasetPath, from, to *FilesystemVersion) (stream io.Reader, err error) {
 
     args := make([]string, 0)
     args = append(args, "send")
@@ -188,7 +201,7 @@ func ZFSSend(fs DatasetPath, from, to *FilesystemVersion) (stream io.Reader, err
     return
 }
 
-func ZFSRecv(fs DatasetPath, stream io.Reader, additionalArgs ...string) (err error) {
+func ZFSRecv(fs *DatasetPath, stream io.Reader, additionalArgs ...string) (err error) {
 
     args := make([]string, 0)
     args = append(args, "recv")
@@ -226,7 +239,7 @@ func ZFSRecv(fs DatasetPath, stream io.Reader, additionalArgs ...string) (err er
     return nil
 }
 
-func ZFSSet(fs DatasetPath, prop, val string) (err error) {
+func ZFSSet(fs *DatasetPath, prop, val string) (err error) {
 
     if strings.ContainsRune(prop, '=') {
         panic("prop contains rune '=' which is the delimiter between property name and value")
@@ -273,7 +286,7 @@ func ZFSDestroy(dataset string) (err error) {
 
 }
 
-func ZFSSnapshot(fs DatasetPath, name string, recursive bool) (err error) {
+func ZFSSnapshot(fs *DatasetPath, name string, recursive bool) (err error) {
 
     snapname := fmt.Sprintf("%s@%s", fs.ToString(), name)
     cmd := exec.Command(ZFS_BINARY, "snapshot", snapname)
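Taken together, the zfs/zfs.go changes mean DatasetPath is handled by pointer end to end: NewDatasetPath now returns a *DatasetPath, and the pointer-receiver JSON methods let a []*DatasetPath round-trip through the chunked-JSON RPC layer. A hedged usage sketch, using only the signatures visible in this diff (the calling code itself is hypothetical):

```go
// Hypothetical caller inside the zrepl codebase.
package example

import (
	"encoding/json"
	"fmt"

	"github.com/zrepl/zrepl/zfs"
)

func roundTrip() error {
	p, err := zfs.NewDatasetPath("pool1/foo/bar") // now returns *zfs.DatasetPath
	if err != nil {
		return err
	}

	roots := []*zfs.DatasetPath{p}
	b, err := json.Marshal(roots) // uses (*DatasetPath).MarshalJSON
	if err != nil {
		return err
	}

	var decoded []*zfs.DatasetPath
	if err := json.Unmarshal(b, &decoded); err != nil { // uses (*DatasetPath).UnmarshalJSON
		return err
	}

	fmt.Println(decoded[0].ToString()) // "pool1/foo/bar"
	return nil
}
```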