Initial working version

Summary:
* Logging is still bad
* Test output in a lot of places
* FIXMEs everywhere

Test Plan: None, just review

Differential Revision: https://phabricator.cschwarz.com/D2
Christian Schwarz 2018-06-20 20:20:37 +02:00
parent fa6426f803
commit 8cca0a8547
31 changed files with 1536 additions and 1586 deletions


@@ -29,6 +29,9 @@ generate: #not part of the build, must do that manually
 	@for pkg in $(_TESTPKGS); do\
 		go generate "$$pkg" || exit 1; \
 	done;
+	protoc -I=cmd/replication --go_out=cmd/replication cmd/replication/pdu.proto
+	# FIXME fix docker build!
 
 build:
 	@echo "INFO: In case of missing dependencies, run 'make vendordeps'"


@@ -29,8 +29,8 @@ func parseSnapshotPrefix(i string) (p string, err error) {
 	return
 }
 
-func (f *PrefixFilter) Filter(fsv zfs.FilesystemVersion) (accept bool, err error) {
-	fstypeMatches := (!f.fstypeSet || fsv.Type == f.fstype)
-	prefixMatches := strings.HasPrefix(fsv.Name, f.prefix)
+func (f *PrefixFilter) Filter(t zfs.VersionType, name string) (accept bool, err error) {
+	fstypeMatches := (!f.fstypeSet || t == f.fstype)
+	prefixMatches := strings.HasPrefix(name, f.prefix)
 	return fstypeMatches && prefixMatches, nil
 }
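
The signature change decouples the filter from zfs.FilesystemVersion: callers now pass the version type and name directly, which is all the protobuf-based endpoints introduced below have at hand. A minimal sketch of a call site under that assumption (prefix and snapshot name are hypothetical):

	// Does the snapshot "zrepl_2018-06-20" pass a filter for prefix "zrepl_"?
	f := NewPrefixFilter("zrepl_")
	accept, err := f.Filter(zfs.Snapshot, "zrepl_2018-06-20")
	if err == nil && accept {
		// this version participates in replication
	}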


@@ -6,9 +6,9 @@ import (
 	"context"
 	"github.com/mitchellh/mapstructure"
 	"github.com/pkg/errors"
-	"github.com/zrepl/zrepl/rpc"
 	"github.com/zrepl/zrepl/zfs"
 	"sync"
+	"github.com/zrepl/zrepl/cmd/replication"
 )
 
 type LocalJob struct {
@@ -96,15 +96,19 @@ func (j *LocalJob) JobStart(ctx context.Context) {
 	j.pruneRHSTask = NewTask("prune_rhs", j, rootLog)
 	j.pruneLHSTask = NewTask("prune_lhs", j, rootLog)
 
-	local := rpc.NewLocalRPC()
 	// Allow access to any dataset since we control what mapping
 	// is passed to the pull routine.
 	// All local datasets will be passed to its Map() function,
 	// but only those for which a mapping exists will actually be pulled.
 	// We can pay this small performance penalty for now.
-	handler := NewHandler(j.handlerTask.Log(), localPullACL{}, NewPrefixFilter(j.SnapshotPrefix))
+	wildcardMapFilter := NewDatasetMapFilter(1, false)
+	wildcardMapFilter.Add("<", "<")
+	sender := &SenderEndpoint{wildcardMapFilter, NewPrefixFilter(j.SnapshotPrefix)}
 
-	registerEndpoints(local, handler)
+	receiver, err := NewReceiverEndpoint(j.Mapping, NewPrefixFilter(j.SnapshotPrefix))
+	if err != nil {
+		rootLog.WithError(err).Error("unexpected error setting up local handler")
+	}
 
 	snapper := IntervalAutosnap{
 		task: j.snapperTask,
@@ -141,8 +145,14 @@ outer:
 		j.mainTask.Log().Debug("replicating from lhs to rhs")
 		j.mainTask.Enter("replicate")
-		puller := Puller{j.mainTask, local, j.Mapping, j.InitialReplPolicy}
-		puller.Pull()
+
+		replication.Replicate(
+			ctx,
+			replication.NewEndpointPairPull(sender, receiver),
+			replication.NewIncrementalPathReplicator(),
+		)
+
 		j.mainTask.Finish()
 
 		// use a ctx as soon as Pull gains ctx support
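
The net effect of this hunk: the local job no longer talks to itself through the in-process RPC shim but constructs both halves of the new replication API directly and hands them to the generic driver. A condensed sketch of the pattern, which is the same for the local and the network case (only the endpoint construction differs):

	// The sender offers everything (identity mapping "<" => "<");
	// the receiver enforces j.Mapping, so selection happens on the receiving side.
	pair := replication.NewEndpointPairPull(sender, receiver)
	replication.Replicate(ctx, pair, replication.NewIncrementalPathReplicator())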


@@ -7,8 +7,9 @@ import (
 	"fmt"
 	"github.com/mitchellh/mapstructure"
 	"github.com/pkg/errors"
-	"github.com/zrepl/zrepl/rpc"
 	"github.com/zrepl/zrepl/util"
+	"github.com/zrepl/zrepl/cmd/replication"
+	"github.com/problame/go-streamrpc"
 )
 
 type PullJob struct {
@@ -116,6 +117,13 @@ func (j *PullJob) JobStart(ctx context.Context) {
 	}
 }
 
+var STREAMRPC_CONFIG = &streamrpc.ConnConfig{ // FIXME oversight and configurability
+	RxHeaderMaxLen:       4096,
+	RxStructuredMaxLen:   4096 * 4096,
+	RxStreamMaxChunkSize: 4096 * 4096,
+	TxChunkSize:          4096 * 4096,
+}
+
 func (j *PullJob) doRun(ctx context.Context) {
 	j.task.Enter("run")
@@ -133,14 +141,28 @@ func (j *PullJob) doRun(ctx context.Context) {
 		return
 	}
 
-	client := rpc.NewClient(rwc)
+	client := RemoteEndpoint{streamrpc.NewClientOnConn(rwc, STREAMRPC_CONFIG)}
 	if j.Debug.RPC.Log {
-		client.SetLogger(j.task.Log(), true)
+		// FIXME implement support
+		// client.SetLogger(j.task.Log(), true)
 	}
 
 	j.task.Enter("pull")
-	puller := Puller{j.task, client, j.Mapping, j.InitialReplPolicy}
-	puller.Pull()
+
+	puller, err := NewReceiverEndpoint(
+		j.Mapping,
+		NewPrefixFilter(j.SnapshotPrefix),
+	)
+	if err != nil {
+		j.task.Log().WithError(err).Error("error creating receiver endpoint")
+		j.task.Finish()
+		return
+	}
+
+	replicator := replication.NewIncrementalPathReplicator()
+	replication.Replicate(context.WithValue(ctx, replication.ContextKeyLog, j.task.Log()), replication.NewEndpointPairPull(client, puller), replicator)
+
 	closeRPCWithTimeout(j.task, client, time.Second*1, "")
 	rwc.Close()
 	j.task.Finish()
@@ -172,7 +194,7 @@ func (j *PullJob) Pruner(task *Task, side PrunePolicySide, dryRun bool) (p Prune
 	return
 }
 
-func closeRPCWithTimeout(task *Task, remote rpc.RPCClient, timeout time.Duration, goodbye string) {
+func closeRPCWithTimeout(task *Task, remote RemoteEndpoint, timeout time.Duration, goodbye string) {
 	task.Log().Info("closing rpc connection")
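
For scale: 4096 * 4096 bytes is 16 MiB, so STREAMRPC_CONFIG above caps structured messages and stream chunks at 16 MiB each while request headers stay at 4 KiB; per the FIXME, the values are hard-coded for now rather than derived from the job configuration.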


@@ -5,10 +5,10 @@ import (
 	"io"
 	"time"
 
-	mapstructure "github.com/mitchellh/mapstructure"
+	"github.com/mitchellh/mapstructure"
 	"github.com/pkg/errors"
-	"github.com/zrepl/zrepl/rpc"
 	"github.com/zrepl/zrepl/util"
+	"github.com/problame/go-streamrpc"
 )
 
 type SourceJob struct {
@@ -206,17 +206,16 @@ func (j *SourceJob) handleConnection(rwc io.ReadWriteCloser, task *Task) {
 		panic(err)
 	}
 
-	// construct connection handler
-	handler := NewHandler(task.Log(), j.Filesystems, NewPrefixFilter(j.SnapshotPrefix))
+	senderEP := NewSenderEndpoint(j.Filesystems, NewPrefixFilter(j.SnapshotPrefix))
+	handler := HandlerAdaptor{senderEP}
 
-	// handle connection
-	rpcServer := rpc.NewServer(rwc)
-	if j.Debug.RPC.Log {
-		rpclog := task.Log().WithField("subsystem", "rpc")
-		rpcServer.SetLogger(rpclog, true)
-	}
-	registerEndpoints(rpcServer, handler)
-	if err = rpcServer.Serve(); err != nil {
+	// FIXME logging support or erase config
+	//if j.Debug.RPC.Log {
+	//	rpclog := task.Log().WithField("subsystem", "rpc")
+	//	rpcServer.SetLogger(rpclog, true)
+	//}
+
+	if err := streamrpc.ServeConn(rwc, STREAMRPC_CONFIG, handler.Handle); err != nil {
 		task.Log().WithError(err).Error("error serving connection")
 	}


@@ -101,6 +101,7 @@ func (m DatasetMapFilter) mostSpecificPrefixMapping(path *zfs.DatasetPath) (idx
 	return
 }
 
+// Returns target == nil if there is no mapping
 func (m DatasetMapFilter) Map(source *zfs.DatasetPath) (target *zfs.DatasetPath, err error) {
 	if m.filterMode {
@@ -114,9 +115,17 @@ func (m DatasetMapFilter) Map(source *zfs.DatasetPath) (target *zfs.DatasetPath,
 	}
 	me := m.entries[mi]
 
-	if strings.HasPrefix("!", me.mapping) {
-		// reject mapping
-		return nil, nil
+	if me.mapping == "" {
+		// Special case treatment: 'foo/bar<' => ''
+		if !me.subtreeMatch {
+			return nil, fmt.Errorf("mapping to '' must be a subtree match")
+		}
+		// ok...
+	} else {
+		if strings.HasPrefix("!", me.mapping) {
+			// reject mapping
+			return nil, nil
+		}
 	}
 
 	target, err = zfs.NewDatasetPath(me.mapping)
@@ -177,6 +186,38 @@ func (m DatasetMapFilter) InvertedFilter() (inv *DatasetMapFilter, err error) {
 	return inv, nil
 }
 
+// FIXME investigate whether we can support more...
+func (m DatasetMapFilter) Invert() (inv *DatasetMapFilter, err error) {
+
+	if m.filterMode {
+		err = errors.Errorf("can only invert mappings")
+		return
+	}
+
+	if len(m.entries) != 1 {
+		return nil, errors.Errorf("inversion of complicated mappings is not implemented") // FIXME
+	}
+
+	e := m.entries[0]
+
+	inv = &DatasetMapFilter{
+		make([]datasetMapFilterEntry, len(m.entries)),
+		false,
+	}
+	mp, err := zfs.NewDatasetPath(e.mapping)
+	if err != nil {
+		return nil, err
+	}
+
+	inv.entries[0] = datasetMapFilterEntry{
+		path:         mp,
+		mapping:      e.path.ToString(),
+		subtreeMatch: e.subtreeMatch,
+	}
+
+	return inv, nil
+}
+
 // Creates a new DatasetMapFilter in filter mode from a mapping
 // All accepting mapping results are mapped to accepting filter results
 // All rejecting mapping results are mapped to rejecting filter results
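
Invert() is what lets the receiving side answer ListFilesystems: the pull mapping translates sender datasets to local ones, and its inverse translates local datasets back into the sender's namespace. A small sketch of the only supported shape, a single wildcard entry (cf. the new test further down):

	// "<" => "foo/bar" maps every sender dataset under the local root foo/bar.
	m, _ := parseDatasetMapFilter(map[string]string{"<": "foo/bar"}, false)
	inv, _ := m.Invert() // behaves like the mapping "foo/bar<" => ""
	p, _ := zfs.NewDatasetPath("foo/bar/baz")
	back, _ := inv.Map(p) // back.ToString() == "baz"; paths outside foo/bar map to nil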


@@ -135,6 +135,25 @@ func TestDatasetMapFilter(t *testing.T) {
 	expectMapping(map1, "b", "")
 	expectMapping(map1, "q/r", "root4/1/2/r")
 
+	map2 := map[string]string{ // identity mapping
+		"<": "",
+	}
+	expectMapping(map2, "foo/bar", "foo/bar")
+
+	map3 := map[string]string{ // subtree to local mapping, need that for Invert()
+		"foo/bar<": "",
+	}
+	{
+		m, _ := parseDatasetMapFilter(map3, false)
+		p, _ := zfs.NewDatasetPath("foo/bar")
+		tp, err := m.Map(p)
+		assert.Nil(t, err)
+		assert.True(t, tp.Empty())
+		expectMapping(map3, "foo/bar/x", "x")
+		expectMapping(map3, "x", "")
+	}
+
 	filter1 := map[string]string{
 		"<":  "!",
 		"a<": "ok",
@@ -220,3 +239,43 @@ func TestDatasetMapFilter_InvertedFilter(t *testing.T) {
 	expectMapping(inv, "1/2/a/b", true)
 }
+
+func TestDatasetMapFilter_Invert(t *testing.T) {
+
+	mapspec := map[string]string{
+		"<": "foo/bar",
+	}
+	m, err := parseDatasetMapFilter(mapspec, false)
+	assert.NoError(t, err)
+
+	inv, err := m.Invert()
+	assert.NoError(t, err)
+
+	expectMapping := func(m *DatasetMapFilter, input, expect string, expErr bool, expEmpty bool) {
+		p, err := zfs.NewDatasetPath(input)
+		assert.Nil(t, err)
+		r, err := m.Map(p)
+		if expErr {
+			assert.Nil(t, r)
+			assert.Error(t, err)
+			return
+		}
+		if expEmpty {
+			assert.Nil(t, err)
+			assert.True(t, r.Empty())
+		} else if expect == "" {
+			assert.Nil(t, r)
+			assert.Nil(t, err)
+		} else {
+			assert.Nil(t, err)
+			assert.NotNil(t, r)
+			assert.Equal(t, expect, r.ToString())
+		}
+	}
+
+	expectMapping(inv, "x", "", false, false)
+	expectMapping(inv, "foo/bar", "", false, true)
+	expectMapping(inv, "foo/bar/bee", "bee", false, false)
+}


@@ -1,181 +0,0 @@
package cmd
import (
"fmt"
"io"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/rpc"
"github.com/zrepl/zrepl/zfs"
)
type DatasetMapping interface {
Map(source *zfs.DatasetPath) (target *zfs.DatasetPath, err error)
}
type FilesystemRequest struct {
Roots []string // may be nil, indicating interest in all filesystems
}
type FilesystemVersionsRequest struct {
Filesystem *zfs.DatasetPath
}
type InitialTransferRequest struct {
Filesystem *zfs.DatasetPath
FilesystemVersion zfs.FilesystemVersion
}
type IncrementalTransferRequest struct {
Filesystem *zfs.DatasetPath
From zfs.FilesystemVersion
To zfs.FilesystemVersion
}
type Handler struct {
logger Logger
dsf zfs.DatasetFilter
fsvf zfs.FilesystemVersionFilter
}
func NewHandler(logger Logger, dsfilter zfs.DatasetFilter, snapfilter zfs.FilesystemVersionFilter) (h Handler) {
return Handler{logger, dsfilter, snapfilter}
}
func registerEndpoints(server rpc.RPCServer, handler Handler) (err error) {
err = server.RegisterEndpoint("FilesystemRequest", handler.HandleFilesystemRequest)
if err != nil {
panic(err)
}
err = server.RegisterEndpoint("FilesystemVersionsRequest", handler.HandleFilesystemVersionsRequest)
if err != nil {
panic(err)
}
err = server.RegisterEndpoint("InitialTransferRequest", handler.HandleInitialTransferRequest)
if err != nil {
panic(err)
}
err = server.RegisterEndpoint("IncrementalTransferRequest", handler.HandleIncrementalTransferRequest)
if err != nil {
panic(err)
}
return nil
}
func (h Handler) HandleFilesystemRequest(r *FilesystemRequest, roots *[]*zfs.DatasetPath) (err error) {
log := h.logger.WithField("endpoint", "FilesystemRequest")
log.WithField("request", r).Debug("request")
log.WithField("dataset_filter", h.dsf).Debug("dsf")
allowed, err := zfs.ZFSListMapping(h.dsf)
if err != nil {
log.WithError(err).Error("error listing filesystems")
return
}
log.WithField("response", allowed).Debug("response")
*roots = allowed
return
}
func (h Handler) HandleFilesystemVersionsRequest(r *FilesystemVersionsRequest, versions *[]zfs.FilesystemVersion) (err error) {
log := h.logger.WithField("endpoint", "FilesystemVersionsRequest")
log.WithField("request", r).Debug("request")
// allowed to request that?
if h.pullACLCheck(r.Filesystem, nil); err != nil {
log.WithError(err).Warn("pull ACL check failed")
return
}
// find our versions
vs, err := zfs.ZFSListFilesystemVersions(r.Filesystem, h.fsvf)
if err != nil {
log.WithError(err).Error("cannot list filesystem versions")
return
}
log.WithField("response", vs).Debug("response")
*versions = vs
return
}
func (h Handler) HandleInitialTransferRequest(r *InitialTransferRequest, stream *io.Reader) (err error) {
log := h.logger.WithField("endpoint", "InitialTransferRequest")
log.WithField("request", r).Debug("request")
if err = h.pullACLCheck(r.Filesystem, &r.FilesystemVersion); err != nil {
log.WithError(err).Warn("pull ACL check failed")
return
}
log.Debug("invoking zfs send")
s, err := zfs.ZFSSend(r.Filesystem, &r.FilesystemVersion, nil)
if err != nil {
log.WithError(err).Error("cannot send filesystem")
}
*stream = s
return
}
func (h Handler) HandleIncrementalTransferRequest(r *IncrementalTransferRequest, stream *io.Reader) (err error) {
log := h.logger.WithField("endpoint", "IncrementalTransferRequest")
log.WithField("request", r).Debug("request")
if err = h.pullACLCheck(r.Filesystem, &r.From); err != nil {
log.WithError(err).Warn("pull ACL check failed")
return
}
if err = h.pullACLCheck(r.Filesystem, &r.To); err != nil {
log.WithError(err).Warn("pull ACL check failed")
return
}
log.Debug("invoking zfs send")
s, err := zfs.ZFSSend(r.Filesystem, &r.From, &r.To)
if err != nil {
log.WithError(err).Error("cannot send filesystem")
}
*stream = s
return
}
func (h Handler) pullACLCheck(p *zfs.DatasetPath, v *zfs.FilesystemVersion) (err error) {
var fsAllowed, vAllowed bool
fsAllowed, err = h.dsf.Filter(p)
if err != nil {
err = fmt.Errorf("error evaluating ACL: %s", err)
return
}
if !fsAllowed {
err = fmt.Errorf("ACL prohibits access to %s", p.ToString())
return
}
if v == nil {
return
}
vAllowed, err = h.fsvf.Filter(*v)
if err != nil {
err = errors.Wrap(err, "error evaluating version filter")
return
}
if !vAllowed {
err = fmt.Errorf("ACL prohibits access to %s", v.ToAbsPath(p))
return
}
return
}


@@ -2,22 +2,16 @@ package cmd
 
 import (
 	"fmt"
-	"io"
-	"bytes"
-	"encoding/json"
-	"github.com/zrepl/zrepl/rpc"
+	"github.com/zrepl/zrepl/cmd/replication"
+	"github.com/problame/go-streamrpc"
 	"github.com/zrepl/zrepl/zfs"
+	"io"
+	"github.com/pkg/errors"
+	"github.com/golang/protobuf/proto"
+	"bytes"
+	"os"
 )
 
-type localPullACL struct{}
-
-func (a localPullACL) Filter(p *zfs.DatasetPath) (pass bool, err error) {
-	return true, nil
-}
-
-const DEFAULT_INITIAL_REPL_POLICY = InitialReplPolicyMostRecent
-
 type InitialReplPolicy string
 
 const (
@@ -25,299 +19,383 @@ const (
 	InitialReplPolicyAll InitialReplPolicy = "all"
 )
 
-type Puller struct {
-	task              *Task
-	Remote            rpc.RPCClient
-	Mapping           DatasetMapping
-	InitialReplPolicy InitialReplPolicy
-}
-
-type remoteLocalMapping struct {
-	Remote *zfs.DatasetPath
-	Local  *zfs.DatasetPath
-}
-
-func (p *Puller) getRemoteFilesystems() (rfs []*zfs.DatasetPath, ok bool) {
-	p.task.Enter("fetch_remote_fs_list")
-	defer p.task.Finish()
-	fsr := FilesystemRequest{}
-	if err := p.Remote.Call("FilesystemRequest", &fsr, &rfs); err != nil {
-		p.task.Log().WithError(err).Error("cannot fetch remote filesystem list")
-		return nil, false
-	}
-	return rfs, true
-}
-
-func (p *Puller) buildReplMapping(remoteFilesystems []*zfs.DatasetPath) (replMapping map[string]remoteLocalMapping, ok bool) {
-	p.task.Enter("build_repl_mapping")
-	defer p.task.Finish()
-	replMapping = make(map[string]remoteLocalMapping, len(remoteFilesystems))
-	for fs := range remoteFilesystems {
-		var err error
-		var localFs *zfs.DatasetPath
-		localFs, err = p.Mapping.Map(remoteFilesystems[fs])
-		if err != nil {
-			err := fmt.Errorf("error mapping %s: %s", remoteFilesystems[fs], err)
-			p.task.Log().WithError(err).WithField(logMapFromField, remoteFilesystems[fs]).Error("cannot map")
-			return nil, false
-		}
-		if localFs == nil {
-			continue
-		}
-		p.task.Log().WithField(logMapFromField, remoteFilesystems[fs].ToString()).
-			WithField(logMapToField, localFs.ToString()).Debug("mapping")
-		m := remoteLocalMapping{remoteFilesystems[fs], localFs}
-		replMapping[m.Local.ToString()] = m
-	}
-	return replMapping, true
-}
-
-// returns true if the receiving filesystem (local side) exists and can have child filesystems
-func (p *Puller) replFilesystem(m remoteLocalMapping, localFilesystemState map[string]zfs.FilesystemState) (localExists bool) {
-
-	p.task.Enter("repl_fs")
-	defer p.task.Finish()
-	var err error
-	remote := p.Remote
-
-	log := p.task.Log().
-		WithField(logMapFromField, m.Remote.ToString()).
-		WithField(logMapToField, m.Local.ToString())
-
-	log.Debug("examining local filesystem state")
-	localState, localExists := localFilesystemState[m.Local.ToString()]
-	var versions []zfs.FilesystemVersion
-	switch {
-	case !localExists:
-		log.Info("local filesystem does not exist")
-	case localState.Placeholder:
-		log.Info("local filesystem is marked as placeholder")
-	default:
-		log.Debug("local filesystem exists")
-		log.Debug("requesting local filesystem versions")
-		if versions, err = zfs.ZFSListFilesystemVersions(m.Local, nil); err != nil {
-			log.WithError(err).Error("cannot get local filesystem versions")
-			return false
-		}
-	}
-
-	log.Info("requesting remote filesystem versions")
-	r := FilesystemVersionsRequest{
-		Filesystem: m.Remote,
-	}
-	var theirVersions []zfs.FilesystemVersion
-	if err = remote.Call("FilesystemVersionsRequest", &r, &theirVersions); err != nil {
-		log.WithError(err).Error("cannot get remote filesystem versions")
-		log.Warn("stopping replication for all filesystems mapped as children of receiving filesystem")
-		return false
-	}
-
-	log.Debug("computing diff between remote and local filesystem versions")
-	diff := zfs.MakeFilesystemDiff(versions, theirVersions)
-	log.WithField("diff", diff).Debug("diff between local and remote filesystem")
-
-	if localState.Placeholder && diff.Conflict != zfs.ConflictAllRight {
-		panic("internal inconsistency: local placeholder implies ConflictAllRight")
-	}
-
-	switch diff.Conflict {
-	case zfs.ConflictAllRight:
-
-		log.WithField("replication_policy", p.InitialReplPolicy).Info("performing initial sync, following policy")
-
-		if p.InitialReplPolicy != InitialReplPolicyMostRecent {
-			panic(fmt.Sprintf("policy '%s' not implemented", p.InitialReplPolicy))
-		}
-
-		snapsOnly := make([]zfs.FilesystemVersion, 0, len(diff.MRCAPathRight))
-		for s := range diff.MRCAPathRight {
-			if diff.MRCAPathRight[s].Type == zfs.Snapshot {
-				snapsOnly = append(snapsOnly, diff.MRCAPathRight[s])
-			}
-		}
-
-		if len(snapsOnly) < 1 {
-			log.Warn("cannot perform initial sync: no remote snapshots")
-			return false
-		}
-
-		r := InitialTransferRequest{
-			Filesystem:        m.Remote,
-			FilesystemVersion: snapsOnly[len(snapsOnly)-1],
-		}
-
-		log.WithField("version", r.FilesystemVersion).Debug("requesting snapshot stream")
-
-		var stream io.Reader
-
-		if err = remote.Call("InitialTransferRequest", &r, &stream); err != nil {
-			log.WithError(err).Error("cannot request initial transfer")
-			return false
-		}
-		log.Debug("received initial transfer request response")
-
-		log.Debug("invoke zfs receive")
-		recvArgs := []string{"-u"}
-		if localState.Placeholder {
-			log.Info("receive with forced rollback to replace placeholder filesystem")
-			recvArgs = append(recvArgs, "-F")
-		}
-		progressStream := p.task.ProgressUpdater(stream)
-		if err = zfs.ZFSRecv(m.Local, progressStream, recvArgs...); err != nil {
-			log.WithError(err).Error("cannot receive stream")
-			return false
-		}
-		log.Info("finished receiving stream") // TODO rx delta
-
-		// TODO unify with recv path of ConflictIncremental
-		log.Debug("configuring properties of received filesystem")
-		props := zfs.NewZFSProperties()
-		props.Set("readonly", "on")
-		if err = zfs.ZFSSet(m.Local, props); err != nil {
-			log.WithError(err).Error("cannot set readonly property")
-		}
-
-		log.Info("finished initial transfer")
-		return true
-
-	case zfs.ConflictIncremental:
-
-		if len(diff.IncrementalPath) < 2 {
-			log.Info("remote and local are in sync")
-			return true
-		}
-
-		log.Info("following incremental path from diff")
-		for i := 0; i < len(diff.IncrementalPath)-1; i++ {
-			from, to := diff.IncrementalPath[i], diff.IncrementalPath[i+1]
-			log, _ := log.WithField(logIncFromField, from.Name).WithField(logIncToField, to.Name), 0
-
-			log.Debug("requesting incremental snapshot stream")
-			r := IncrementalTransferRequest{
-				Filesystem: m.Remote,
-				From:       from,
-				To:         to,
-			}
-			var stream io.Reader
-			if err = remote.Call("IncrementalTransferRequest", &r, &stream); err != nil {
-				log.WithError(err).Error("cannot request incremental snapshot stream")
-				return false
-			}
-
-			log.Debug("invoking zfs receive")
-			progressStream := p.task.ProgressUpdater(stream)
-			// TODO protect against malicious incremental stream
-			if err = zfs.ZFSRecv(m.Local, progressStream); err != nil {
-				log.WithError(err).Error("cannot receive stream")
-				return false
-			}
-			log.Info("finished incremental transfer") // TODO increment rx
-
-		}
-		log.Info("finished following incremental path") // TODO path rx
-		return true
-
-	case zfs.ConflictNoCommonAncestor:
-		fallthrough
-	case zfs.ConflictDiverged:
-
-		var jsonDiff bytes.Buffer
-		if err := json.NewEncoder(&jsonDiff).Encode(diff); err != nil {
-			log.WithError(err).Error("cannot JSON-encode diff")
-			return false
-		}
-
-		var problem, resolution string
-
-		switch diff.Conflict {
-		case zfs.ConflictNoCommonAncestor:
-			problem = "remote and local filesystem have snapshots, but no common one"
-			resolution = "perform manual establish a common snapshot history"
-		case zfs.ConflictDiverged:
-			problem = "remote and local filesystem share a history but have diverged"
-			resolution = "perform manual replication or delete snapshots on the receiving" +
-				"side to establish an incremental replication parse"
-		}
-
-		log.WithField("diff", jsonDiff.String()).
-			WithField("problem", problem).
-			WithField("resolution", resolution).
-			Error("manual conflict resolution required")
-
-		return false
-
-	}
-
-	panic("should not be reached")
-}
-
-func (p *Puller) Pull() {
-	p.task.Enter("run")
-	defer p.task.Finish()
-
-	p.task.Log().Info("request remote filesystem list")
-	remoteFilesystems, ok := p.getRemoteFilesystems()
-	if !ok {
-		return
-	}
-
-	p.task.Log().Debug("map remote filesystems to local paths and determine order for per-filesystem sync")
-	replMapping, ok := p.buildReplMapping(remoteFilesystems)
-	if !ok {
-		return
-	}
-
-	p.task.Log().Debug("build cache for already present local filesystem state")
-	p.task.Enter("cache_local_fs_state")
-	localFilesystemState, err := zfs.ZFSListFilesystemState()
-	p.task.Finish()
-	if err != nil {
-		p.task.Log().WithError(err).Error("cannot request local filesystem state")
-		return
-	}
-
-	localTraversal := zfs.NewDatasetPathForest()
-	for _, m := range replMapping {
-		localTraversal.Add(m.Local)
-	}
-
-	p.task.Log().Info("start per-filesystem sync")
-	localTraversal.WalkTopDown(func(v zfs.DatasetPathVisit) bool {
-
-		p.task.Enter("tree_walk")
-		defer p.task.Finish()
-
-		log := p.task.Log().WithField(logFSField, v.Path.ToString())
-
-		if v.FilledIn {
-			if _, exists := localFilesystemState[v.Path.ToString()]; exists {
-				// No need to verify if this is a placeholder or not. It is sufficient
-				// to know we can add child filesystems to it
-				return true
-			}
-			log.Debug("create placeholder filesystem")
-			p.task.Enter("create_placeholder")
-			err = zfs.ZFSCreatePlaceholderFilesystem(v.Path)
-			p.task.Finish()
-			if err != nil {
-				log.Error("cannot create placeholder filesystem")
-				return false
-			}
-			return true
-		}
-
-		m, ok := replMapping[v.Path.ToString()]
-		if !ok {
-			panic("internal inconsistency: replMapping should contain mapping for any path that was not filled in by WalkTopDown()")
-		}
-
-		return p.replFilesystem(m, localFilesystemState)
-	})
-
-	return
-}
+const DEFAULT_INITIAL_REPL_POLICY = InitialReplPolicyMostRecent
+
+// SenderEndpoint implements replication.ReplicationEndpoint for a sending side
+type SenderEndpoint struct {
+	FSFilter                zfs.DatasetFilter
+	FilesystemVersionFilter zfs.FilesystemVersionFilter
+}
+
+func NewSenderEndpoint(fsf zfs.DatasetFilter, fsvf zfs.FilesystemVersionFilter) *SenderEndpoint {
+	return &SenderEndpoint{fsf, fsvf}
+}
+
+func (p *SenderEndpoint) ListFilesystems() ([]*replication.Filesystem, error) {
+	fss, err := zfs.ZFSListMapping(p.FSFilter)
+	if err != nil {
+		return nil, err
+	}
+	rfss := make([]*replication.Filesystem, len(fss))
+	for i := range fss {
+		rfss[i] = &replication.Filesystem{
+			Path: fss[i].ToString(),
+			// FIXME: not supporting ResumeToken yet
+		}
+	}
+	return rfss, nil
+}
+
+func (p *SenderEndpoint) ListFilesystemVersions(fs string) ([]*replication.FilesystemVersion, error) {
+	dp, err := zfs.NewDatasetPath(fs)
+	if err != nil {
+		return nil, err
+	}
+	pass, err := p.FSFilter.Filter(dp)
+	if err != nil {
+		return nil, err
+	}
+	if !pass {
+		return nil, replication.NewFilteredError(fs)
+	}
+	fsvs, err := zfs.ZFSListFilesystemVersions(dp, p.FilesystemVersionFilter)
+	if err != nil {
+		return nil, err
+	}
+	rfsvs := make([]*replication.FilesystemVersion, len(fsvs))
+	for i := range fsvs {
+		rfsvs[i] = replication.FilesystemVersionFromZFS(fsvs[i])
+	}
+	return rfsvs, nil
+}
+
+func (p *SenderEndpoint) Send(r *replication.SendReq) (*replication.SendRes, io.Reader, error) {
+	os.Stderr.WriteString("sending " + r.String() + "\n")
+	dp, err := zfs.NewDatasetPath(r.Filesystem)
+	if err != nil {
+		return nil, nil, err
+	}
+	pass, err := p.FSFilter.Filter(dp)
+	if err != nil {
+		return nil, nil, err
+	}
+	if !pass {
+		return nil, nil, replication.NewFilteredError(r.Filesystem)
+	}
+	stream, err := zfs.ZFSSend(r.Filesystem, r.From, r.To)
+	if err != nil {
+		return nil, nil, err
+	}
+	return &replication.SendRes{}, stream, nil
+}
+
+func (p *SenderEndpoint) Receive(r *replication.ReceiveReq, sendStream io.Reader) error {
+	return fmt.Errorf("sender endpoint does not receive")
+}
+
+// ReceiverEndpoint implements replication.ReplicationEndpoint for a receiving side
+type ReceiverEndpoint struct {
+	fsmapInv *DatasetMapFilter
+	fsmap    *DatasetMapFilter
+	fsvf     zfs.FilesystemVersionFilter
+}
+
+func NewReceiverEndpoint(fsmap *DatasetMapFilter, fsvf zfs.FilesystemVersionFilter) (*ReceiverEndpoint, error) {
+	fsmapInv, err := fsmap.Invert()
+	if err != nil {
+		return nil, err
+	}
+	return &ReceiverEndpoint{fsmapInv, fsmap, fsvf}, nil
+}
+
+func (e *ReceiverEndpoint) ListFilesystems() ([]*replication.Filesystem, error) {
+	filtered, err := zfs.ZFSListMapping(e.fsmapInv.AsFilter())
+	if err != nil {
+		return nil, errors.Wrap(err, "error checking client permission")
+	}
+	fss := make([]*replication.Filesystem, len(filtered))
+	for i, a := range filtered {
+		mapped, err := e.fsmapInv.Map(a)
+		if err != nil {
+			return nil, err
+		}
+		fss[i] = &replication.Filesystem{Path: mapped.ToString()}
+	}
+	return fss, nil
+}
+
+func (e *ReceiverEndpoint) ListFilesystemVersions(fs string) ([]*replication.FilesystemVersion, error) {
+	p, err := zfs.NewDatasetPath(fs)
+	if err != nil {
+		return nil, err
+	}
+	lp, err := e.fsmap.Map(p)
+	if err != nil {
+		return nil, err
+	}
+	if lp == nil {
+		return nil, errors.New("access to filesystem denied")
+	}
+	fsvs, err := zfs.ZFSListFilesystemVersions(lp, e.fsvf)
+	if err != nil {
+		return nil, err
+	}
+	rfsvs := make([]*replication.FilesystemVersion, len(fsvs))
+	for i := range fsvs {
+		rfsvs[i] = replication.FilesystemVersionFromZFS(fsvs[i])
+	}
+	return rfsvs, nil
+}
+
+func (e *ReceiverEndpoint) Send(req *replication.SendReq) (*replication.SendRes, io.Reader, error) {
+	return nil, nil, errors.New("receiver endpoint does not send")
+}
+
+func (e *ReceiverEndpoint) Receive(req *replication.ReceiveReq, sendStream io.Reader) error {
+	p, err := zfs.NewDatasetPath(req.Filesystem)
+	if err != nil {
+		return err
+	}
+	lp, err := e.fsmap.Map(p)
+	if err != nil {
+		return err
+	}
+	if lp == nil {
+		return errors.New("receive to filesystem denied")
+	}
+
+	// create placeholder parent filesystems as appropriate
+	var visitErr error
+	f := zfs.NewDatasetPathForest()
+	f.Add(lp)
+	f.WalkTopDown(func(v zfs.DatasetPathVisit) (visitChildTree bool) {
+		if v.Path.Equal(lp) {
+			return false
+		}
+		_, err := zfs.ZFSGet(v.Path, []string{zfs.ZREPL_PLACEHOLDER_PROPERTY_NAME})
+		if err != nil {
+			os.Stderr.WriteString("error zfsget " + err.Error() + "\n")
+			// interpret this as an early exit of the zfs binary due to the fs not existing
+			if err := zfs.ZFSCreatePlaceholderFilesystem(v.Path); err != nil {
+				os.Stderr.WriteString("error creating placeholder " + v.Path.ToString() + "\n")
+				visitErr = err
+				return false
+			}
+			return true
+		}
+		os.Stderr.WriteString(v.Path.ToString() + " exists\n")
+		return true // leave this fs as is
+	})
+
+	if visitErr != nil {
+		return visitErr
+	}
+
+	needForceRecv := false
+	props, err := zfs.ZFSGet(lp, []string{zfs.ZREPL_PLACEHOLDER_PROPERTY_NAME})
+	if err == nil {
+		if isPlaceholder, _ := zfs.IsPlaceholder(lp, props.Get(zfs.ZREPL_PLACEHOLDER_PROPERTY_NAME)); isPlaceholder {
+			needForceRecv = true
+		}
+	}
+
+	args := make([]string, 0, 1)
+	if needForceRecv {
+		args = append(args, "-F")
+	}
+
+	os.Stderr.WriteString("receiving...\n")
+
+	if err := zfs.ZFSRecv(lp.ToString(), sendStream, args...); err != nil {
+		// FIXME sendStream is on the wire and contains data, if we don't consume it, wire must be closed
+		return err
+	}
+
+	return nil
+}
+
+// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+// RPC STUBS
+// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
+
+const (
+	RPCListFilesystems        = "ListFilesystems"
+	RPCListFilesystemVersions = "ListFilesystemVersions"
+	RPCReceive                = "Receive"
+	RPCSend                   = "Send"
+)
+
+type RemoteEndpoint struct {
+	*streamrpc.Client
+}
+
+func (s RemoteEndpoint) ListFilesystems() ([]*replication.Filesystem, error) {
+	req := replication.ListFilesystemReq{}
+	b, err := proto.Marshal(&req)
+	if err != nil {
+		return nil, err
+	}
+	rb, rs, err := s.RequestReply(RPCListFilesystems, bytes.NewBuffer(b), nil)
+	if err != nil {
+		return nil, err
+	}
+	if rs != nil {
+		os.Stderr.WriteString(fmt.Sprintf("%#v\n", rs))
+		s.Close() // FIXME
+		return nil, errors.New("response contains unexpected stream")
+	}
+	var res replication.ListFilesystemRes
+	if err := proto.Unmarshal(rb.Bytes(), &res); err != nil {
+		return nil, err
+	}
+	return res.Filesystems, nil
+}
+
+func (s RemoteEndpoint) ListFilesystemVersions(fs string) ([]*replication.FilesystemVersion, error) {
+	req := replication.ListFilesystemVersionsReq{
+		Filesystem: fs,
+	}
+	b, err := proto.Marshal(&req)
+	if err != nil {
+		return nil, err
+	}
+	rb, rs, err := s.RequestReply(RPCListFilesystemVersions, bytes.NewBuffer(b), nil)
+	if err != nil {
+		return nil, err
+	}
+	if rs != nil {
+		s.Close() // FIXME
+		return nil, errors.New("response contains unexpected stream")
+	}
+	var res replication.ListFilesystemVersionsRes
+	if err := proto.Unmarshal(rb.Bytes(), &res); err != nil {
+		return nil, err
+	}
+	return res.Versions, nil
+}
+
+func (s RemoteEndpoint) Send(r *replication.SendReq) (*replication.SendRes, io.Reader, error) {
+	b, err := proto.Marshal(r)
+	if err != nil {
+		return nil, nil, err
+	}
+	rb, rs, err := s.RequestReply(RPCSend, bytes.NewBuffer(b), nil)
+	if err != nil {
+		return nil, nil, err
+	}
+	if rs == nil {
+		return nil, nil, errors.New("response does not contain a stream")
+	}
+	var res replication.SendRes
+	if err := proto.Unmarshal(rb.Bytes(), &res); err != nil {
+		s.Close() // FIXME
+		return nil, nil, err
+	}
+	// FIXME make sure the consumer will read the reader until the end...
+	return &res, rs, nil
+}
+
+func (s RemoteEndpoint) Receive(r *replication.ReceiveReq, sendStream io.Reader) error {
+	b, err := proto.Marshal(r)
+	if err != nil {
+		return err
+	}
+	rb, rs, err := s.RequestReply(RPCReceive, bytes.NewBuffer(b), sendStream)
+	if err != nil {
+		s.Close() // FIXME
+		return err
+	}
+	if rs != nil {
+		return errors.New("response contains unexpected stream")
+	}
+	var res replication.ReceiveRes
+	if err := proto.Unmarshal(rb.Bytes(), &res); err != nil {
+		return err
+	}
+	return nil
+}
+
+type HandlerAdaptor struct {
+	ep replication.ReplicationEndpoint
+}
+
+func (a *HandlerAdaptor) Handle(endpoint string, reqStructured *bytes.Buffer, reqStream io.Reader) (resStructured *bytes.Buffer, resStream io.Reader, err error) {
+
+	switch endpoint {
+	case RPCListFilesystems:
+		var req replication.ListFilesystemReq
+		if err := proto.Unmarshal(reqStructured.Bytes(), &req); err != nil {
+			return nil, nil, err
+		}
+		fsses, err := a.ep.ListFilesystems()
+		if err != nil {
+			return nil, nil, err
+		}
+		res := &replication.ListFilesystemRes{
+			Filesystems: fsses,
+		}
+		b, err := proto.Marshal(res)
+		if err != nil {
+			return nil, nil, err
+		}
+		return bytes.NewBuffer(b), nil, nil
+
+	case RPCListFilesystemVersions:
+		var req replication.ListFilesystemVersionsReq
+		if err := proto.Unmarshal(reqStructured.Bytes(), &req); err != nil {
+			return nil, nil, err
+		}
+		fsvs, err := a.ep.ListFilesystemVersions(req.Filesystem)
+		if err != nil {
+			return nil, nil, err
+		}
+		res := &replication.ListFilesystemVersionsRes{
+			Versions: fsvs,
+		}
+		b, err := proto.Marshal(res)
+		if err != nil {
+			return nil, nil, err
+		}
+		return bytes.NewBuffer(b), nil, nil
+
+	case RPCSend:
+		var req replication.SendReq
+		if err := proto.Unmarshal(reqStructured.Bytes(), &req); err != nil {
+			return nil, nil, err
+		}
+		res, sendStream, err := a.ep.Send(&req)
+		if err != nil {
+			return nil, nil, err
+		}
+		b, err := proto.Marshal(res)
+		if err != nil {
+			return nil, nil, err
+		}
+		return bytes.NewBuffer(b), sendStream, err
+
+	case RPCReceive:
+		var req replication.ReceiveReq
+		if err := proto.Unmarshal(reqStructured.Bytes(), &req); err != nil {
+			return nil, nil, err
+		}
+		err := a.ep.Receive(&req, reqStream)
+		if err != nil {
+			return nil, nil, err
+		}
+		b, err := proto.Marshal(&replication.ReceiveRes{})
+		if err != nil {
+			return nil, nil, err
+		}
+		return bytes.NewBuffer(b), nil, err
+
+	default:
+		return nil, nil, errors.New("no handler for given endpoint")
+	}
+}
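
Taken together, the stubs close the loop over go-streamrpc: RemoteEndpoint turns a streamrpc client into a ReplicationEndpoint, and HandlerAdaptor turns a ReplicationEndpoint into a streamrpc handler. A condensed sketch of both ends of a pull connection, with rwc an established io.ReadWriteCloser and fsFilter/snapFilter standing in for the job's configured filters:

	// serving side (cf. SourceJob.handleConnection)
	handler := HandlerAdaptor{NewSenderEndpoint(fsFilter, snapFilter)}
	serveErr := streamrpc.ServeConn(rwc, STREAMRPC_CONFIG, handler.Handle)

	// pulling side (cf. PullJob.doRun)
	remote := RemoteEndpoint{streamrpc.NewClientOnConn(rwc, STREAMRPC_CONFIG)}
	fss, listErr := remote.ListFilesystems() // one proto round-trip per call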


@@ -1,12 +1,11 @@
 package replication
 
 import (
-	"github.com/zrepl/zrepl/zfs"
 	"sort"
 )
 
 type ConflictNoCommonAncestor struct {
-	SortedSenderVersions, SortedReceiverVersions []zfs.FilesystemVersion
+	SortedSenderVersions, SortedReceiverVersions []*FilesystemVersion
 }
 
 func (c *ConflictNoCommonAncestor) Error() string {
@@ -14,24 +13,24 @@ func (c *ConflictNoCommonAncestor) Error() string {
 }
 
 type ConflictDiverged struct {
-	SortedSenderVersions, SortedReceiverVersions []zfs.FilesystemVersion
-	CommonAncestor zfs.FilesystemVersion
-	SenderOnly, ReceiverOnly []zfs.FilesystemVersion
+	SortedSenderVersions, SortedReceiverVersions []*FilesystemVersion
+	CommonAncestor *FilesystemVersion
+	SenderOnly, ReceiverOnly []*FilesystemVersion
 }
 
 func (c *ConflictDiverged) Error() string {
 	return "the receiver's latest snapshot is not present on sender"
 }
 
-func SortVersionListByCreateTXGThenBookmarkLTSnapshot(fsvslice []zfs.FilesystemVersion) []zfs.FilesystemVersion {
-	lesser := func(s []zfs.FilesystemVersion) func(i, j int) bool {
+func SortVersionListByCreateTXGThenBookmarkLTSnapshot(fsvslice []*FilesystemVersion) []*FilesystemVersion {
+	lesser := func(s []*FilesystemVersion) func(i, j int) bool {
 		return func(i, j int) bool {
 			if s[i].CreateTXG < s[j].CreateTXG {
 				return true
 			}
 			if s[i].CreateTXG == s[j].CreateTXG {
 				// Bookmark < Snapshot
-				return s[i].Type == zfs.Bookmark && s[j].Type == zfs.Snapshot
+				return s[i].Type == FilesystemVersion_Bookmark && s[j].Type == FilesystemVersion_Snapshot
 			}
 			return false
 		}
@@ -39,14 +38,14 @@ func SortVersionListByCreateTXGThenBookmarkLTSnapshot(fsvslice []zfs.FilesystemV
 	if sort.SliceIsSorted(fsvslice, lesser(fsvslice)) {
 		return fsvslice
 	}
-	sorted := make([]zfs.FilesystemVersion, len(fsvslice))
+	sorted := make([]*FilesystemVersion, len(fsvslice))
 	copy(sorted, fsvslice)
 	sort.Slice(sorted, lesser(sorted))
 	return sorted
 }
 
 // conflict may be a *ConflictDiverged or a *ConflictNoCommonAncestor
-func IncrementalPath(receiver, sender []zfs.FilesystemVersion) (incPath []zfs.FilesystemVersion, conflict error) {
+func IncrementalPath(receiver, sender []*FilesystemVersion) (incPath []*FilesystemVersion, conflict error) {
 
 	if receiver == nil {
 		panic("receiver must not be nil")
@@ -59,7 +58,7 @@ func IncrementalPath(receiver, sender []zfs.FilesystemVersion) (incPath []zfs.Fi
 	sender = SortVersionListByCreateTXGThenBookmarkLTSnapshot(sender)
 
 	if len(sender) == 0 {
-		return []zfs.FilesystemVersion{}, nil
+		return []*FilesystemVersion{}, nil
 	}
 
 	// Find most recent common ancestor by name, preferring snapshots over bookmarks
@@ -69,7 +68,7 @@ func IncrementalPath(receiver, sender []zfs.FilesystemVersion) (incPath []zfs.Fi
 	for mrcaRcv >= 0 && mrcaSnd >= 0 {
 		if receiver[mrcaRcv].Guid == sender[mrcaSnd].Guid {
-			if mrcaSnd-1 >= 0 && sender[mrcaSnd-1].Guid == sender[mrcaSnd].Guid && sender[mrcaSnd-1].Type == zfs.Bookmark {
+			if mrcaSnd-1 >= 0 && sender[mrcaSnd-1].Guid == sender[mrcaSnd].Guid && sender[mrcaSnd-1].Type == FilesystemVersion_Bookmark {
 				// prefer bookmarks over snapshots as the snapshot might go away sooner
 				mrcaSnd -= 1
 			}
@@ -100,11 +99,11 @@ func IncrementalPath(receiver, sender []zfs.FilesystemVersion) (incPath []zfs.Fi
 	}
 
 	// incPath must not contain bookmarks except initial one,
-	incPath = make([]zfs.FilesystemVersion, 0, len(sender))
+	incPath = make([]*FilesystemVersion, 0, len(sender))
 	incPath = append(incPath, sender[mrcaSnd])
 	// it's ok if incPath[0] is a bookmark, but not the subsequent ones in the incPath
 	for i := mrcaSnd + 1; i < len(sender); i++ {
-		if sender[i].Type == zfs.Snapshot && incPath[len(incPath)-1].Guid != sender[i].Guid {
+		if sender[i].Type == FilesystemVersion_Snapshot && incPath[len(incPath)-1].Guid != sender[i].Guid {
 			incPath = append(incPath, sender[i])
 		}
 	}
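
To make the contract concrete: if the receiver has @1,@2 and the sender has @1,@2,@3, the incremental path is the most recent common ancestor followed by every newer sender snapshot. A sketch in the style of the tests below, assuming fsvlist (the helper defined there) accepts these names:

	recv := fsvlist("@1", "@2")
	send := fsvlist("@1", "@2", "@3")
	path, conflict := replication.IncrementalPath(recv, send)
	// path is equivalent to fsvlist("@2", "@3"); conflict == nil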


@@ -3,16 +3,15 @@ package replication_test
 
 import (
 	"github.com/stretchr/testify/assert"
 	"github.com/zrepl/zrepl/cmd/replication"
-	"github.com/zrepl/zrepl/zfs"
 	"strconv"
 	"strings"
 	"testing"
 	"time"
 )
 
-func fsvlist(fsv ...string) (r []zfs.FilesystemVersion) {
-	r = make([]zfs.FilesystemVersion, len(fsv))
+func fsvlist(fsv ...string) (r []*replication.FilesystemVersion) {
+	r = make([]*replication.FilesystemVersion, len(fsv))
 	for i, f := range fsv {
 
 		// parse the id from fsvlist. it is used to derivce Guid,CreateTXG and Creation attrs
@@ -26,20 +25,20 @@ func fsvlist(fsv ...string) (r []zfs.FilesystemVersion) {
 		}
 
 		if strings.HasPrefix(f, "#") {
-			r[i] = zfs.FilesystemVersion{
+			r[i] = &replication.FilesystemVersion{
 				Name:      strings.TrimPrefix(f, "#"),
-				Type:      zfs.Bookmark,
+				Type:      replication.FilesystemVersion_Bookmark,
 				Guid:      uint64(id),
 				CreateTXG: uint64(id),
-				Creation:  time.Unix(0, 0).Add(time.Duration(id) * time.Second),
+				Creation:  time.Unix(0, 0).Add(time.Duration(id) * time.Second).Format(time.RFC3339),
 			}
 		} else if strings.HasPrefix(f, "@") {
-			r[i] = zfs.FilesystemVersion{
+			r[i] = &replication.FilesystemVersion{
 				Name:      strings.TrimPrefix(f, "@"),
-				Type:      zfs.Snapshot,
+				Type:      replication.FilesystemVersion_Snapshot,
 				Guid:      uint64(id),
 				CreateTXG: uint64(id),
-				Creation:  time.Unix(0, 0).Add(time.Duration(id) * time.Second),
+				Creation:  time.Unix(0, 0).Add(time.Duration(id) * time.Second).Format(time.RFC3339),
 			}
 		} else {
 			panic("invalid character")
@@ -49,14 +48,14 @@ func fsvlist(fsv ...string) (r []zfs.FilesystemVersion) {
 }
 
 type incPathResult struct {
-	incPath  []zfs.FilesystemVersion
+	incPath  []*replication.FilesystemVersion
 	conflict error
 }
 
 type IncrementalPathTest struct {
 	Msg string
-	Receiver, Sender []zfs.FilesystemVersion
-	ExpectIncPath []zfs.FilesystemVersion
+	Receiver, Sender []*replication.FilesystemVersion
+	ExpectIncPath []*replication.FilesystemVersion
 	ExpectNoCommonAncestor bool
 	ExpectDiverged *replication.ConflictDiverged
 	ExpectPanic bool
@@ -212,7 +211,7 @@ func TestSortVersionListByCreateTXGThenBookmarkLTSnapshot(t *testing.T) {
 
 	type Test struct {
 		Msg           string
-		Input, Output []zfs.FilesystemVersion
+		Input, Output []*replication.FilesystemVersion
 	}
 
 	l := fsvlist
@@ -258,7 +257,7 @@ func TestSortVersionListByCreateTXGThenBookmarkLTSnapshot(t *testing.T) {
 			break
 		}
 		if s.CreateTXG == last.CreateTXG {
-			if last.Type == zfs.Bookmark && s.Type != zfs.Snapshot {
+			if last.Type == replication.FilesystemVersion_Bookmark && s.Type != replication.FilesystemVersion_Snapshot {
 				t.Errorf("snapshots must come after bookmarks")
 			}
 		}

cmd/replication/pdu.pb.go (new file, 384 lines)

@@ -0,0 +1,384 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: pdu.proto
/*
Package replication is a generated protocol buffer package.
It is generated from these files:
pdu.proto
It has these top-level messages:
ListFilesystemReq
ListFilesystemRes
Filesystem
ListFilesystemVersionsReq
ListFilesystemVersionsRes
FilesystemVersion
SendReq
Property
SendRes
ReceiveReq
ReceiveRes
*/
package replication
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type FilesystemVersion_VersionType int32
const (
FilesystemVersion_Snapshot FilesystemVersion_VersionType = 0
FilesystemVersion_Bookmark FilesystemVersion_VersionType = 1
)
var FilesystemVersion_VersionType_name = map[int32]string{
0: "Snapshot",
1: "Bookmark",
}
var FilesystemVersion_VersionType_value = map[string]int32{
"Snapshot": 0,
"Bookmark": 1,
}
func (x FilesystemVersion_VersionType) String() string {
return proto.EnumName(FilesystemVersion_VersionType_name, int32(x))
}
func (FilesystemVersion_VersionType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor0, []int{5, 0}
}
type ListFilesystemReq struct {
}
func (m *ListFilesystemReq) Reset() { *m = ListFilesystemReq{} }
func (m *ListFilesystemReq) String() string { return proto.CompactTextString(m) }
func (*ListFilesystemReq) ProtoMessage() {}
func (*ListFilesystemReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
type ListFilesystemRes struct {
Filesystems []*Filesystem `protobuf:"bytes,1,rep,name=Filesystems" json:"Filesystems,omitempty"`
}
func (m *ListFilesystemRes) Reset() { *m = ListFilesystemRes{} }
func (m *ListFilesystemRes) String() string { return proto.CompactTextString(m) }
func (*ListFilesystemRes) ProtoMessage() {}
func (*ListFilesystemRes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *ListFilesystemRes) GetFilesystems() []*Filesystem {
if m != nil {
return m.Filesystems
}
return nil
}
type Filesystem struct {
Path string `protobuf:"bytes,1,opt,name=Path" json:"Path,omitempty"`
ResumeToken string `protobuf:"bytes,2,opt,name=ResumeToken" json:"ResumeToken,omitempty"`
}
func (m *Filesystem) Reset() { *m = Filesystem{} }
func (m *Filesystem) String() string { return proto.CompactTextString(m) }
func (*Filesystem) ProtoMessage() {}
func (*Filesystem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *Filesystem) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
func (m *Filesystem) GetResumeToken() string {
if m != nil {
return m.ResumeToken
}
return ""
}
type ListFilesystemVersionsReq struct {
Filesystem string `protobuf:"bytes,1,opt,name=Filesystem" json:"Filesystem,omitempty"`
}
func (m *ListFilesystemVersionsReq) Reset() { *m = ListFilesystemVersionsReq{} }
func (m *ListFilesystemVersionsReq) String() string { return proto.CompactTextString(m) }
func (*ListFilesystemVersionsReq) ProtoMessage() {}
func (*ListFilesystemVersionsReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *ListFilesystemVersionsReq) GetFilesystem() string {
if m != nil {
return m.Filesystem
}
return ""
}
type ListFilesystemVersionsRes struct {
Versions []*FilesystemVersion `protobuf:"bytes,1,rep,name=Versions" json:"Versions,omitempty"`
}
func (m *ListFilesystemVersionsRes) Reset() { *m = ListFilesystemVersionsRes{} }
func (m *ListFilesystemVersionsRes) String() string { return proto.CompactTextString(m) }
func (*ListFilesystemVersionsRes) ProtoMessage() {}
func (*ListFilesystemVersionsRes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func (m *ListFilesystemVersionsRes) GetVersions() []*FilesystemVersion {
if m != nil {
return m.Versions
}
return nil
}
type FilesystemVersion struct {
Type FilesystemVersion_VersionType `protobuf:"varint,1,opt,name=Type,enum=replication.FilesystemVersion_VersionType" json:"Type,omitempty"`
Name string `protobuf:"bytes,2,opt,name=Name" json:"Name,omitempty"`
Guid uint64 `protobuf:"varint,3,opt,name=Guid" json:"Guid,omitempty"`
CreateTXG uint64 `protobuf:"varint,4,opt,name=CreateTXG" json:"CreateTXG,omitempty"`
Creation string `protobuf:"bytes,5,opt,name=Creation" json:"Creation,omitempty"`
}
func (m *FilesystemVersion) Reset() { *m = FilesystemVersion{} }
func (m *FilesystemVersion) String() string { return proto.CompactTextString(m) }
func (*FilesystemVersion) ProtoMessage() {}
func (*FilesystemVersion) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
func (m *FilesystemVersion) GetType() FilesystemVersion_VersionType {
if m != nil {
return m.Type
}
return FilesystemVersion_Snapshot
}
func (m *FilesystemVersion) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *FilesystemVersion) GetGuid() uint64 {
if m != nil {
return m.Guid
}
return 0
}
func (m *FilesystemVersion) GetCreateTXG() uint64 {
if m != nil {
return m.CreateTXG
}
return 0
}
func (m *FilesystemVersion) GetCreation() string {
if m != nil {
return m.Creation
}
return ""
}
type SendReq struct {
Filesystem string `protobuf:"bytes,1,opt,name=Filesystem" json:"Filesystem,omitempty"`
From string `protobuf:"bytes,2,opt,name=From" json:"From,omitempty"`
To string `protobuf:"bytes,3,opt,name=To" json:"To,omitempty"`
// If ResumeToken is not empty, the resume token that CAN be tried for 'zfs send' by the sender.
// The sender MUST indicate in SendRes.UsedResumeToken
// If it does not work, the sender SHOULD clear the resume token on their side
// and use From and To instead
// If ResumeToken is not empty, the GUIDs of From and To
// MUST correspond to those encoded in the ResumeToken.
// Otherwise, the Sender MUST return an error.
ResumeToken string `protobuf:"bytes,4,opt,name=ResumeToken" json:"ResumeToken,omitempty"`
Compress bool `protobuf:"varint,5,opt,name=Compress" json:"Compress,omitempty"`
Dedup bool `protobuf:"varint,6,opt,name=Dedup" json:"Dedup,omitempty"`
}
func (m *SendReq) Reset() { *m = SendReq{} }
func (m *SendReq) String() string { return proto.CompactTextString(m) }
func (*SendReq) ProtoMessage() {}
func (*SendReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} }
func (m *SendReq) GetFilesystem() string {
if m != nil {
return m.Filesystem
}
return ""
}
func (m *SendReq) GetFrom() string {
if m != nil {
return m.From
}
return ""
}
func (m *SendReq) GetTo() string {
if m != nil {
return m.To
}
return ""
}
func (m *SendReq) GetResumeToken() string {
if m != nil {
return m.ResumeToken
}
return ""
}
func (m *SendReq) GetCompress() bool {
if m != nil {
return m.Compress
}
return false
}
func (m *SendReq) GetDedup() bool {
if m != nil {
return m.Dedup
}
return false
}
type Property struct {
Name string `protobuf:"bytes,1,opt,name=Name" json:"Name,omitempty"`
Value string `protobuf:"bytes,2,opt,name=Value" json:"Value,omitempty"`
}
func (m *Property) Reset() { *m = Property{} }
func (m *Property) String() string { return proto.CompactTextString(m) }
func (*Property) ProtoMessage() {}
func (*Property) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} }
func (m *Property) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Property) GetValue() string {
if m != nil {
return m.Value
}
return ""
}
type SendRes struct {
// Whether the resume token provided in the request has been used or not.
UsedResumeToken bool `protobuf:"varint,1,opt,name=UsedResumeToken" json:"UsedResumeToken,omitempty"`
Properties []*Property `protobuf:"bytes,2,rep,name=Properties" json:"Properties,omitempty"`
}
func (m *SendRes) Reset() { *m = SendRes{} }
func (m *SendRes) String() string { return proto.CompactTextString(m) }
func (*SendRes) ProtoMessage() {}
func (*SendRes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} }
func (m *SendRes) GetUsedResumeToken() bool {
if m != nil {
return m.UsedResumeToken
}
return false
}
func (m *SendRes) GetProperties() []*Property {
if m != nil {
return m.Properties
}
return nil
}
type ReceiveReq struct {
Filesystem string `protobuf:"bytes,1,opt,name=Filesystem" json:"Filesystem,omitempty"`
// If true, the receiver should clear the resume token before perfoming the zfs recv of the stream in the request
ClearResumeToken bool `protobuf:"varint,2,opt,name=ClearResumeToken" json:"ClearResumeToken,omitempty"`
}
func (m *ReceiveReq) Reset() { *m = ReceiveReq{} }
func (m *ReceiveReq) String() string { return proto.CompactTextString(m) }
func (*ReceiveReq) ProtoMessage() {}
func (*ReceiveReq) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} }
func (m *ReceiveReq) GetFilesystem() string {
if m != nil {
return m.Filesystem
}
return ""
}
func (m *ReceiveReq) GetClearResumeToken() bool {
if m != nil {
return m.ClearResumeToken
}
return false
}
type ReceiveRes struct {
}
func (m *ReceiveRes) Reset() { *m = ReceiveRes{} }
func (m *ReceiveRes) String() string { return proto.CompactTextString(m) }
func (*ReceiveRes) ProtoMessage() {}
func (*ReceiveRes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} }
func init() {
proto.RegisterType((*ListFilesystemReq)(nil), "replication.ListFilesystemReq")
proto.RegisterType((*ListFilesystemRes)(nil), "replication.ListFilesystemRes")
proto.RegisterType((*Filesystem)(nil), "replication.Filesystem")
proto.RegisterType((*ListFilesystemVersionsReq)(nil), "replication.ListFilesystemVersionsReq")
proto.RegisterType((*ListFilesystemVersionsRes)(nil), "replication.ListFilesystemVersionsRes")
proto.RegisterType((*FilesystemVersion)(nil), "replication.FilesystemVersion")
proto.RegisterType((*SendReq)(nil), "replication.SendReq")
proto.RegisterType((*Property)(nil), "replication.Property")
proto.RegisterType((*SendRes)(nil), "replication.SendRes")
proto.RegisterType((*ReceiveReq)(nil), "replication.ReceiveReq")
proto.RegisterType((*ReceiveRes)(nil), "replication.ReceiveRes")
proto.RegisterEnum("replication.FilesystemVersion_VersionType", FilesystemVersion_VersionType_name, FilesystemVersion_VersionType_value)
}
func init() { proto.RegisterFile("pdu.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 454 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x6f, 0xd3, 0x40,
0x10, 0x65, 0x53, 0xa7, 0x38, 0xe3, 0xaa, 0xa4, 0x4b, 0x11, 0x06, 0xa1, 0x2a, 0xda, 0x53, 0xe8,
0x21, 0x87, 0x02, 0x07, 0x40, 0xe2, 0xd0, 0xa2, 0xf6, 0x82, 0xaa, 0x6a, 0x6b, 0x4a, 0xaf, 0xa6,
0x1e, 0xa9, 0x4b, 0x62, 0xaf, 0xbb, 0x63, 0x23, 0xe5, 0xe7, 0xf0, 0xcf, 0xf8, 0x29, 0xc8, 0x53,
0x3b, 0xd9, 0x26, 0x2a, 0xca, 0xc9, 0xf3, 0xde, 0x7c, 0xbd, 0x7d, 0xeb, 0x85, 0x41, 0x99, 0xd5,
0x93, 0xd2, 0xd9, 0xca, 0xca, 0xc8, 0x61, 0x39, 0x33, 0x37, 0x69, 0x65, 0x6c, 0xa1, 0x9e, 0xc3,
0xde, 0x37, 0x43, 0xd5, 0xa9, 0x99, 0x21, 0xcd, 0xa9, 0xc2, 0x5c, 0xe3, 0x9d, 0x3a, 0x5f, 0x27,
0x49, 0x7e, 0x84, 0x68, 0x49, 0x50, 0x2c, 0x46, 0x5b, 0xe3, 0xe8, 0xe8, 0xe5, 0xc4, 0x1b, 0x36,
0xf1, 0x1a, 0xfc, 0x5a, 0x75, 0x0c, 0xb0, 0x84, 0x52, 0x42, 0x70, 0x91, 0x56, 0xb7, 0xb1, 0x18,
0x89, 0xf1, 0x40, 0x73, 0x2c, 0x47, 0x10, 0x69, 0xa4, 0x3a, 0xc7, 0xc4, 0x4e, 0xb1, 0x88, 0x7b,
0x9c, 0xf2, 0x29, 0xf5, 0x19, 0x5e, 0x3d, 0xd4, 0x74, 0x85, 0x8e, 0x8c, 0x2d, 0x48, 0xe3, 0x9d,
0x3c, 0xf0, 0x17, 0xb4, 0x83, 0x3d, 0x46, 0xfd, 0x78, 0xbc, 0x99, 0xe4, 0x27, 0x08, 0x3b, 0xd8,
0x9e, 0xea, 0xe0, 0x91, 0x53, 0xb5, 0x65, 0x7a, 0x51, 0xaf, 0xfe, 0x0a, 0xd8, 0x5b, 0xcb, 0xcb,
0x2f, 0x10, 0x24, 0xf3, 0x12, 0x59, 0xc8, 0xee, 0xd1, 0xe1, 0xff, 0xa7, 0x4d, 0xda, 0x6f, 0xd3,
0xa1, 0xb9, 0xaf, 0x71, 0xe8, 0x3c, 0xcd, 0xb1, 0xb5, 0x81, 0xe3, 0x86, 0x3b, 0xab, 0x4d, 0x16,
0x6f, 0x8d, 0xc4, 0x38, 0xd0, 0x1c, 0xcb, 0x37, 0x30, 0x38, 0x71, 0x98, 0x56, 0x98, 0x5c, 0x9f,
0xc5, 0x01, 0x27, 0x96, 0x84, 0x7c, 0x0d, 0x21, 0x03, 0x63, 0x8b, 0xb8, 0xcf, 0x93, 0x16, 0x58,
0xbd, 0x85, 0xc8, 0x5b, 0x2b, 0x77, 0x20, 0xbc, 0x2c, 0xd2, 0x92, 0x6e, 0x6d, 0x35, 0x7c, 0xd2,
0xa0, 0x63, 0x6b, 0xa7, 0x79, 0xea, 0xa6, 0x43, 0xa1, 0xfe, 0x08, 0x78, 0x7a, 0x89, 0x45, 0xb6,
0x81, 0xcf, 0x8d, 0xc8, 0x53, 0x67, 0xf3, 0x4e, 0x78, 0x13, 0xcb, 0x5d, 0xe8, 0x25, 0x96, 0x65,
0x0f, 0x74, 0x2f, 0xb1, 0xab, 0x57, 0x1d, 0xac, 0x5d, 0x35, 0x0b, 0xb7, 0x79, 0xe9, 0x90, 0x88,
0x85, 0x87, 0x7a, 0x81, 0xe5, 0x3e, 0xf4, 0xbf, 0x62, 0x56, 0x97, 0xf1, 0x36, 0x27, 0xee, 0x81,
0x7a, 0x0f, 0xe1, 0x85, 0xb3, 0x25, 0xba, 0x6a, 0xbe, 0x30, 0x4f, 0x78, 0xe6, 0xed, 0x43, 0xff,
0x2a, 0x9d, 0xd5, 0x9d, 0xa3, 0xf7, 0x40, 0xfd, 0xea, 0x0e, 0x46, 0x72, 0x0c, 0xcf, 0xbe, 0x13,
0x66, 0xbe, 0x30, 0xc1, 0x0b, 0x56, 0x69, 0xf9, 0x01, 0xa0, 0x5d, 0x65, 0x90, 0xe2, 0x1e, 0xff,
0x2f, 0x2f, 0x1e, 0xdc, 0x70, 0xa7, 0x44, 0x7b, 0x85, 0xea, 0x1a, 0x40, 0xe3, 0x0d, 0x9a, 0xdf,
0xb8, 0x89, 0x8f, 0x87, 0x30, 0x3c, 0x99, 0x61, 0xea, 0x56, 0xdf, 0x44, 0xa8, 0xd7, 0x78, 0xb5,
0xe3, 0x4d, 0xa6, 0x9f, 0xdb, 0xfc, 0xc6, 0xdf, 0xfd, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x5a,
0xf6, 0xa7, 0xf0, 0x03, 0x00, 0x00,
}

cmd/replication/pdu.proto (new file, 78 lines)

@@ -0,0 +1,78 @@
syntax = "proto3";
package replication;
message ListFilesystemReq {}
message ListFilesystemRes {
repeated Filesystem Filesystems = 1;
}
message Filesystem {
string Path = 1;
string ResumeToken = 2;
}
message ListFilesystemVersionsReq {
string Filesystem = 1;
}
message ListFilesystemVersionsRes {
repeated FilesystemVersion Versions = 1;
}
message FilesystemVersion {
enum VersionType {
Snapshot = 0;
Bookmark = 1;
}
VersionType Type = 1;
string Name = 2;
uint64 Guid = 3;
uint64 CreateTXG = 4;
string Creation = 5; // RFC 3339
}
message SendReq {
string Filesystem = 1;
string From = 2;
// May be empty / null to request a full transfer of From
string To = 3;
// If ResumeToken is not empty, the resume token that CAN be tried for 'zfs send' by the sender.
// The sender MUST indicate in SendRes.UsedResumeToken
// If it does not work, the sender SHOULD clear the resume token on their side
// and use From and To instead
// If ResumeToken is not empty, the GUIDs of From and To
// MUST correspond to those encoded in the ResumeToken.
// Otherwise, the Sender MUST return an error.
string ResumeToken = 4;
bool Compress = 5;
bool Dedup = 6;
}
message Property {
string Name = 1;
string Value = 2;
}
message SendRes {
// The actual stream is in the stream part of the streamrpc response
// Whether the resume token provided in the request has been used or not.
bool UsedResumeToken = 1;
repeated Property Properties = 2;
}
message ReceiveReq {
// The stream part of the streamrpc request contains the zfs send stream
string Filesystem = 1;
// If true, the receiver should clear the resume token before performing the zfs recv of the stream in the request
bool ClearResumeToken = 2;
}
message ReceiveRes {}
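Note: a minimal sketch of how a sender might honor the ResumeToken contract above, written against the Go types protoc generates from these messages. zfsSendResume and zfsSendIncremental are hypothetical stand-ins for the actual send implementation, and the io import is assumed:

// Sketch, in package replication; helpers below are hypothetical, not part of this commit.
func handleSend(req *SendReq) (*SendRes, io.Reader, error) {
	if req.ResumeToken != "" {
		// Try the resume token first; fall back to From/To if it fails.
		if stream, err := zfsSendResume(req.ResumeToken); err == nil { // hypothetical helper
			return &SendRes{UsedResumeToken: true}, stream, nil
		}
		// per the comment above, the sender SHOULD clear the token on its side here
	}
	stream, err := zfsSendIncremental(req.Filesystem, req.From, req.To) // hypothetical helper
	if err != nil {
		return nil, nil, err
	}
	return &SendRes{UsedResumeToken: false}, stream, nil
}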

View File

@@ -0,0 +1,60 @@
package replication
import (
"fmt"
"github.com/zrepl/zrepl/zfs"
"time"
)
func (v *FilesystemVersion) RelName() string {
zv := v.ZFSFilesystemVersion()
return zv.String()
}
func (v FilesystemVersion_VersionType) ZFSVersionType() zfs.VersionType {
switch v {
case FilesystemVersion_Snapshot:
return zfs.Snapshot
case FilesystemVersion_Bookmark:
return zfs.Bookmark
default:
panic(fmt.Sprintf("unexpected v.Type %#v", v))
}
}
func FilesystemVersionFromZFS(fsv zfs.FilesystemVersion) *FilesystemVersion {
var t FilesystemVersion_VersionType
switch fsv.Type {
case zfs.Bookmark:
t = FilesystemVersion_Bookmark
case zfs.Snapshot:
t = FilesystemVersion_Snapshot
default:
panic("unknown fsv.Type: " + fsv.Type)
}
return &FilesystemVersion{
Type: t,
Name: fsv.Name,
Guid: fsv.Guid,
CreateTXG: fsv.CreateTXG,
Creation: fsv.Creation.Format(time.RFC3339),
}
}
func (v *FilesystemVersion) ZFSFilesystemVersion() *zfs.FilesystemVersion {
ct := time.Time{}
if v.Creation != "" {
var err error
ct, err = time.Parse(time.RFC3339, v.Creation)
if err != nil {
panic(err)
}
}
return &zfs.FilesystemVersion{
Type: v.Type.ZFSVersionType(),
Name: v.Name,
Guid: v.Guid,
CreateTXG: v.CreateTXG,
Creation: ct,
}
}
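Note: a usage sketch of the two conversions above. Round-tripping preserves Type, Name, Guid, and CreateTXG; Creation comes back truncated to second precision because time.RFC3339 formatting drops sub-second digits:

// Sketch, in package replication; fsv would come from zfs.ZFSListFilesystemVersions.
func roundTrip(fsv zfs.FilesystemVersion) *zfs.FilesystemVersion {
	pduV := FilesystemVersionFromZFS(fsv) // zfs type -> wire type
	return pduV.ZFSFilesystemVersion()    // wire type -> zfs type, Creation truncated to seconds
}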

View File

@@ -0,0 +1,64 @@
package replication
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestFilesystemVersion_RelName(t *testing.T) {
type TestCase struct {
In FilesystemVersion
Out string
Panic bool
}
tcs := []TestCase{
{
In: FilesystemVersion{
Type: FilesystemVersion_Snapshot,
Name: "foobar",
},
Out: "@foobar",
},
{
In: FilesystemVersion{
Type: FilesystemVersion_Bookmark,
Name: "foobar",
},
Out: "#foobar",
},
{
In: FilesystemVersion{
Type: 2342,
Name: "foobar",
},
Panic: true,
},
}
for _, tc := range tcs {
if tc.Panic {
assert.Panics(t, func() {
tc.In.RelName()
})
} else {
o := tc.In.RelName()
assert.Equal(t, tc.Out, o)
}
}
}
func TestFilesystemVersion_ZFSFilesystemVersion(t *testing.T) {
empty := &FilesystemVersion{}
emptyZFS := empty.ZFSFilesystemVersion()
assert.Zero(t, emptyZFS.Creation)
dateInvalid := &FilesystemVersion{Creation:"foobar"}
assert.Panics(t, func() {
dateInvalid.ZFSFilesystemVersion()
})
}

View File

@@ -2,54 +2,25 @@ package replication

import (
	"context"
-	"github.com/zrepl/zrepl/zfs"
	"io"
)

type ReplicationEndpoint interface {
	// Does not include placeholder filesystems
-	ListFilesystems() ([]Filesystem, error)
-	ListFilesystemVersions(fs string) ([]zfs.FilesystemVersion, error) // fix depS
+	ListFilesystems() ([]*Filesystem, error)
+	ListFilesystemVersions(fs string) ([]*FilesystemVersion, error) // fix depS
	Sender
	Receiver
}

-type Filesystem struct {
-	Path        string
-	ResumeToken string
-}
-
type FilteredError struct{ fs string }

+func NewFilteredError(fs string) FilteredError {
+	return FilteredError{fs}
+}
+
func (f FilteredError) Error() string { return "endpoint does not allow access to filesystem " + f.fs }

-type SendRequest struct {
-	Filesystem string
-	From, To   string
-	// If ResumeToken is not empty, the resume token that CAN be tried for 'zfs send' by the sender
-	// If it does not work, the sender SHOULD clear the resume token on their side
-	// and use From and To instead
-	// If ResumeToken is not empty, the GUIDs of From and To
-	// MUST correspond to those encoded in the ResumeToken.
-	// Otherwise, the Sender MUST return an error.
-	ResumeToken string
-	Compress    bool
-	Dedup       bool
-}
-
-type SendResponse struct {
-	Properties zfs.ZFSProperties // fix dep
-	Stream     io.Reader
-}
-
-type ReceiveRequest struct {
-	Filesystem string
-	// The resume token used by the sending side.
-	// The receiver MUST discard the saved state on their side if ResumeToken
-	// does not match the zfs property of Filesystem on their side.
-	ResumeToken string
-}
-
type ReplicationMode int

const (
@@ -96,28 +67,90 @@ func (p EndpointPair) Mode() ReplicationMode {
	return p.m
}

+type contextKey int
+
+const (
+	ContextKeyLog contextKey = iota
+)
+
+type Logger interface {
+	Printf(fmt string, args ...interface{})
+}
+
func Replicate(ctx context.Context, ep EndpointPair, ipr IncrementalPathReplicator) {

+	log := ctx.Value(ContextKeyLog).(Logger)
+
	sfss, err := ep.Sender().ListFilesystems()
	if err != nil {
-		// log error
+		log.Printf("error listing sender filesystems: %s", err)
+		return
+	}
+
+	rfss, err := ep.Receiver().ListFilesystems()
+	if err != nil {
+		log.Printf("error listing receiver filesystems: %s", err)
		return
	}

	for _, fs := range sfss {
+		log.Printf("replication fs %s", fs.Path)
		sfsvs, err := ep.Sender().ListFilesystemVersions(fs.Path)
-		rfsvs, err := ep.Receiver().ListFilesystemVersions(fs.Path)
		if err != nil {
-			if _, ok := err.(FilteredError); ok {
-				// Remote does not map filesystem, don't try to tx it
-				continue
-			}
-			// log and ignore
+			log.Printf("sender error %s", err)
			continue
		}

+		if len(sfsvs) <= 1 {
+			log.Printf("sender does not have any versions")
+			continue
+		}
+
+		receiverFSExists := false
+		for _, rfs := range rfss {
+			if rfs.Path == fs.Path {
+				receiverFSExists = true
+			}
+		}
+
+		var rfsvs []*FilesystemVersion
+		if receiverFSExists {
+			rfsvs, err = ep.Receiver().ListFilesystemVersions(fs.Path)
+			if err != nil {
+				log.Printf("receiver error %s", err)
+				if _, ok := err.(FilteredError); ok {
+					// Remote does not map filesystem, don't try to tx it
+					continue
+				}
+				// log and ignore
+				continue
+			}
+		} else {
+			rfsvs = []*FilesystemVersion{}
+		}
+
		path, conflict := IncrementalPath(rfsvs, sfsvs)
-		if conflict != nil {
+		if noCommonAncestor, ok := conflict.(*ConflictNoCommonAncestor); ok {
+			if len(noCommonAncestor.SortedReceiverVersions) == 0 {
+				log.Printf("initial replication")
+				// FIXME hard-coded replication policy: most recent
+				// snapshot as source
+				var mostRecentSnap *FilesystemVersion
+				for n := len(sfsvs) - 1; n >= 0; n-- {
+					if sfsvs[n].Type == FilesystemVersion_Snapshot {
+						mostRecentSnap = sfsvs[n]
+						break
+					}
+				}
+				if mostRecentSnap == nil {
+					log.Printf("no snapshot on sender side")
+					continue
+				}
+				log.Printf("starting at most recent snapshot %s", mostRecentSnap)
+				path = []*FilesystemVersion{mostRecentSnap}
+			}
+		} else if conflict != nil {
+			log.Printf("unresolvable conflict: %s", conflict)
			// handle or ignore for now
			continue
		}
@@ -129,11 +162,11 @@ func Replicate(ctx context.Context, ep EndpointPair, ipr IncrementalPathReplicat
	}

type Sender interface {
-	Send(r SendRequest) (SendResponse, error)
+	Send(r *SendReq) (*SendRes, io.Reader, error)
}

type Receiver interface {
-	Receive(r ReceiveRequest) (io.Writer, error)
+	Receive(r *ReceiveReq, sendStream io.Reader) (error)
}

type Copier interface {
@@ -151,7 +184,7 @@ func NewCopier() Copier {
}

type IncrementalPathReplicator interface {
-	Replicate(ctx context.Context, sender Sender, receiver Receiver, copier Copier, fs Filesystem, path []zfs.FilesystemVersion)
+	Replicate(ctx context.Context, sender Sender, receiver Receiver, copier Copier, fs *Filesystem, path []*FilesystemVersion)
}

type incrementalPathReplicator struct{}
@@ -160,50 +193,82 @@ func NewIncrementalPathReplicator() IncrementalPathReplicator {
	return incrementalPathReplicator{}
}

-func (incrementalPathReplicator) Replicate(ctx context.Context, sender Sender, receiver Receiver, copier Copier, fs Filesystem, path []zfs.FilesystemVersion) {
+func (incrementalPathReplicator) Replicate(ctx context.Context, sender Sender, receiver Receiver, copier Copier, fs *Filesystem, path []*FilesystemVersion) {
+
+	log := ctx.Value(ContextKeyLog).(Logger)

	if len(path) == 0 {
+		log.Printf("nothing to do")
		// nothing to do
		return
	}

+	if len(path) == 1 {
+		log.Printf("full send of version %s", path[0])
+
+		sr := &SendReq{
+			Filesystem:  fs.Path,
+			From:        path[0].RelName(),
+			ResumeToken: fs.ResumeToken,
+		}
+		sres, sstream, err := sender.Send(sr)
+		if err != nil {
+			log.Printf("send request failed: %s", err)
+			// FIXME must close connection...
+			return
+		}
+
+		rr := &ReceiveReq{
+			Filesystem:       fs.Path,
+			ClearResumeToken: fs.ResumeToken != "" && !sres.UsedResumeToken,
+		}
+		err = receiver.Receive(rr, sstream)
+		if err != nil {
+			// FIXME this failure could be due to an unexpected exit of ZFS on the sending side
+			// FIXME which is transported through the streamrpc protocol, and known to the sendStream.(*streamrpc.streamReader),
+			// FIXME but the io.Reader interface design does not allow us to infer that it is a *streamrpc.streamReader right now
+			log.Printf("receive request failed (might also be error on sender...): %s", err)
+			// FIXME must close connection
+			return
+		}
+
+		return
+	}
+
	usedResumeToken := false

incrementalLoop:
	for j := 0; j < len(path)-1; j++ {
		rt := ""
-		if !usedResumeToken {
+		if !usedResumeToken { // only send resume token for first increment
			rt = fs.ResumeToken
			usedResumeToken = true
		}
-		sr := SendRequest{
+		sr := &SendReq{
			Filesystem:  fs.Path,
-			From:        path[j].String(),
-			To:          path[j+1].String(),
+			From:        path[j].RelName(),
+			To:          path[j+1].RelName(),
			ResumeToken: rt,
		}
-		sres, err := sender.Send(sr)
+		sres, sstream, err := sender.Send(sr)
		if err != nil {
+			log.Printf("send request failed: %s", err)
			// handle and ignore
			break incrementalLoop
		}
		// try to consume stream

-		rr := ReceiveRequest{
-			Filesystem:  fs.Path,
-			ResumeToken: rt,
+		rr := &ReceiveReq{
+			Filesystem:       fs.Path,
+			ClearResumeToken: rt != "" && !sres.UsedResumeToken,
		}
-		recvWriter, err := receiver.Receive(rr)
-		if err != nil {
-			// handle and ignore
-			break incrementalLoop
-		}
-		_, err = copier.Copy(recvWriter, sres.Stream)
+		err = receiver.Receive(rr, sstream)
		if err != nil {
+			log.Printf("receive request failed: %s", err)
			// handle and ignore
			break incrementalLoop
		}

-		// handle properties from sres
+		// FIXME handle properties from sres
	}
}
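Note: roughly how a caller is expected to drive Replicate after this change — a sketch assuming sender and receiver already implement ReplicationEndpoint, with imports (context, fmt, os) elided. stderrLogger is illustrative; any Printf-style implementation of the Logger interface above can be stored under ContextKeyLog. NewEndpointPairPull is the pull-mode EndpointPair constructor used elsewhere in this commit:

type stderrLogger struct{}

func (stderrLogger) Printf(format string, args ...interface{}) {
	fmt.Fprintf(os.Stderr, format+"\n", args...)
}

func replicatePull(sender, receiver ReplicationEndpoint) {
	ctx := context.WithValue(context.Background(), ContextKeyLog, stderrLogger{})
	Replicate(ctx, NewEndpointPairPull(sender, receiver), NewIncrementalPathReplicator())
}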

View File

@@ -4,17 +4,16 @@ import (
	"context"
	"github.com/stretchr/testify/assert"
	"github.com/zrepl/zrepl/cmd/replication"
-	"github.com/zrepl/zrepl/zfs"
	"io"
	"testing"
)

type IncrementalPathSequenceStep struct {
-	SendRequest    replication.SendRequest
-	SendResponse   replication.SendResponse
+	SendRequest    *replication.SendReq
+	SendResponse   *replication.SendRes
+	SendReader     io.Reader
	SendError      error
-	ReceiveRequest replication.ReceiveRequest
-	ReceiveWriter  io.Writer
+	ReceiveRequest *replication.ReceiveReq
	ReceiveError   error
}
@@ -24,7 +23,7 @@ type MockIncrementalPathRecorder struct {
	Pos      int
}

-func (m *MockIncrementalPathRecorder) Receive(r replication.ReceiveRequest) (io.Writer, error) {
+func (m *MockIncrementalPathRecorder) Receive(r *replication.ReceiveReq, rs io.Reader) (error) {
	if m.Pos >= len(m.Sequence) {
		m.T.Fatal("unexpected Receive")
	}
@@ -33,10 +32,10 @@ func (m *MockIncrementalPathRecorder) Receive(r replication.ReceiveRequest) (io.
	if !assert.Equal(m.T, i.ReceiveRequest, r) {
		m.T.FailNow()
	}
-	return i.ReceiveWriter, i.ReceiveError
+	return i.ReceiveError
}

-func (m *MockIncrementalPathRecorder) Send(r replication.SendRequest) (replication.SendResponse, error) {
+func (m *MockIncrementalPathRecorder) Send(r *replication.SendReq) (*replication.SendRes, io.Reader, error) {
	if m.Pos >= len(m.Sequence) {
		m.T.Fatal("unexpected Send")
	}
@@ -45,7 +44,7 @@ func (m *MockIncrementalPathRecorder) Send(r replication.SendRequest) (replicati
	if !assert.Equal(m.T, i.SendRequest, r) {
		m.T.FailNow()
	}
-	return i.SendResponse, i.SendError
+	return i.SendResponse, i.SendReader, i.SendError
}

func (m *MockIncrementalPathRecorder) Finished() bool {
@@ -60,8 +59,8 @@ func (DiscardCopier) Copy(writer io.Writer, reader io.Reader) (int64, error) {

type IncrementalPathReplicatorTest struct {
	Msg        string
-	Filesystem replication.Filesystem
-	Path       []zfs.FilesystemVersion
+	Filesystem *replication.Filesystem
+	Path       []*replication.FilesystemVersion
	Steps      []IncrementalPathSequenceStep
}
@@ -74,9 +73,11 @@ func (test *IncrementalPathReplicatorTest) Test(t *testing.T) {
		Sequence: test.Steps,
	}

+	ctx := context.WithValue(context.Background(), replication.ContextKeyLog, testLog{t})
+
	ipr := replication.NewIncrementalPathReplicator()
	ipr.Replicate(
-		context.TODO(),
+		ctx,
		rec,
		rec,
		DiscardCopier{},
@@ -88,40 +89,51 @@ func (test *IncrementalPathReplicatorTest) Test(t *testing.T) {

}

+type testLog struct {
+	t *testing.T
+}
+
+func (t testLog) Printf(fmt string, args ...interface{}) {
+	t.t.Logf(fmt, args...)
+}
+
func TestIncrementalPathReplicator_Replicate(t *testing.T) {

	tbl := []IncrementalPathReplicatorTest{
		{
			Msg: "generic happy place with resume token",
-			Filesystem: replication.Filesystem{
+			Filesystem: &replication.Filesystem{
				Path:        "foo/bar",
				ResumeToken: "blafoo",
			},
			Path: fsvlist("@a,1", "@b,2", "@c,3"),
			Steps: []IncrementalPathSequenceStep{
				{
-					SendRequest: replication.SendRequest{
+					SendRequest: &replication.SendReq{
						Filesystem:  "foo/bar",
						From:        "@a,1",
						To:          "@b,2",
						ResumeToken: "blafoo",
					},
+					SendResponse: &replication.SendRes{
+						UsedResumeToken: true,
+					},
				},
				{
-					ReceiveRequest: replication.ReceiveRequest{
-						Filesystem:  "foo/bar",
-						ResumeToken: "blafoo",
-					},
-				},
-				{
-					SendRequest: replication.SendRequest{
+					ReceiveRequest: &replication.ReceiveReq{
+						Filesystem:       "foo/bar",
+						ClearResumeToken: false,
+					},
+				},
+				{
+					SendRequest: &replication.SendReq{
						Filesystem: "foo/bar",
						From:       "@b,2",
						To:         "@c,3",
					},
				},
				{
-					ReceiveRequest: replication.ReceiveRequest{
+					ReceiveRequest: &replication.ReceiveReq{
						Filesystem: "foo/bar",
					},
				},
@@ -129,19 +141,36 @@ func TestIncrementalPathReplicator_Replicate(t *testing.T) {
		},
		{
			Msg: "no action on empty sequence",
-			Filesystem: replication.Filesystem{
+			Filesystem: &replication.Filesystem{
				Path: "foo/bar",
			},
			Path:  fsvlist(),
			Steps: []IncrementalPathSequenceStep{},
		},
		{
-			Msg: "no action on invalid path",
-			Filesystem: replication.Filesystem{
+			Msg: "full send on single entry path",
+			Filesystem: &replication.Filesystem{
				Path: "foo/bar",
			},
			Path:  fsvlist("@justone,1"),
-			Steps: []IncrementalPathSequenceStep{},
+			Steps: []IncrementalPathSequenceStep{
+				{
+					SendRequest: &replication.SendReq{
+						Filesystem: "foo/bar",
+						From:       "@justone,1",
+						To:         "", // empty means full send
+					},
+					SendResponse: &replication.SendRes{
+						UsedResumeToken: false,
+					},
+				},
+				{
+					ReceiveRequest: &replication.ReceiveReq{
+						Filesystem:       "foo/bar",
+						ClearResumeToken: false,
+					},
+				},
+			},
		},
	}

View File

@@ -1,136 +0,0 @@
package rpc
import (
"bytes"
"encoding/json"
"io"
"reflect"
"github.com/pkg/errors"
)
type Client struct {
ml *MessageLayer
logger Logger
}
func NewClient(rwc io.ReadWriteCloser) *Client {
return &Client{NewMessageLayer(rwc), noLogger{}}
}
func (c *Client) SetLogger(logger Logger, logMessageLayer bool) {
c.logger = logger
if logMessageLayer {
c.ml.logger = logger
} else {
c.ml.logger = noLogger{}
}
}
func (c *Client) Close() (err error) {
c.logger.Printf("sending Close request")
header := Header{
DataType: DataTypeControl,
Endpoint: ControlEndpointClose,
Accept: DataTypeControl,
}
err = c.ml.WriteHeader(&header)
if err != nil {
return
}
c.logger.Printf("reading Close ACK")
ack, err := c.ml.ReadHeader()
if err != nil {
return err
}
c.logger.Printf("received Close ACK: %#v", ack)
if ack.Error != StatusOK {
err = errors.Errorf("error hanging up: remote error (%s) %s", ack.Error, ack.ErrorMessage)
return
}
c.logger.Printf("closing MessageLayer")
if err = c.ml.Close(); err != nil {
c.logger.Printf("error closing RWC: %+v", err)
return
}
return err
}
func (c *Client) recvResponse() (h *Header, err error) {
h, err = c.ml.ReadHeader()
if err != nil {
return nil, errors.Wrap(err, "cannot read header")
}
// TODO validate
return
}
func (c *Client) writeRequest(h *Header) (err error) {
// TODO validate
err = c.ml.WriteHeader(h)
if err != nil {
return errors.Wrap(err, "cannot write header")
}
return
}
func (c *Client) Call(endpoint string, in, out interface{}) (err error) {
var accept DataType
{
outType := reflect.TypeOf(out)
if typeIsIOReaderPtr(outType) {
accept = DataTypeOctets
} else {
accept = DataTypeMarshaledJSON
}
}
h := Header{
Endpoint: endpoint,
DataType: DataTypeMarshaledJSON,
Accept: accept,
}
if err = c.writeRequest(&h); err != nil {
return err
}
var buf bytes.Buffer
if err = json.NewEncoder(&buf).Encode(in); err != nil {
panic("cannot encode 'in' parameter")
}
if err = c.ml.WriteData(&buf); err != nil {
return err
}
rh, err := c.recvResponse()
if err != nil {
return err
}
if rh.Error != StatusOK {
return &RPCError{rh}
}
rd := c.ml.ReadData()
switch accept {
case DataTypeOctets:
c.logger.Printf("setting out to ML data reader")
outPtr := out.(*io.Reader) // we checked that above
*outPtr = rd
case DataTypeMarshaledJSON:
c.logger.Printf("decoding marshaled json")
if err = json.NewDecoder(c.ml.ReadData()).Decode(out); err != nil {
return errors.Wrap(err, "cannot decode marshaled reply")
}
default:
panic("implementation error") // accept is controlled by us
}
return
}

View File

@@ -1,17 +0,0 @@
// Code generated by "stringer -type=DataType"; DO NOT EDIT.
package rpc
import "strconv"
const _DataType_name = "DataTypeNoneDataTypeControlDataTypeMarshaledJSONDataTypeOctets"
var _DataType_index = [...]uint8{0, 12, 27, 48, 62}
func (i DataType) String() string {
i -= 1
if i >= DataType(len(_DataType_index)-1) {
return "DataType(" + strconv.FormatInt(int64(i+1), 10) + ")"
}
return _DataType_name[_DataType_index[i]:_DataType_index[i+1]]
}

View File

@@ -1,302 +0,0 @@
package rpc
import (
"bytes"
"encoding/binary"
"encoding/json"
"fmt"
"io"
"github.com/pkg/errors"
)
type Frame struct {
Type FrameType
NoMoreFrames bool
PayloadLength uint32
}
//go:generate stringer -type=FrameType
type FrameType uint8
const (
FrameTypeHeader FrameType = 0x01
FrameTypeData FrameType = 0x02
FrameTypeTrailer FrameType = 0x03
FrameTypeRST FrameType = 0xff
)
//go:generate stringer -type=Status
type Status uint64
const (
StatusOK Status = 1 + iota
StatusRequestError
StatusServerError
// Returned when an error occurred but the side at fault cannot be determined
StatusError
)
type Header struct {
// Request-only
Endpoint string
// Data type of body (request & reply)
DataType DataType
// Request-only
Accept DataType
// Reply-only
Error Status
// Reply-only
ErrorMessage string
}
func NewErrorHeader(status Status, format string, args ...interface{}) (h *Header) {
h = &Header{}
h.Error = status
h.ErrorMessage = fmt.Sprintf(format, args...)
return
}
//go:generate stringer -type=DataType
type DataType uint8
const (
DataTypeNone DataType = 1 + iota
DataTypeControl
DataTypeMarshaledJSON
DataTypeOctets
)
const (
MAX_PAYLOAD_LENGTH = 4 * 1024 * 1024
MAX_HEADER_LENGTH = 4 * 1024
)
type frameBridgingReader struct {
l *MessageLayer
frameType FrameType
// < 0 means no limit
bytesLeftToLimit int
f Frame
}
func NewFrameBridgingReader(l *MessageLayer, frameType FrameType, totalLimit int) *frameBridgingReader {
return &frameBridgingReader{l, frameType, totalLimit, Frame{}}
}
func (r *frameBridgingReader) Read(b []byte) (n int, err error) {
if r.bytesLeftToLimit == 0 {
r.l.logger.Printf("limit reached, returning EOF")
return 0, io.EOF
}
log := r.l.logger
if r.f.PayloadLength == 0 {
if r.f.NoMoreFrames {
r.l.logger.Printf("no more frames flag set, returning EOF")
err = io.EOF
return
}
log.Printf("reading frame")
r.f, err = r.l.readFrame()
if err != nil {
log.Printf("error reading frame: %+v", err)
return 0, err
}
log.Printf("read frame: %#v", r.f)
if r.f.Type != r.frameType {
err = errors.Wrapf(err, "expected frame of type %s", r.frameType)
return 0, err
}
}
maxread := len(b)
if maxread > int(r.f.PayloadLength) {
maxread = int(r.f.PayloadLength)
}
if r.bytesLeftToLimit > 0 && maxread > r.bytesLeftToLimit {
maxread = r.bytesLeftToLimit
}
nb, err := r.l.rwc.Read(b[:maxread])
log.Printf("read %v from rwc\n", nb)
if nb < 0 {
panic("should not return negative number of bytes")
}
r.f.PayloadLength -= uint32(nb)
r.bytesLeftToLimit -= nb
return nb, err // TODO io.EOF for maxread = r.f.PayloadLength ?
}
type frameBridgingWriter struct {
l *MessageLayer
frameType FrameType
// < 0 means no limit
bytesLeftToLimit int
payloadLength int
buffer *bytes.Buffer
}
func NewFrameBridgingWriter(l *MessageLayer, frameType FrameType, totalLimit int) *frameBridgingWriter {
return &frameBridgingWriter{l, frameType, totalLimit, MAX_PAYLOAD_LENGTH, bytes.NewBuffer(make([]byte, 0, MAX_PAYLOAD_LENGTH))}
}
func (w *frameBridgingWriter) Write(b []byte) (n int, err error) {
for n = 0; n < len(b); {
i, err := w.writeUntilFrameFull(b[n:])
n += i
if err != nil {
return n, errors.WithStack(err)
}
}
return
}
func (w *frameBridgingWriter) writeUntilFrameFull(b []byte) (n int, err error) {
if len(b) <= 0 {
return
}
if w.bytesLeftToLimit == 0 {
err = errors.Errorf("message exceeds max number of allowed bytes")
return
}
maxwrite := len(b)
remainingInFrame := w.payloadLength - w.buffer.Len()
if maxwrite > remainingInFrame {
maxwrite = remainingInFrame
}
if w.bytesLeftToLimit > 0 && maxwrite > w.bytesLeftToLimit {
maxwrite = w.bytesLeftToLimit
}
w.buffer.Write(b[:maxwrite])
w.bytesLeftToLimit -= maxwrite
n = maxwrite
if w.bytesLeftToLimit == 0 {
err = w.flush(true)
} else if w.buffer.Len() == w.payloadLength {
err = w.flush(false)
}
return
}
func (w *frameBridgingWriter) flush(nomore bool) (err error) {
f := Frame{w.frameType, nomore, uint32(w.buffer.Len())}
err = w.l.writeFrame(f)
if err != nil {
errors.WithStack(err)
}
_, err = w.buffer.WriteTo(w.l.rwc)
return
}
func (w *frameBridgingWriter) Close() (err error) {
return w.flush(true)
}
type MessageLayer struct {
rwc io.ReadWriteCloser
logger Logger
}
func NewMessageLayer(rwc io.ReadWriteCloser) *MessageLayer {
return &MessageLayer{rwc, noLogger{}}
}
func (l *MessageLayer) Close() (err error) {
f := Frame{
Type: FrameTypeRST,
NoMoreFrames: true,
}
if err = l.writeFrame(f); err != nil {
l.logger.Printf("error sending RST frame: %s", err)
return errors.WithStack(err)
}
return nil
}
var RST error = fmt.Errorf("reset frame observed on connection")
func (l *MessageLayer) readFrame() (f Frame, err error) {
err = binary.Read(l.rwc, binary.LittleEndian, &f.Type)
if err != nil {
err = errors.WithStack(err)
return
}
err = binary.Read(l.rwc, binary.LittleEndian, &f.NoMoreFrames)
if err != nil {
err = errors.WithStack(err)
return
}
err = binary.Read(l.rwc, binary.LittleEndian, &f.PayloadLength)
if err != nil {
err = errors.WithStack(err)
return
}
if f.Type == FrameTypeRST {
l.logger.Printf("read RST frame")
err = RST
return
}
if f.PayloadLength > MAX_PAYLOAD_LENGTH {
err = errors.Errorf("frame exceeds max payload length")
return
}
return
}
func (l *MessageLayer) writeFrame(f Frame) (err error) {
err = binary.Write(l.rwc, binary.LittleEndian, &f.Type)
if err != nil {
return errors.WithStack(err)
}
err = binary.Write(l.rwc, binary.LittleEndian, &f.NoMoreFrames)
if err != nil {
return errors.WithStack(err)
}
err = binary.Write(l.rwc, binary.LittleEndian, &f.PayloadLength)
if err != nil {
return errors.WithStack(err)
}
if f.PayloadLength > MAX_PAYLOAD_LENGTH {
err = errors.Errorf("frame exceeds max payload length")
return
}
return
}
func (l *MessageLayer) ReadHeader() (h *Header, err error) {
r := NewFrameBridgingReader(l, FrameTypeHeader, MAX_HEADER_LENGTH)
h = &Header{}
if err = json.NewDecoder(r).Decode(&h); err != nil {
l.logger.Printf("cannot decode marshaled header: %s", err)
return nil, err
}
return h, nil
}
func (l *MessageLayer) WriteHeader(h *Header) (err error) {
w := NewFrameBridgingWriter(l, FrameTypeHeader, MAX_HEADER_LENGTH)
err = json.NewEncoder(w).Encode(h)
if err != nil {
return errors.Wrap(err, "cannot encode header, probably fatal")
}
w.Close()
return
}
func (l *MessageLayer) ReadData() (reader io.Reader) {
r := NewFrameBridgingReader(l, FrameTypeData, -1)
return r
}
func (l *MessageLayer) WriteData(source io.Reader) (err error) {
w := NewFrameBridgingWriter(l, FrameTypeData, -1)
_, err = io.Copy(w, source)
if err != nil {
return errors.WithStack(err)
}
err = w.Close()
return
}

View File

@@ -1,26 +0,0 @@
// Code generated by "stringer -type=FrameType"; DO NOT EDIT.
package rpc
import "strconv"
const (
_FrameType_name_0 = "FrameTypeHeaderFrameTypeDataFrameTypeTrailer"
_FrameType_name_1 = "FrameTypeRST"
)
var (
_FrameType_index_0 = [...]uint8{0, 15, 28, 44}
)
func (i FrameType) String() string {
switch {
case 1 <= i && i <= 3:
i -= 1
return _FrameType_name_0[_FrameType_index_0[i]:_FrameType_index_0[i+1]]
case i == 255:
return _FrameType_name_1
default:
return "FrameType(" + strconv.FormatInt(int64(i), 10) + ")"
}
}

View File

@@ -1,63 +0,0 @@
package rpc
import (
"github.com/pkg/errors"
"reflect"
)
type LocalRPC struct {
endpoints map[string]reflect.Value
}
func NewLocalRPC() *LocalRPC {
return &LocalRPC{make(map[string]reflect.Value, 0)}
}
func (s *LocalRPC) RegisterEndpoint(name string, handler interface{}) (err error) {
_, ok := s.endpoints[name]
if ok {
return errors.Errorf("already set up an endpoint for '%s'", name)
}
ep, err := makeEndpointDescr(handler)
if err != nil {
return err
}
s.endpoints[name] = ep.handler
return nil
}
func (s *LocalRPC) Serve() (err error) {
panic("local cannot serve")
}
func (c *LocalRPC) Call(endpoint string, in, out interface{}) (err error) {
ep, ok := c.endpoints[endpoint]
if !ok {
panic("implementation error: implementation should not call local RPC without knowing which endpoints exist")
}
args := []reflect.Value{reflect.ValueOf(in), reflect.ValueOf(out)}
if err = checkRPCParamTypes(args[0].Type(), args[1].Type()); err != nil {
return
}
rets := ep.Call(args)
if len(rets) != 1 {
panic("implementation error: endpoints must have one error ")
}
if err = checkRPCReturnType(rets[0].Type()); err != nil {
panic(err)
}
err = nil
if !rets[0].IsNil() {
err = rets[0].Interface().(error) // we checked that above
}
return
}
func (c *LocalRPC) Close() (err error) {
return nil
}

View File

@@ -1,259 +0,0 @@
package rpc
import (
"bytes"
"encoding/json"
"io"
"reflect"
"github.com/pkg/errors"
)
type Server struct {
ml *MessageLayer
logger Logger
endpoints map[string]endpointDescr
}
type typeMap struct {
local reflect.Type
proto DataType
}
type endpointDescr struct {
inType typeMap
outType typeMap
handler reflect.Value
}
type MarshaledJSONEndpoint func(bodyJSON interface{})
func NewServer(rwc io.ReadWriteCloser) *Server {
ml := NewMessageLayer(rwc)
return &Server{
ml, noLogger{}, make(map[string]endpointDescr),
}
}
func (s *Server) SetLogger(logger Logger, logMessageLayer bool) {
s.logger = logger
if logMessageLayer {
s.ml.logger = logger
} else {
s.ml.logger = noLogger{}
}
}
func (s *Server) RegisterEndpoint(name string, handler interface{}) (err error) {
_, ok := s.endpoints[name]
if ok {
return errors.Errorf("already set up an endpoint for '%s'", name)
}
s.endpoints[name], err = makeEndpointDescr(handler)
return
}
func checkResponseHeader(h *Header) (err error) {
var statusNotSet Status
if h.Error == statusNotSet {
return errors.Errorf("status has zero-value")
}
return nil
}
func (s *Server) writeResponse(h *Header) (err error) {
// TODO validate
return s.ml.WriteHeader(h)
}
func (s *Server) recvRequest() (h *Header, err error) {
h, err = s.ml.ReadHeader()
if err != nil {
s.logger.Printf("error reading header: %s", err)
return nil, err
}
s.logger.Printf("validating request")
err = nil // TODO validate
if err == nil {
return h, nil
}
s.logger.Printf("request validation error: %s", err)
r := NewErrorHeader(StatusRequestError, "%s", err)
return nil, s.writeResponse(r)
}
var doneServeNext error = errors.New("this should not cause a HangUp() in the server")
var doneStopServing error = errors.New("this should cause the server to close the connection")
var ProtocolError error = errors.New("protocol error, server should hang up")
const ControlEndpointClose string = "Close"
// Serve the connection until failure or the client hangs up
func (s *Server) Serve() (err error) {
for {
err = s.ServeRequest()
if err == nil {
continue
}
if err == doneServeNext {
s.logger.Printf("subroutine returned pseudo-error indicating early-exit")
err = nil
continue
}
if err == doneStopServing {
s.logger.Printf("subroutine returned pseudo-error indicating close request")
err = nil
break
}
break
}
if err != nil {
s.logger.Printf("an error occurred that could not be handled on PRC protocol level: %+v", err)
}
s.logger.Printf("cloing MessageLayer")
if mlErr := s.ml.Close(); mlErr != nil {
s.logger.Printf("error closing MessageLayer: %+v", mlErr)
}
return err
}
// Serve a single request
// * wait for request to come in
// * call handler
// * reply
//
// The connection is left open, the next bytes on the conn should be
// the next request header.
//
// Returns an err != nil if the error is bad enough to hang up on the client.
// Examples: protocol version mismatches, protocol errors in general, ...
// Non-Examples: a handler error
func (s *Server) ServeRequest() (err error) {
ml := s.ml
s.logger.Printf("reading header")
h, err := s.recvRequest()
if err != nil {
return err
}
if h.DataType == DataTypeControl {
switch h.Endpoint {
case ControlEndpointClose:
ack := Header{Error: StatusOK, DataType: DataTypeControl}
err = s.writeResponse(&ack)
if err != nil {
return err
}
return doneStopServing
default:
r := NewErrorHeader(StatusRequestError, "unregistered control endpoint %s", h.Endpoint)
return s.writeResponse(r)
}
}
ep, ok := s.endpoints[h.Endpoint]
if !ok {
r := NewErrorHeader(StatusRequestError, "unregistered endpoint %s", h.Endpoint)
return s.writeResponse(r)
}
if ep.inType.proto != h.DataType {
r := NewErrorHeader(StatusRequestError, "wrong DataType for endpoint %s (has %s, you provided %s)", h.Endpoint, ep.inType.proto, h.DataType)
return s.writeResponse(r)
}
if ep.outType.proto != h.Accept {
r := NewErrorHeader(StatusRequestError, "wrong Accept for endpoint %s (has %s, you provided %s)", h.Endpoint, ep.outType.proto, h.Accept)
return s.writeResponse(r)
}
dr := ml.ReadData()
// Determine inval
var inval reflect.Value
switch ep.inType.proto {
case DataTypeMarshaledJSON:
// Unmarshal input
inval = reflect.New(ep.inType.local.Elem())
invalIface := inval.Interface()
err = json.NewDecoder(dr).Decode(invalIface)
if err != nil {
r := NewErrorHeader(StatusRequestError, "cannot decode marshaled JSON: %s", err)
return s.writeResponse(r)
}
case DataTypeOctets:
// Take data as is
inval = reflect.ValueOf(dr)
default:
panic("not implemented")
}
outval := reflect.New(ep.outType.local.Elem()) // outval is a double pointer
s.logger.Printf("before handler, inval=%v outval=%v", inval, outval)
// Call the handler
errs := ep.handler.Call([]reflect.Value{inval, outval})
if !errs[0].IsNil() {
he := errs[0].Interface().(error) // we checked that before...
s.logger.Printf("handler returned error: %s", err)
r := NewErrorHeader(StatusError, "%s", he.Error())
return s.writeResponse(r)
}
switch ep.outType.proto {
case DataTypeMarshaledJSON:
var dataBuf bytes.Buffer
// Marshal output
err = json.NewEncoder(&dataBuf).Encode(outval.Interface())
if err != nil {
r := NewErrorHeader(StatusServerError, "cannot marshal response: %s", err)
return s.writeResponse(r)
}
replyHeader := Header{
Error: StatusOK,
DataType: ep.outType.proto,
}
if err = s.writeResponse(&replyHeader); err != nil {
return err
}
if err = ml.WriteData(&dataBuf); err != nil {
return
}
case DataTypeOctets:
h := Header{
Error: StatusOK,
DataType: DataTypeOctets,
}
if err = s.writeResponse(&h); err != nil {
return
}
reader := outval.Interface().(*io.Reader) // we checked that when adding the endpoint
err = ml.WriteData(*reader)
if err != nil {
return err
}
}
return nil
}

View File

@@ -1,111 +0,0 @@
package rpc
import (
"fmt"
"github.com/pkg/errors"
"io"
"reflect"
)
type RPCServer interface {
Serve() (err error)
RegisterEndpoint(name string, handler interface{}) (err error)
}
type RPCClient interface {
Call(endpoint string, in, out interface{}) (err error)
Close() (err error)
}
type Logger interface {
Printf(format string, args ...interface{})
}
type noLogger struct{}
func (l noLogger) Printf(format string, args ...interface{}) {}
func typeIsIOReader(t reflect.Type) bool {
return t == reflect.TypeOf((*io.Reader)(nil)).Elem()
}
func typeIsIOReaderPtr(t reflect.Type) bool {
return t == reflect.TypeOf((*io.Reader)(nil))
}
// An error returned by the Client if the response indicated a status code other than StatusOK
type RPCError struct {
ResponseHeader *Header
}
func (e *RPCError) Error() string {
return fmt.Sprintf("%s: %s", e.ResponseHeader.Error, e.ResponseHeader.ErrorMessage)
}
type RPCProtoError struct {
Message string
UnderlyingError error
}
func (e *RPCProtoError) Error() string {
return e.Message
}
func checkRPCParamTypes(in, out reflect.Type) (err error) {
if !(in.Kind() == reflect.Ptr || typeIsIOReader(in)) {
err = errors.Errorf("input parameter must be a pointer or an io.Reader, is of kind %s, type %s", in.Kind(), in)
return
}
if !(out.Kind() == reflect.Ptr) {
err = errors.Errorf("second input parameter (the non-error output parameter) must be a pointer or an *io.Reader")
return
}
return nil
}
func checkRPCReturnType(rt reflect.Type) (err error) {
errInterfaceType := reflect.TypeOf((*error)(nil)).Elem()
if !rt.Implements(errInterfaceType) {
err = errors.Errorf("handler must return an error")
return
}
return nil
}
func makeEndpointDescr(handler interface{}) (descr endpointDescr, err error) {
ht := reflect.TypeOf(handler)
if ht.Kind() != reflect.Func {
err = errors.Errorf("handler must be of kind reflect.Func")
return
}
if ht.NumIn() != 2 || ht.NumOut() != 1 {
err = errors.Errorf("handler must have exactly two input parameters and one output parameter")
return
}
if err = checkRPCParamTypes(ht.In(0), ht.In(1)); err != nil {
return
}
if err = checkRPCReturnType(ht.Out(0)); err != nil {
return
}
descr.handler = reflect.ValueOf(handler)
descr.inType.local = ht.In(0)
descr.outType.local = ht.In(1)
if typeIsIOReader(ht.In(0)) {
descr.inType.proto = DataTypeOctets
} else {
descr.inType.proto = DataTypeMarshaledJSON
}
if typeIsIOReaderPtr(ht.In(1)) {
descr.outType.proto = DataTypeOctets
} else {
descr.outType.proto = DataTypeMarshaledJSON
}
return
}

View File

@@ -1,17 +0,0 @@
// Code generated by "stringer -type=Status"; DO NOT EDIT.
package rpc
import "strconv"
const _Status_name = "StatusOKStatusRequestErrorStatusServerErrorStatusError"
var _Status_index = [...]uint8{0, 8, 26, 43, 54}
func (i Status) String() string {
i -= 1
if i >= Status(len(_Status_index)-1) {
return "Status(" + strconv.FormatInt(int64(i+1), 10) + ")"
}
return _Status_name[_Status_index[i]:_Status_index[i+1]]
}

View File

@@ -11,8 +11,8 @@ import (
// An IOCommand exposes a forked process's std(in|out|err) through the io.ReadWriteCloser interface.
type IOCommand struct {
	Cmd        *exec.Cmd
-	Stdin      io.Writer
-	Stdout     io.Reader
+	Stdin      io.WriteCloser
+	Stdout     io.ReadCloser
	StderrBuf  *bytes.Buffer
	ExitResult *IOCommandExitResult
}

View File

@@ -7,6 +7,7 @@ import (
	"fmt"
	"os/exec"
	"sort"
+	"io"
)

type fsbyCreateTXG []FilesystemVersion
@@ -245,6 +246,19 @@ func IsPlaceholder(p *DatasetPath, placeholderPropertyValue string) (isPlacehold
	return
}

+// for nonexistent FS, isPlaceholder == false && err == nil
+func ZFSIsPlaceholderFilesystem(p *DatasetPath) (isPlaceholder bool, err error) {
+	props, err := ZFSGet(p, []string{ZREPL_PLACEHOLDER_PROPERTY_NAME})
+	if err == io.ErrUnexpectedEOF {
+		// interpret this as an early exit of the zfs binary due to the fs not existing
+		return false, nil
+	} else if err != nil {
+		return false, err
+	}
+	isPlaceholder, _ = IsPlaceholder(p, props.Get(ZREPL_PLACEHOLDER_PROPERTY_NAME))
+	return
+}
+
func ZFSCreatePlaceholderFilesystem(p *DatasetPath) (err error) {
	v := PlaceholderPropertyValue(p)
	cmd := exec.Command(ZFS_BINARY, "create",
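Note: a usage sketch for the new placeholder probe; the dataset name is made up and the zrepl/zfs import is assumed:

func isSinkPlaceholder() (bool, error) {
	p, err := zfs.NewDatasetPath("backuppool/zrepl/sink") // hypothetical dataset
	if err != nil {
		return false, err
	}
	// false with nil error also covers the nonexistent-filesystem case, see above
	return zfs.ZFSIsPlaceholderFilesystem(p)
}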

View File

@@ -10,26 +10,56 @@ type DatasetFilter interface {
}

func ZFSListMapping(filter DatasetFilter) (datasets []*DatasetPath, err error) {
+	res, err := ZFSListMappingProperties(filter, nil)
+	if err != nil {
+		return nil, err
+	}
+	datasets = make([]*DatasetPath, len(res))
+	for i, r := range res {
+		datasets[i] = r.Path
+	}
+	return datasets, nil
+}
+
+type ZFSListMappingPropertiesResult struct {
+	Path *DatasetPath
+	// Guaranteed to have the same length as properties in the originating call
+	Fields []string
+}
+
+// properties must not contain 'name'
+func ZFSListMappingProperties(filter DatasetFilter, properties []string) (datasets []ZFSListMappingPropertiesResult, err error) {

	if filter == nil {
		panic("filter must not be nil")
	}

+	for _, p := range properties {
+		if p == "name" {
+			panic("properties must not contain 'name'")
+		}
+	}
+	newProps := make([]string, len(properties)+1)
+	newProps[0] = "name"
+	copy(newProps[1:], properties)
+	properties = newProps
+
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	rchan := make(chan ZFSListResult)
-	go ZFSListChan(ctx, rchan, []string{"name"}, "-r", "-t", "filesystem,volume")
-	datasets = make([]*DatasetPath, 0)
+
+	go ZFSListChan(ctx, rchan, properties, "-r", "-t", "filesystem,volume")
+
+	datasets = make([]ZFSListMappingPropertiesResult, 0)
	for r := range rchan {
-		if r.err != nil {
-			err = r.err
+		if r.Err != nil {
+			err = r.Err
			return
		}
		var path *DatasetPath
-		if path, err = NewDatasetPath(r.fields[0]); err != nil {
+		if path, err = NewDatasetPath(r.Fields[0]); err != nil {
			return
		}
@@ -38,10 +68,15 @@ func ZFSListMapping(filter DatasetFilter) (datasets []*DatasetPath, err error) {
			return nil, fmt.Errorf("error calling filter: %s", filterErr)
		}
		if pass {
-			datasets = append(datasets, path)
+			datasets = append(datasets, ZFSListMappingPropertiesResult{
+				Path:   path,
+				Fields: r.Fields[1:],
+			})
		}
	}
	return
}
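Note: a usage sketch for the new property-listing variant — listing each mapped dataset with its 'mounted' property. Any DatasetFilter works; filter construction and the fmt / zrepl/zfs imports are elided:

func printMounted(filter zfs.DatasetFilter) error {
	res, err := zfs.ZFSListMappingProperties(filter, []string{"mounted"})
	if err != nil {
		return err
	}
	for _, r := range res {
		// r.Fields is as long as the properties slice passed in;
		// the implicit leading "name" column is already stripped.
		fmt.Printf("%s mounted=%s\n", r.Path.ToString(), r.Fields[0])
	}
	return nil
}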

View File

@@ -103,3 +103,18 @@ func ParseResumeToken(ctx context.Context, token string) (*ResumeToken, error) {
	return rt, nil
}

+func ZFSGetReceiveResumeToken(fs *DatasetPath) (string, error) {
+	const prop_receive_resume_token = "receive_resume_token"
+	props, err := ZFSGet(fs, []string{prop_receive_resume_token})
+	if err != nil {
+		return "", err
+	}
+	res := props.m[prop_receive_resume_token]
+	if res == "-" {
+		return "", nil
+	} else {
+		return res, nil
+	}
+}
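Note: a sketch of the intended use — probe a filesystem for an interrupted receive and, if there is one, decode the token with the existing ParseResumeToken (context and zrepl/zfs imports assumed):

func probeResume(ctx context.Context, fs *zfs.DatasetPath) (*zfs.ResumeToken, error) {
	tok, err := zfs.ZFSGetReceiveResumeToken(fs)
	if err != nil || tok == "" {
		return nil, err // "" means no partial receive state on fs
	}
	return zfs.ParseResumeToken(ctx, tok)
}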

View File

@@ -9,6 +9,7 @@ import (
	"strconv"
	"strings"
	"time"
+	"io"
)

type VersionType string
@@ -33,6 +34,26 @@ func (t VersionType) String() string {
	return string(t)
}

+func DecomposeVersionString(v string) (fs string, versionType VersionType, name string, err error) {
+	if len(v) < 3 {
+		err = errors.New(fmt.Sprintf("snapshot or bookmark name implausibly short: %s", v))
+		return
+	}
+
+	snapSplit := strings.SplitN(v, "@", 2)
+	bookmarkSplit := strings.SplitN(v, "#", 2)
+	if len(snapSplit)*len(bookmarkSplit) != 2 {
+		err = errors.New(fmt.Sprintf("dataset cannot be snapshot and bookmark at the same time: %s", v))
+		return
+	}
+
+	if len(snapSplit) == 2 {
+		return snapSplit[0], Snapshot, snapSplit[1], nil
+	} else {
+		return bookmarkSplit[0], Bookmark, bookmarkSplit[1], nil
+	}
+}
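Note: a usage sketch for DecomposeVersionString (inside package zfs; the names are invented):

func demoDecompose() {
	fs, typ, name, err := DecomposeVersionString("pool/ds@zrepl_1")
	// fs == "pool/ds", typ == Snapshot, name == "zrepl_1", err == nil
	_, _, _, _ = fs, typ, name, err

	if _, _, _, err := DecomposeVersionString("pool/ds@snap#book"); err != nil {
		// rejected: cannot be snapshot and bookmark at the same time
	}
}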

type FilesystemVersion struct {
	Type VersionType
@@ -63,7 +84,7 @@ func (v FilesystemVersion) ToAbsPath(p *DatasetPath) string {
}

type FilesystemVersionFilter interface {
-	Filter(fsv FilesystemVersion) (accept bool, err error)
+	Filter(t VersionType, name string) (accept bool, err error)
}

func ZFSListFilesystemVersions(fs *DatasetPath, filter FilesystemVersionFilter) (res []FilesystemVersion, err error) {
@@ -82,31 +103,21 @@ func ZFSListFilesystemVersions(fs *DatasetPath, filter FilesystemVersionFilter)
	res = make([]FilesystemVersion, 0)
	for listResult := range listResults {
-		if listResult.err != nil {
-			return nil, listResult.err
+		if listResult.Err != nil {
+			if listResult.Err == io.ErrUnexpectedEOF {
+				// Since we specified the fs on the command line, we'll treat this like the filesystem doesn't exist
+				return []FilesystemVersion{}, nil
+			}
+			return nil, listResult.Err
		}

-		line := listResult.fields
-
-		if len(line[0]) < 3 {
-			err = errors.New(fmt.Sprintf("snapshot or bookmark name implausibly short: %s", line[0]))
-			return
-		}
-
-		snapSplit := strings.SplitN(line[0], "@", 2)
-		bookmarkSplit := strings.SplitN(line[0], "#", 2)
-		if len(snapSplit)*len(bookmarkSplit) != 2 {
-			err = errors.New(fmt.Sprintf("dataset cannot be snapshot and bookmark at the same time: %s", line[0]))
-			return
-		}
+		line := listResult.Fields

		var v FilesystemVersion
-		if len(snapSplit) == 2 {
-			v.Name = snapSplit[1]
-			v.Type = Snapshot
-		} else {
-			v.Name = bookmarkSplit[1]
-			v.Type = Bookmark
+
+		_, v.Type, v.Name, err = DecomposeVersionString(line[0])
+		if err != nil {
+			return nil, err
		}
@@ -129,7 +140,7 @@ func ZFSListFilesystemVersions(fs *DatasetPath, filter FilesystemVersionFilter)
		accept := true
		if filter != nil {
-			accept, err = filter.Filter(v)
+			accept, err = filter.Filter(v.Type, v.Name)
			if err != nil {
				err = fmt.Errorf("error executing filter: %s", err)
				return nil, err
View File

@@ -189,13 +189,13 @@ func ZFSList(properties []string, zfsArgs ...string) (res [][]string, err error)
}

type ZFSListResult struct {
-	fields []string
-	err    error
+	Fields []string
+	Err    error
}

// ZFSListChan executes `zfs list` and sends the results to the `out` channel.
// The `out` channel is always closed by ZFSListChan:
-// If an error occurs, it is closed after sending a result with the err field set.
+// If an error occurs, it is closed after sending a result with the Err field set.
// If no error occurs, it is just closed.
// If the operation is cancelled via context, the channel is just closed.
//
@@ -250,15 +250,56 @@ func ZFSListChan(ctx context.Context, out chan ZFSListResult, properties []strin
	return
}

-func ZFSSend(fs *DatasetPath, from, to *FilesystemVersion) (stream io.Reader, err error) {
+func validateRelativeZFSVersion(s string) error {
+	if len(s) <= 1 {
+		return errors.New("version must start with a delimiter char followed by at least one character")
+	}
+	if !(s[0] == '#' || s[0] == '@') {
+		return errors.New("version name starts with invalid delimiter char")
+	}
+	// FIXME whitespace check...
+	return nil
+}
+
+func validateZFSFilesystem(fs string) error {
+	if len(fs) < 1 {
+		return errors.New("filesystem path must have length > 0")
+	}
+	return nil
+}
+
+func absVersion(fs, v string) (full string, err error) {
+	if err := validateZFSFilesystem(fs); err != nil {
+		return "", err
+	}
+	if err := validateRelativeZFSVersion(v); err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s%s", fs, v), nil
+}
+
+func ZFSSend(fs string, from, to string) (stream io.Reader, err error) {
+
+	fromV, err := absVersion(fs, from)
+	if err != nil {
+		return nil, err
+	}
+
+	toV := ""
+	if to != "" {
+		toV, err = absVersion(fs, to)
+		if err != nil {
+			return nil, err
+		}
+	}

	args := make([]string, 0)
	args = append(args, "send")

-	if to == nil { // Initial
-		args = append(args, from.ToAbsPath(fs))
+	if toV == "" { // Initial
+		args = append(args, fromV)
	} else {
-		args = append(args, "-i", from.ToAbsPath(fs), to.ToAbsPath(fs))
+		args = append(args, "-i", fromV, toV)
	}

	stream, err = util.RunIOCommand(ZFS_BINARY, args...)
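Note: with versions now passed as plain strings, a full and an incremental send look like this — a sketch with invented dataset and snapshot names ("#name" would select a bookmark as the incremental source; zrepl/zfs import assumed):

func demoSend() error {
	full, err := zfs.ZFSSend("pool/ds", "@a", "") // empty 'to' means full send of @a
	if err != nil {
		return err
	}
	_ = full

	incr, err := zfs.ZFSSend("pool/ds", "@a", "@b") // runs: zfs send -i pool/ds@a pool/ds@b
	if err != nil {
		return err
	}
	_ = incr
	return nil
}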
@@ -266,14 +307,18 @@ func ZFSSend(fs *DatasetPath, from, to *FilesystemVersion) (stream io.Reader, er
	return
}

-func ZFSRecv(fs *DatasetPath, stream io.Reader, additionalArgs ...string) (err error) {
+func ZFSRecv(fs string, stream io.Reader, additionalArgs ...string) (err error) {
+
+	if err := validateZFSFilesystem(fs); err != nil {
+		return err
+	}

	args := make([]string, 0)
	args = append(args, "recv")
	if len(args) > 0 {
		args = append(args, additionalArgs...)
	}
-	args = append(args, fs.ToString())
+	args = append(args, fs)

	cmd := exec.Command(ZFS_BINARY, args...)
@@ -304,6 +349,27 @@ func ZFSRecv(fs *DatasetPath, stream io.Reader, additionalArgs ...string) (err e
	return nil
}

+func ZFSRecvWriter(fs *DatasetPath, additionalArgs ...string) (io.WriteCloser, error) {
+
+	args := make([]string, 0)
+	args = append(args, "recv")
+	if len(args) > 0 {
+		args = append(args, additionalArgs...)
+	}
+	args = append(args, fs.ToString())
+
+	cmd, err := util.NewIOCommand(ZFS_BINARY, args, 1024)
+	if err != nil {
+		return nil, err
+	}
+
+	if err = cmd.Start(); err != nil {
+		return nil, err
+	}
+
+	return cmd.Stdin, nil
+}
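Note: ZFSRecvWriter complements ZFSSend when the stream has to be pushed through an io.Writer; a sketch piping one into the other locally, with invented names and the io / zrepl/zfs imports assumed:

func localCopy() error {
	stream, err := zfs.ZFSSend("pool/src", "@a", "")
	if err != nil {
		return err
	}
	dst, err := zfs.NewDatasetPath("pool/dst")
	if err != nil {
		return err
	}
	w, err := zfs.ZFSRecvWriter(dst)
	if err != nil {
		return err
	}
	if _, err := io.Copy(w, stream); err != nil {
		return err
	}
	return w.Close() // closing stdin lets zfs recv finish
}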

type ZFSProperties struct {
	m map[string]string
}

@@ -316,6 +382,10 @@ func (p *ZFSProperties) Set(key, val string) {
	p.m[key] = val
}

+func (p *ZFSProperties) Get(key string) string {
+	return p.m[key]
+}
+
func (p *ZFSProperties) appendArgs(args *[]string) (err error) {
	for prop, val := range p.m {
		if strings.Contains(prop, "=") {
@@ -355,6 +425,33 @@ func ZFSSet(fs *DatasetPath, props *ZFSProperties) (err error) {
	return
}

+func ZFSGet(fs *DatasetPath, props []string) (*ZFSProperties, error) {
+	args := []string{"get", "-Hp", "-o", "property,value", strings.Join(props, ","), fs.ToString()}
+
+	cmd := exec.Command(ZFS_BINARY, args...)
+	output, err := cmd.CombinedOutput()
+	if err != nil {
+		return nil, err
+	}
+	o := string(output)
+	lines := strings.Split(o, "\n")
+	if len(lines) < 1 || // account for newlines
+		len(lines)-1 != len(props) {
+		return nil, fmt.Errorf("zfs get did not return the number of expected property values")
+	}
+	res := &ZFSProperties{
+		make(map[string]string, len(lines)),
+	}
+
+	for _, line := range lines[:len(lines)-1] {
+		fields := strings.Fields(line)
+		if len(fields) != 2 {
+			return nil, fmt.Errorf("zfs get did not return property value pairs")
+		}
+		res.m[fields[0]] = fields[1]
+	}
+
+	return res, nil
+}
+
func ZFSDestroy(dataset string) (err error) {
	cmd := exec.Command(ZFS_BINARY, "destroy", dataset)