cmd: move replication endpoints into subpackage

Christian Schwarz 2018-08-22 00:43:58 +02:00
parent 7b3a84e2a3
commit 70aad0940f
7 changed files with 71 additions and 24 deletions

View File

@@ -9,6 +9,7 @@ import (
 	"github.com/zrepl/zrepl/zfs"
 	"sync"
 	"github.com/zrepl/zrepl/replication"
+	"github.com/zrepl/zrepl/cmd/endpoint"
 )
 
 type LocalJob struct {
@@ -16,7 +17,7 @@ type LocalJob struct {
 	Mapping *DatasetMapFilter
 	SnapshotPrefix string
 	Interval time.Duration
-	InitialReplPolicy InitialReplPolicy
+	InitialReplPolicy endpoint.InitialReplPolicy
 	PruneLHS PrunePolicy
 	PruneRHS PrunePolicy
 	Debug JobDebugSettings
@@ -59,7 +60,7 @@ func parseLocalJob(c JobParsingContext, name string, i map[string]interface{}) (
 		return
 	}
 
-	if j.InitialReplPolicy, err = parseInitialReplPolicy(asMap.InitialReplPolicy, DEFAULT_INITIAL_REPL_POLICY); err != nil {
+	if j.InitialReplPolicy, err = parseInitialReplPolicy(asMap.InitialReplPolicy, endpoint.DEFAULT_INITIAL_REPL_POLICY); err != nil {
 		return
 	}
@@ -103,9 +104,9 @@ func (j *LocalJob) JobStart(ctx context.Context) {
 	// We can pay this small performance penalty for now.
 	wildcardMapFilter := NewDatasetMapFilter(1, false)
 	wildcardMapFilter.Add("<", "<")
 
-	sender := &SenderEndpoint{wildcardMapFilter, NewPrefixFilter(j.SnapshotPrefix)}
-	receiver, err := NewReceiverEndpoint(j.Mapping, NewPrefixFilter(j.SnapshotPrefix))
+	sender := &endpoint.SenderEndpoint{wildcardMapFilter, NewPrefixFilter(j.SnapshotPrefix)}
+	receiver, err := endpoint.NewReceiverEndpoint(j.Mapping, NewPrefixFilter(j.SnapshotPrefix))
 	if err != nil {
 		rootLog.WithError(err).Error("unexpected error setting up local handler")
 	}

View File

@@ -14,6 +14,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/problame/go-streamrpc"
 	"github.com/zrepl/zrepl/replication"
+	"github.com/zrepl/zrepl/cmd/endpoint"
 )
 
 type PullJob struct {
@@ -24,7 +25,7 @@ type PullJob struct {
 	// constructed from mapping during parsing
 	pruneFilter *DatasetMapFilter
 	SnapshotPrefix string
-	InitialReplPolicy InitialReplPolicy
+	InitialReplPolicy endpoint.InitialReplPolicy
 	Prune PrunePolicy
 	Debug JobDebugSettings
@@ -73,7 +74,7 @@ func parsePullJob(c JobParsingContext, name string, i map[string]interface{}) (j
 		return nil, err
 	}
 
-	j.InitialReplPolicy, err = parseInitialReplPolicy(asMap.InitialReplPolicy, DEFAULT_INITIAL_REPL_POLICY)
+	j.InitialReplPolicy, err = parseInitialReplPolicy(asMap.InitialReplPolicy, endpoint.DEFAULT_INITIAL_REPL_POLICY)
 	if err != nil {
 		err = errors.Wrap(err, "cannot parse 'initial_repl_policy'")
 		return
@@ -175,9 +176,9 @@ func (j *PullJob) doRun(ctx context.Context) {
 	j.task.Enter("pull")
 
-	sender := RemoteEndpoint{client}
-	puller, err := NewReceiverEndpoint(
+	sender := endpoint.RemoteEndpoint{client}
+	puller, err := endpoint.NewReceiverEndpoint(
 		j.Mapping,
 		NewPrefixFilter(j.SnapshotPrefix),
 	)
@@ -229,7 +230,7 @@ func (j *PullJob) Pruner(task *Task, side PrunePolicySide, dryRun bool) (p Prune
 		return
 	}
 
-func closeRPCWithTimeout(task *Task, remote RemoteEndpoint, timeout time.Duration, goodbye string) {
+func closeRPCWithTimeout(task *Task, remote endpoint.RemoteEndpoint, timeout time.Duration, goodbye string) {
 	task.Log().Info("closing rpc connection")

View File

@@ -8,6 +8,7 @@ import (
 	"github.com/pkg/errors"
 	"github.com/problame/go-streamrpc"
 	"net"
+	"github.com/zrepl/zrepl/cmd/endpoint"
 )
 
 type SourceJob struct {
@@ -211,12 +212,12 @@ func (j *SourceJob) handleConnection(conn net.Conn, task *Task) {
 	task.Log().Info("handling client connection")
 
-	senderEP := NewSenderEndpoint(j.Filesystems, NewPrefixFilter(j.SnapshotPrefix))
+	senderEP := endpoint.NewSenderEndpoint(j.Filesystems, NewPrefixFilter(j.SnapshotPrefix))
 
 	ctx := context.Background()
 	ctx = context.WithValue(ctx, contextKeyLog, task.Log().WithField("subsystem", "rpc.endpoint"))
 	ctx = streamrpc.ContextWithLogger(ctx, streamrpcLogAdaptor{task.Log().WithField("subsystem", "rpc.protocol")})
 
-	handler := HandlerAdaptor{senderEP}
+	handler := endpoint.NewHandlerAdaptor(senderEP)
 	if err := streamrpc.ServeConn(ctx, conn, STREAMRPC_CONFIG, handler.Handle); err != nil {
 		task.Log().WithError(err).Error("error serving connection")
 	} else {

View File

@@ -7,6 +7,7 @@ import (
 	"github.com/mitchellh/mapstructure"
 	"github.com/pkg/errors"
 	"github.com/zrepl/zrepl/zfs"
+	"github.com/zrepl/zrepl/cmd/endpoint"
 )
 
 type DatasetMapFilter struct {
@@ -187,11 +188,10 @@ func (m DatasetMapFilter) InvertedFilter() (inv *DatasetMapFilter, err error) {
 }
 
 // FIXME investigate whether we can support more...
-func (m DatasetMapFilter) Invert() (inv *DatasetMapFilter, err error) {
+func (m DatasetMapFilter) Invert() (endpoint.FSMap, error) {
 	if m.filterMode {
-		err = errors.Errorf("can only invert mappings")
-		return
+		return nil, errors.Errorf("can only invert mappings")
 	}
 
 	if len(m.entries) != 1 {
@@ -200,7 +200,7 @@ func (m DatasetMapFilter) Invert() (inv *DatasetMapFilter, err error) {
 	e := m.entries[0]
 
-	inv = &DatasetMapFilter{
+	inv := &DatasetMapFilter{
 		make([]datasetMapFilterEntry, len(m.entries)),
 		false,
 	}
@@ -221,9 +221,9 @@ func (m DatasetMapFilter) Invert() (inv *DatasetMapFilter, err error) {
 // Creates a new DatasetMapFilter in filter mode from a mapping
 // All accepting mapping results are mapped to accepting filter results
 // All rejecting mapping results are mapped to rejecting filter results
-func (m DatasetMapFilter) AsFilter() (f *DatasetMapFilter) {
-	f = &DatasetMapFilter{
+func (m DatasetMapFilter) AsFilter() endpoint.FSFilter {
+	f := &DatasetMapFilter{
 		make([]datasetMapFilterEntry, len(m.entries)),
 		true,
 	}

View File

@@ -12,6 +12,7 @@ import (
 	"strconv"
 	"time"
 	"github.com/problame/go-streamrpc"
+	"github.com/zrepl/zrepl/cmd/endpoint"
 )
 
 var ConfigFileDefaultLocations []string = []string{
@@ -225,7 +226,7 @@ func parseConnect(i map[string]interface{}) (c streamrpc.Connecter, err error) {
 }
 
-func parseInitialReplPolicy(v interface{}, defaultPolicy InitialReplPolicy) (p InitialReplPolicy, err error) {
+func parseInitialReplPolicy(v interface{}, defaultPolicy endpoint.InitialReplPolicy) (p endpoint.InitialReplPolicy, err error) {
 	s, ok := v.(string)
 	if !ok {
 		goto err
@@ -235,9 +236,9 @@ func parseInitialReplPolicy(v interface{}, defaultPolicy InitialReplPolicy) (p I
 	case s == "":
 		p = defaultPolicy
 	case s == "most_recent":
-		p = InitialReplPolicyMostRecent
+		p = endpoint.InitialReplPolicyMostRecent
 	case s == "all":
-		p = InitialReplPolicyAll
+		p = endpoint.InitialReplPolicyAll
 	default:
 		goto err
 	}

cmd/endpoint/context.go (new file, 26 lines)
View File

@@ -0,0 +1,26 @@
+package endpoint
+
+import (
+	"context"
+	"github.com/zrepl/zrepl/logger"
+)
+
+type contextKey int
+
+const (
+	contextKeyLogger contextKey = iota
+)
+
+type Logger = logger.Logger
+
+func WithLogger(ctx context.Context, log Logger) context.Context {
+	return context.WithValue(ctx, contextKeyLogger, log)
+}
+
+func getLogger(ctx context.Context) Logger {
+	l, ok := ctx.Value(contextKeyLogger).(Logger)
+	if !ok {
+		l = logger.NewNullLogger()
+	}
+	return l
+}
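
For orientation, a minimal usage sketch of the new logger plumbing; the calling code below is hypothetical and not part of this commit. A caller in package cmd attaches its job or task logger to the context via endpoint.WithLogger, and code inside the endpoint package retrieves it through the unexported getLogger, falling back to a null logger when nothing was set.

	package main

	import (
		"context"

		"github.com/zrepl/zrepl/cmd/endpoint"
		"github.com/zrepl/zrepl/logger"
	)

	func main() {
		// Hypothetical caller: hand a logger to endpoint code via the context.
		// Inside the endpoint package, getLogger(ctx) returns this logger, or
		// logger.NewNullLogger() if nothing was attached.
		log := logger.NewNullLogger() // stand-in for a real job/task logger
		ctx := endpoint.WithLogger(context.Background(), log)
		_ = ctx
	}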

View File

@@ -1,4 +1,4 @@
-package cmd
+package endpoint
 
 import (
 	"fmt"
@@ -13,6 +13,7 @@ import (
 	"github.com/zrepl/zrepl/replication"
 )
 
+// FIXME: remove this
 type InitialReplPolicy string
 
 const (
@@ -20,6 +21,7 @@ const (
 	InitialReplPolicyAll InitialReplPolicy = "all"
 )
 
+// FIXME: remove this
 const DEFAULT_INITIAL_REPL_POLICY = InitialReplPolicyMostRecent
 
 // SenderEndpoint implements replication.ReplicationEndpoint for a sending side
@@ -93,15 +95,26 @@ func (p *SenderEndpoint) Receive(ctx context.Context, r *pdu.ReceiveReq, sendStr
 	return fmt.Errorf("sender endpoint does not receive")
 }
 
+type FSFilter interface {
+	Filter(path *zfs.DatasetPath) (pass bool, err error)
+}
+
+// FIXME: can we get away without error types here?
+type FSMap interface {
+	FSFilter
+	Map(path *zfs.DatasetPath) (*zfs.DatasetPath, error)
+	Invert() (FSMap, error)
+	AsFilter() FSFilter
+}
+
 // ReceiverEndpoint implements replication.ReplicationEndpoint for a receiving side
 type ReceiverEndpoint struct {
-	fsmapInv *DatasetMapFilter
-	fsmap *DatasetMapFilter
+	fsmapInv FSMap
+	fsmap FSMap
 	fsvf zfs.FilesystemVersionFilter
 }
 
-func NewReceiverEndpoint(fsmap *DatasetMapFilter, fsvf zfs.FilesystemVersionFilter) (*ReceiverEndpoint, error) {
+func NewReceiverEndpoint(fsmap FSMap, fsvf zfs.FilesystemVersionFilter) (*ReceiverEndpoint, error) {
 	fsmapInv, err := fsmap.Invert()
 	if err != nil {
 		return nil, err
@@ -327,6 +340,10 @@ type HandlerAdaptor struct {
 	ep replication.Endpoint
 }
 
+func NewHandlerAdaptor(ep replication.Endpoint) HandlerAdaptor {
+	return HandlerAdaptor{ep}
+}
+
 func (a *HandlerAdaptor) Handle(ctx context.Context, endpoint string, reqStructured *bytes.Buffer, reqStream io.ReadCloser) (resStructured *bytes.Buffer, resStream io.ReadCloser, err error) {
 	switch endpoint {
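
The FSFilter/FSMap interfaces introduced above are what breaks the endpoint package's dependency on cmd's DatasetMapFilter: the endpoint package now only sees these interfaces, and DatasetMapFilter satisfies them through its changed Invert and AsFilter signatures. A minimal, purely illustrative sketch of an FSMap implementation (an identity mapping, assuming the interface definitions from this commit):

	package main

	import (
		"github.com/zrepl/zrepl/cmd/endpoint"
		"github.com/zrepl/zrepl/zfs"
	)

	// identityMap passes every dataset path through unchanged. It exists only
	// to show the interface surface endpoint.NewReceiverEndpoint now accepts
	// instead of a concrete *DatasetMapFilter.
	type identityMap struct{}

	func (identityMap) Filter(p *zfs.DatasetPath) (bool, error)          { return true, nil }
	func (identityMap) Map(p *zfs.DatasetPath) (*zfs.DatasetPath, error) { return p, nil }
	func (m identityMap) Invert() (endpoint.FSMap, error)                { return m, nil }
	func (m identityMap) AsFilter() endpoint.FSFilter                    { return m }

	var _ endpoint.FSMap = identityMap{} // compile-time interface check

	func main() {}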