2017-07-06 13:03:44 +02:00
|
|
|
package cmd
|
2017-05-20 17:08:18 +02:00
|
|
|
|
|
|
|
import (
|
|
|
|
"fmt"
|
2018-08-11 12:19:10 +02:00
|
|
|
"github.com/zrepl/zrepl/cmd/replication.v2"
|
2018-06-20 20:20:37 +02:00
|
|
|
"github.com/problame/go-streamrpc"
|
|
|
|
"github.com/zrepl/zrepl/zfs"
|
2017-05-20 17:08:18 +02:00
|
|
|
"io"
|
2018-06-20 20:20:37 +02:00
|
|
|
"github.com/pkg/errors"
|
|
|
|
"github.com/golang/protobuf/proto"
|
2017-09-22 14:13:58 +02:00
|
|
|
"bytes"
|
2018-07-08 23:31:46 +02:00
|
|
|
"context"
|
2017-05-20 17:08:18 +02:00
|
|
|
)
|
|
|
|
|
reimplement io.ReadWriteCloser based RPC mechanism
The existing ByteStreamRPC requires writing RPC stub + server code
for each RPC endpoint. Does not scale well.
Goal: adding a new RPC call should
- not require writing an RPC stub / handler
- not require modifications to the RPC lib
The wire format is inspired by HTTP2, the API by net/rpc.
Frames are used for framing messages, i.e. a message is made of multiple
frames which are glued together using a frame-bridging reader / writer.
This roughly corresponds to HTTP2 streams, although we're happy with
just one stream at any time and the resulting non-need for flow control,
etc.
Frames are typed using a header. The two most important types are
'Header' and 'Data'.
The RPC protocol is built on top of this:
- Client sends a header => multiple frames of type 'header'
- Client sends request body => multiple frames of type 'data'
- Server reads a header => multiple frames of type 'header'
- Server reads request body => multiple frames of type 'data'
- Server sends response header => ...
- Server sends response body => ...
An RPC header is serialized JSON and always the same structure.
The body is of the type specified in the header.
The RPC server and client use some semi-fancy reflection techniques to
automatically infer the data type of the request/response body based on
the method signature of the server handler; or the client parameters,
respectively.
This boils down to a special-case for io.Reader, which are just dumped
into a series of data frames as efficiently as possible.
All other types are (de)serialized using encoding/json.
The RPC layer and Frame Layer log some arbitrary messages that proved
useful during debugging. By default, they log to a non-logger, which
should not have a big impact on performance.
pprof analysis shows the implementation spends its CPU time
60% waiting for syscalls
30% in memmove
10% ...
On a Intel(R) Core(TM) i7-6600U CPU @ 2.60GHz CPU, Linux 4.12, the
implementation achieved ~3.6GiB/s.
Future optimization may include splice(2) / vmsplice(2) on Linux, although
this doesn't fit so well with the heavy use of io.Reader / io.Writer
throughout the codebase.
The existing hackaround for local calls was re-implemented to fit the
new interface of RPCServer and RPCClient.
The 'R'PC method invocation is a bit slower because reflection is
involved in between, but otherwise performance should be no different.
The RPC code currently does not support multipart requests and thus does
not support the equivalent of a POST.
Thus, the switch to the new rpc code had the following fallout:
- Move request objects + constants from rpc package to main app code
- Sacrifice the hacky 'push = pull me' way of doing push
-> need to further extend RPC to support multipart requests or
something to implement this properly with additional interfaces
-> should be done after replication is abstracted better than separate
algorithms for doPull() and doPush()
2017-08-19 22:37:14 +02:00
|
|
|
// InitialReplPolicy selects which filesystem versions are sent on the
// initial replication of a filesystem — presumably when sender and
// receiver do not yet share a common version; confirm against callers.
type InitialReplPolicy string

const (
	// InitialReplPolicyMostRecent: start from the most recent version only.
	InitialReplPolicyMostRecent InitialReplPolicy = "most_recent"
	// InitialReplPolicyAll: send all versions.
	InitialReplPolicyAll InitialReplPolicy = "all"
)

// DEFAULT_INITIAL_REPL_POLICY is the policy used when none is configured.
const DEFAULT_INITIAL_REPL_POLICY = InitialReplPolicyMostRecent
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
// SenderEndpoint implements replication.ReplicationEndpoint for a sending side
type SenderEndpoint struct {
	// FSFilter decides which datasets this endpoint exposes (see ListFilesystems, Send).
	FSFilter zfs.DatasetFilter
	// FilesystemVersionFilter decides which filesystem versions are listed
	// (see ListFilesystemVersions).
	FilesystemVersionFilter zfs.FilesystemVersionFilter
}
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
func NewSenderEndpoint(fsf zfs.DatasetFilter, fsvf zfs.FilesystemVersionFilter) *SenderEndpoint {
|
|
|
|
return &SenderEndpoint{fsf, fsvf}
|
2017-12-26 21:37:48 +01:00
|
|
|
}
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-07-08 23:31:46 +02:00
|
|
|
func (p *SenderEndpoint) ListFilesystems(ctx context.Context) ([]*replication.Filesystem, error) {
|
2018-06-20 20:20:37 +02:00
|
|
|
fss, err := zfs.ZFSListMapping(p.FSFilter)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
rfss := make([]*replication.Filesystem, len(fss))
|
|
|
|
for i := range fss {
|
|
|
|
rfss[i] = &replication.Filesystem{
|
|
|
|
Path: fss[i].ToString(),
|
|
|
|
// FIXME: not supporting ResumeToken yet
|
2017-05-20 17:08:18 +02:00
|
|
|
}
|
|
|
|
}
|
2018-06-20 20:20:37 +02:00
|
|
|
return rfss, nil
|
2017-12-26 21:37:48 +01:00
|
|
|
}
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-07-08 23:31:46 +02:00
|
|
|
func (p *SenderEndpoint) ListFilesystemVersions(ctx context.Context, fs string) ([]*replication.FilesystemVersion, error) {
|
2018-06-20 20:20:37 +02:00
|
|
|
dp, err := zfs.NewDatasetPath(fs)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2017-07-30 14:56:16 +02:00
|
|
|
}
|
2018-06-20 20:20:37 +02:00
|
|
|
pass, err := p.FSFilter.Filter(dp)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2017-12-26 21:37:48 +01:00
|
|
|
}
|
2018-06-20 20:20:37 +02:00
|
|
|
if !pass {
|
|
|
|
return nil, replication.NewFilteredError(fs)
|
2017-12-26 21:37:48 +01:00
|
|
|
}
|
2018-06-20 20:20:37 +02:00
|
|
|
fsvs, err := zfs.ZFSListFilesystemVersions(dp, p.FilesystemVersionFilter)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
rfsvs := make([]*replication.FilesystemVersion, len(fsvs))
|
|
|
|
for i := range fsvs {
|
|
|
|
rfsvs[i] = replication.FilesystemVersionFromZFS(fsvs[i])
|
2017-12-26 21:37:48 +01:00
|
|
|
}
|
2018-06-20 20:20:37 +02:00
|
|
|
return rfsvs, nil
|
|
|
|
}
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-07-08 23:31:46 +02:00
|
|
|
func (p *SenderEndpoint) Send(ctx context.Context, r *replication.SendReq) (*replication.SendRes, io.ReadCloser, error) {
|
2018-06-20 20:20:37 +02:00
|
|
|
dp, err := zfs.NewDatasetPath(r.Filesystem)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
pass, err := p.FSFilter.Filter(dp)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
if !pass {
|
|
|
|
return nil, nil, replication.NewFilteredError(r.Filesystem)
|
|
|
|
}
|
|
|
|
stream, err := zfs.ZFSSend(r.Filesystem, r.From, r.To)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
return &replication.SendRes{}, stream, nil
|
|
|
|
}
|
2017-12-26 21:37:48 +01:00
|
|
|
|
2018-07-08 23:31:46 +02:00
|
|
|
func (p *SenderEndpoint) Receive(ctx context.Context, r *replication.ReceiveReq, sendStream io.ReadCloser) (error) {
|
2018-06-20 20:20:37 +02:00
|
|
|
return fmt.Errorf("sender endpoint does not receive")
|
|
|
|
}
|
2017-12-26 21:37:48 +01:00
|
|
|
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
// ReceiverEndpoint implements replication.ReplicationEndpoint for a receiving side
type ReceiverEndpoint struct {
	// fsmapInv is the inverse of fsmap; used to translate local dataset
	// paths back into the client's namespace (see ListFilesystems).
	fsmapInv *DatasetMapFilter
	// fsmap maps client-visible filesystem paths to local dataset paths.
	fsmap *DatasetMapFilter
	// fsvf filters which filesystem versions are listed.
	fsvf zfs.FilesystemVersionFilter
}
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
func NewReceiverEndpoint(fsmap *DatasetMapFilter, fsvf zfs.FilesystemVersionFilter) (*ReceiverEndpoint, error) {
|
|
|
|
fsmapInv, err := fsmap.Invert()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return &ReceiverEndpoint{fsmapInv, fsmap, fsvf}, nil
|
|
|
|
}
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-07-08 23:31:46 +02:00
|
|
|
func (e *ReceiverEndpoint) ListFilesystems(ctx context.Context) ([]*replication.Filesystem, error) {
|
2018-06-20 20:20:37 +02:00
|
|
|
filtered, err := zfs.ZFSListMapping(e.fsmapInv.AsFilter())
|
|
|
|
if err != nil {
|
|
|
|
return nil, errors.Wrap(err, "error checking client permission")
|
|
|
|
}
|
|
|
|
fss := make([]*replication.Filesystem, len(filtered))
|
|
|
|
for i, a := range filtered {
|
|
|
|
mapped, err := e.fsmapInv.Map(a)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2017-12-26 21:37:48 +01:00
|
|
|
}
|
2018-06-20 20:20:37 +02:00
|
|
|
fss[i] = &replication.Filesystem{Path: mapped.ToString()}
|
|
|
|
}
|
|
|
|
return fss, nil
|
|
|
|
}
|
2017-12-26 21:37:48 +01:00
|
|
|
|
2018-07-08 23:31:46 +02:00
|
|
|
func (e *ReceiverEndpoint) ListFilesystemVersions(ctx context.Context, fs string) ([]*replication.FilesystemVersion, error) {
|
2018-06-20 20:20:37 +02:00
|
|
|
p, err := zfs.NewDatasetPath(fs)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
lp, err := e.fsmap.Map(p)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if lp == nil {
|
|
|
|
return nil, errors.New("access to filesystem denied")
|
|
|
|
}
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
fsvs, err := zfs.ZFSListFilesystemVersions(lp, e.fsvf)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2017-12-26 21:37:48 +01:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
rfsvs := make([]*replication.FilesystemVersion, len(fsvs))
|
|
|
|
for i := range fsvs {
|
|
|
|
rfsvs[i] = replication.FilesystemVersionFromZFS(fsvs[i])
|
|
|
|
}
|
2017-07-30 14:56:16 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
return rfsvs, nil
|
|
|
|
}
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-07-08 23:31:46 +02:00
|
|
|
func (e *ReceiverEndpoint) Send(ctx context.Context, req *replication.SendReq) (*replication.SendRes, io.ReadCloser, error) {
|
2018-06-20 20:20:37 +02:00
|
|
|
return nil, nil, errors.New("receiver endpoint does not send")
|
|
|
|
}
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-07-08 23:31:46 +02:00
|
|
|
// Receive maps req.Filesystem into the local namespace, creates any
// missing parent filesystems as placeholders, and pipes sendStream
// into zfs receive. sendStream is always closed before returning.
func (e *ReceiverEndpoint) Receive(ctx context.Context, req *replication.ReceiveReq, sendStream io.ReadCloser) error {
	defer sendStream.Close()

	p, err := zfs.NewDatasetPath(req.Filesystem)
	if err != nil {
		return err
	}
	lp, err := e.fsmap.Map(p)
	if err != nil {
		return err
	}
	if lp == nil {
		// nil mapping result means the path is not covered by fsmap
		return errors.New("receive to filesystem denied")
	}

	// create placeholder parent filesystems as appropriate
	var visitErr error
	f := zfs.NewDatasetPathForest()
	f.Add(lp)
	getLogger(ctx).Debug("begin tree-walk")
	f.WalkTopDown(func(v zfs.DatasetPathVisit) (visitChildTree bool) {
		if v.Path.Equal(lp) {
			// the receive target itself is created by zfs recv, not here
			return false
		}
		_, err := zfs.ZFSGet(v.Path, []string{zfs.ZREPL_PLACEHOLDER_PROPERTY_NAME})
		if err != nil {
			// interpret this as an early exit of the zfs binary due to the fs not existing
			if err := zfs.ZFSCreatePlaceholderFilesystem(v.Path); err != nil {
				getLogger(ctx).
					WithError(err).
					WithField("placeholder_fs", v.Path).
					Error("cannot create placeholder filesystem")
				// record the error and stop descending; checked after the walk
				visitErr = err
				return false
			}
		}
		getLogger(ctx).WithField("filesystem", v.Path.ToString()).Debug("exists")
		return true // leave this fs as is
	})
	getLogger(ctx).WithField("visitErr", visitErr).Debug("complete tree-walk")
	if visitErr != nil {
		return visitErr
	}

	// If the receive target itself is a placeholder, it must be clobbered
	// with -F so the real received filesystem can take its place.
	// NOTE(review): a ZFSGet error here is silently treated as
	// "not a placeholder" — presumably because the fs may not exist yet.
	needForceRecv := false
	props, err := zfs.ZFSGet(lp, []string{zfs.ZREPL_PLACEHOLDER_PROPERTY_NAME})
	if err == nil {
		if isPlaceholder, _ := zfs.IsPlaceholder(lp, props.Get(zfs.ZREPL_PLACEHOLDER_PROPERTY_NAME)); isPlaceholder {
			needForceRecv = true
		}
	}

	args := make([]string, 0, 1)
	if needForceRecv {
		args = append(args, "-F")
	}

	getLogger(ctx).Debug("start receive command")

	if err := zfs.ZFSRecv(lp.ToString(), sendStream, args...); err != nil {
		return err
	}
	return nil
}
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
|
|
|
// RPC STUBS
|
|
|
|
// =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2017-07-30 09:14:37 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
// RPC endpoint names, shared between the client stubs (RemoteEndpoint)
// and the server dispatcher (HandlerAdaptor.Handle).
const (
	RPCListFilesystems        = "ListFilesystems"
	RPCListFilesystemVersions = "ListFilesystemVersions"
	RPCReceive                = "Receive"
	RPCSend                   = "Send"
)
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
// RemoteEndpoint is the client-side stub: it implements the replication
// endpoint methods by issuing streamrpc requests to a remote peer
// (see the RPC* endpoint name constants).
type RemoteEndpoint struct {
	*streamrpc.Client
}
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-07-08 23:31:46 +02:00
|
|
|
func (s RemoteEndpoint) ListFilesystems(ctx context.Context) ([]*replication.Filesystem, error) {
|
2018-06-20 20:20:37 +02:00
|
|
|
req := replication.ListFilesystemReq{}
|
|
|
|
b, err := proto.Marshal(&req)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2017-12-26 21:37:48 +01:00
|
|
|
}
|
2018-07-08 23:31:46 +02:00
|
|
|
rb, rs, err := s.RequestReply(ctx, RPCListFilesystems, bytes.NewBuffer(b), nil)
|
2018-06-20 20:20:37 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if rs != nil {
|
2018-07-08 23:31:46 +02:00
|
|
|
rs.Close()
|
2018-06-20 20:20:37 +02:00
|
|
|
return nil, errors.New("response contains unexpected stream")
|
|
|
|
}
|
|
|
|
var res replication.ListFilesystemRes
|
|
|
|
if err := proto.Unmarshal(rb.Bytes(), &res); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return res.Filesystems, nil
|
|
|
|
}
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-07-08 23:31:46 +02:00
|
|
|
func (s RemoteEndpoint) ListFilesystemVersions(ctx context.Context, fs string) ([]*replication.FilesystemVersion, error) {
|
2018-06-20 20:20:37 +02:00
|
|
|
req := replication.ListFilesystemVersionsReq{
|
|
|
|
Filesystem: fs,
|
|
|
|
}
|
|
|
|
b, err := proto.Marshal(&req)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
2017-12-26 21:37:48 +01:00
|
|
|
}
|
2018-07-08 23:31:46 +02:00
|
|
|
rb, rs, err := s.RequestReply(ctx, RPCListFilesystemVersions, bytes.NewBuffer(b), nil)
|
2018-06-20 20:20:37 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
if rs != nil {
|
2018-07-08 23:31:46 +02:00
|
|
|
rs.Close()
|
2018-06-20 20:20:37 +02:00
|
|
|
return nil, errors.New("response contains unexpected stream")
|
|
|
|
}
|
|
|
|
var res replication.ListFilesystemVersionsRes
|
|
|
|
if err := proto.Unmarshal(rb.Bytes(), &res); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
return res.Versions, nil
|
|
|
|
}
|
2017-07-08 13:13:16 +02:00
|
|
|
|
2018-07-08 23:31:46 +02:00
|
|
|
func (s RemoteEndpoint) Send(ctx context.Context, r *replication.SendReq) (*replication.SendRes, io.ReadCloser, error) {
|
2018-06-20 20:20:37 +02:00
|
|
|
b, err := proto.Marshal(r)
|
2017-12-26 21:37:48 +01:00
|
|
|
if err != nil {
|
2018-06-20 20:20:37 +02:00
|
|
|
return nil, nil, err
|
|
|
|
}
|
2018-07-08 23:31:46 +02:00
|
|
|
rb, rs, err := s.RequestReply(ctx, RPCSend, bytes.NewBuffer(b), nil)
|
2018-06-20 20:20:37 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
if rs == nil {
|
|
|
|
return nil, nil, errors.New("response does not contain a stream")
|
|
|
|
}
|
|
|
|
var res replication.SendRes
|
|
|
|
if err := proto.Unmarshal(rb.Bytes(), &res); err != nil {
|
2018-07-08 23:31:46 +02:00
|
|
|
rs.Close()
|
2018-06-20 20:20:37 +02:00
|
|
|
return nil, nil, err
|
2017-12-26 21:37:48 +01:00
|
|
|
}
|
2018-06-20 20:20:37 +02:00
|
|
|
// FIXME make sure the consumer will read the reader until the end...
|
|
|
|
return &res, rs, nil
|
|
|
|
}
|
2017-07-08 13:13:16 +02:00
|
|
|
|
2018-07-08 23:31:46 +02:00
|
|
|
func (s RemoteEndpoint) Receive(ctx context.Context, r *replication.ReceiveReq, sendStream io.ReadCloser) (error) {
|
|
|
|
defer sendStream.Close()
|
2018-06-20 20:20:37 +02:00
|
|
|
b, err := proto.Marshal(r)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2018-07-08 23:31:46 +02:00
|
|
|
rb, rs, err := s.RequestReply(ctx, RPCReceive, bytes.NewBuffer(b), sendStream)
|
2018-06-20 20:20:37 +02:00
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if rs != nil {
|
2018-07-08 23:31:46 +02:00
|
|
|
rs.Close()
|
2018-06-20 20:20:37 +02:00
|
|
|
return errors.New("response contains unexpected stream")
|
2017-12-26 21:37:48 +01:00
|
|
|
}
|
2018-06-20 20:20:37 +02:00
|
|
|
var res replication.ReceiveRes
|
|
|
|
if err := proto.Unmarshal(rb.Bytes(), &res); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
return nil
|
|
|
|
}
|
2017-07-08 13:13:16 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
// HandlerAdaptor exposes a replication.ReplicationEndpoint as a
// streamrpc request handler (see Handle).
type HandlerAdaptor struct {
	// ep is the endpoint that incoming RPC requests are dispatched to.
	ep replication.ReplicationEndpoint
}
|
2017-09-22 14:13:58 +02:00
|
|
|
|
2018-08-08 13:12:50 +02:00
|
|
|
func (a *HandlerAdaptor) Handle(ctx context.Context, endpoint string, reqStructured *bytes.Buffer, reqStream io.ReadCloser) (resStructured *bytes.Buffer, resStream io.ReadCloser, err error) {
|
2018-07-08 23:31:46 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
switch endpoint {
|
|
|
|
case RPCListFilesystems:
|
|
|
|
var req replication.ListFilesystemReq
|
|
|
|
if err := proto.Unmarshal(reqStructured.Bytes(), &req); err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
2018-07-08 23:31:46 +02:00
|
|
|
fsses, err := a.ep.ListFilesystems(ctx)
|
2018-06-20 20:20:37 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
res := &replication.ListFilesystemRes{
|
|
|
|
Filesystems: fsses,
|
|
|
|
}
|
|
|
|
b, err := proto.Marshal(res)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
return bytes.NewBuffer(b), nil, nil
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
case RPCListFilesystemVersions:
|
|
|
|
|
|
|
|
var req replication.ListFilesystemVersionsReq
|
|
|
|
if err := proto.Unmarshal(reqStructured.Bytes(), &req); err != nil {
|
|
|
|
return nil, nil, err
|
2017-05-20 17:08:18 +02:00
|
|
|
}
|
2018-07-08 23:31:46 +02:00
|
|
|
fsvs, err := a.ep.ListFilesystemVersions(ctx, req.Filesystem)
|
2018-06-20 20:20:37 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
res := &replication.ListFilesystemVersionsRes{
|
|
|
|
Versions: fsvs,
|
|
|
|
}
|
|
|
|
b, err := proto.Marshal(res)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
return bytes.NewBuffer(b), nil, nil
|
|
|
|
|
|
|
|
case RPCSend:
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
var req replication.SendReq
|
|
|
|
if err := proto.Unmarshal(reqStructured.Bytes(), &req); err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
2018-07-08 23:31:46 +02:00
|
|
|
res, sendStream, err := a.ep.Send(ctx, &req)
|
2018-06-20 20:20:37 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
b, err := proto.Marshal(res)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
2017-12-26 21:37:48 +01:00
|
|
|
}
|
2018-06-20 20:20:37 +02:00
|
|
|
return bytes.NewBuffer(b), sendStream, err
|
2017-05-20 17:08:18 +02:00
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
case RPCReceive:
|
|
|
|
|
|
|
|
var req replication.ReceiveReq
|
|
|
|
if err := proto.Unmarshal(reqStructured.Bytes(), &req); err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
2018-07-08 23:31:46 +02:00
|
|
|
err := a.ep.Receive(ctx, &req, reqStream)
|
2018-06-20 20:20:37 +02:00
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
b, err := proto.Marshal(&replication.ReceiveRes{})
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, err
|
|
|
|
}
|
|
|
|
return bytes.NewBuffer(b), nil, err
|
2017-05-20 17:08:18 +02:00
|
|
|
|
|
|
|
|
2018-06-20 20:20:37 +02:00
|
|
|
default:
|
|
|
|
return nil, nil, errors.New("no handler for given endpoint")
|
|
|
|
}
|
2017-05-20 17:08:18 +02:00
|
|
|
}
|