Mirror of https://github.com/zrepl/zrepl.git, synced 2025-01-18 12:18:19 +01:00
refactor push + source into active + passive 'sides' with push and source 'modes'
parent 9446b51a1f
commit e3be120d88
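The commit splits each job into a transport-agnostic "side" (the driver that owns the run loop, logging, and metrics) and a pluggable "mode" behind a small interface, so one active driver can back push-style jobs and one passive driver can back serve-style jobs. As rough orientation, here is a minimal, self-contained Go sketch of that composition pattern; the names are made up for illustration and are not the zrepl types introduced in the diff below (those are ActiveSide/activeMode and PassiveSide/passiveMode).

package main

import "fmt"

// driverMode is the pluggable part: only what differs between job types
// lives behind this interface.
type driverMode interface {
	Type() string
	Step() string
}

// driver is the shared "side": it owns the name and the run loop and
// delegates the type-specific work to its mode.
type driver struct {
	name string
	mode driverMode
}

func (d *driver) run() {
	fmt.Printf("job %q (%s): %s\n", d.name, d.mode.Type(), d.mode.Step())
}

// modePush and modeSource are two concrete strategies that reuse the
// same driver unchanged.
type modePush struct{}

func (modePush) Type() string { return "push" }
func (modePush) Step() string { return "snapshot locally, then send to the remote receiver" }

type modeSource struct{}

func (modeSource) Type() string { return "source" }
func (modeSource) Step() string { return "serve snapshots to a client that pulls" }

func main() {
	jobs := []*driver{
		{name: "prod-push", mode: modePush{}},
		{name: "prod-source", mode: modeSource{}},
	}
	for _, j := range jobs {
		j.run()
	}
}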
@@ -202,9 +202,9 @@ func (t *tui) draw() {
 			continue
 		}
 
-		pushStatus, ok := v.JobSpecific.(*job.PushStatus)
+		pushStatus, ok := v.JobSpecific.(*job.ActiveSideStatus)
 		if !ok || pushStatus == nil {
-			t.printf("PushStatus is null")
+			t.printf("ActiveSideStatus is null")
 			t.newline()
 			continue
 		}
@@ -21,42 +21,42 @@ type JobEnum struct {
 	Ret interface{}
 }
 
-type PushJob struct {
+type ActiveJob struct {
 	Type    string      `yaml:"type"`
 	Name    string      `yaml:"name"`
 	Connect ConnectEnum `yaml:"connect"`
-	Filesystems  FilesystemsFilter `yaml:"filesystems"`
-	Snapshotting Snapshotting      `yaml:"snapshotting"`
 	Pruning PruningSenderReceiver `yaml:"pruning"`
 	Debug   JobDebugSettings      `yaml:"debug,optional"`
 }
 
-type SinkJob struct {
+type PassiveJob struct {
 	Type  string           `yaml:"type"`
 	Name  string           `yaml:"name"`
-	RootDataset string     `yaml:"root_dataset"`
 	Serve ServeEnum        `yaml:"serve"`
 	Debug JobDebugSettings `yaml:"debug,optional"`
 }
 
+type PushJob struct {
+	ActiveJob    `yaml:",inline"`
+	Snapshotting Snapshotting      `yaml:"snapshotting"`
+	Filesystems  FilesystemsFilter `yaml:"filesystems"`
+}
+
 type PullJob struct {
-	Type    string      `yaml:"type"`
-	Name    string      `yaml:"name"`
-	Connect ConnectEnum `yaml:"connect"`
+	ActiveJob   `yaml:",inline"`
 	RootDataset string        `yaml:"root_dataset"`
 	Interval    time.Duration `yaml:"interval,positive"`
-	Pruning PruningSenderReceiver `yaml:"pruning"`
-	Debug   JobDebugSettings      `yaml:"debug,optional"`
+}
+
+type SinkJob struct {
+	PassiveJob  `yaml:",inline"`
+	RootDataset string `yaml:"root_dataset"`
 }
 
 type SourceJob struct {
-	Type         string            `yaml:"type"`
-	Name         string            `yaml:"name"`
-	Serve        ServeEnum         `yaml:"serve"`
-	Filesystems  FilesystemsFilter `yaml:"filesystems"`
-	Snapshotting Snapshotting      `yaml:"snapshotting"`
-	Pruning      PruningLocal      `yaml:"pruning"`
-	Debug        JobDebugSettings  `yaml:"debug,optional"`
+	PassiveJob   `yaml:",inline"`
+	Snapshotting Snapshotting      `yaml:"snapshotting"`
+	Filesystems  FilesystemsFilter `yaml:"filesystems"`
 }
 
 type LocalJob struct {
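The job-specific structs now embed the shared ActiveJob/PassiveJob structs with a `yaml:",inline"` tag, so the common keys (type, name, connect, serve, ...) are read from the same mapping level as the job-specific ones. A minimal sketch of how inline embedding behaves, using simplified stand-in structs and gopkg.in/yaml.v2 tag semantics rather than zrepl's actual config package:

package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

// Simplified stand-ins for the common and job-specific structs above.
type ActiveJob struct {
	Type string `yaml:"type"`
	Name string `yaml:"name"`
}

type PushJob struct {
	ActiveJob   `yaml:",inline"` // Type and Name are decoded from the same level
	Filesystems map[string]bool  `yaml:"filesystems"`
}

func main() {
	doc := []byte(`
type: push
name: prod-to-backups
filesystems:
  "zroot/var/db<": true
`)
	var j PushJob
	if err := yaml.Unmarshal(doc, &j); err != nil {
		panic(err)
	}
	// Fields of the embedded ActiveJob are populated alongside Filesystems.
	fmt.Printf("%s job %q, filesystems=%v\n", j.Type, j.Name, j.Filesystems)
}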
@@ -3,6 +3,7 @@ package job
 import (
 	"context"
 	"github.com/pkg/errors"
+	"github.com/problame/go-streamrpc"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/zrepl/zrepl/config"
 	"github.com/zrepl/zrepl/daemon/connecter"
@@ -15,14 +16,13 @@ import (
 	"github.com/zrepl/zrepl/daemon/snapper"
 )
 
-type Push struct {
-	name          string
+type ActiveSide struct {
+	mode          activeMode
+	name          string
 	clientFactory *connecter.ClientFactory
-	fsfilter      endpoint.FSFilter
 
 	prunerFactory *pruner.PrunerFactory
 
-	snapper *snapper.Snapper
-
 	promRepStateSecs *prometheus.HistogramVec // labels: state
 	promPruneSecs    *prometheus.HistogramVec // labels: prune_side
@@ -32,9 +32,48 @@ type Push struct {
 	replication *replication.Replication
 }
 
-func PushFromConfig(g *config.Global, in *config.PushJob) (j *Push, err error) {
+type activeMode interface {
+	SenderReceiver(client *streamrpc.Client) (replication.Sender, replication.Receiver, error)
+	Type() Type
+	RunPeriodic(ctx context.Context, wakeUpCommon chan<- struct{})
+}
 
-	j = &Push{}
+type modePush struct {
+	fsfilter endpoint.FSFilter
+	snapper  *snapper.Snapper
+}
+
+func (m *modePush) SenderReceiver(client *streamrpc.Client) (replication.Sender, replication.Receiver, error) {
+	sender := endpoint.NewSender(m.fsfilter)
+	receiver := endpoint.NewRemote(client)
+	return sender, receiver, nil
+}
+
+func (m *modePush) Type() Type { return TypePush }
+
+func (m *modePush) RunPeriodic(ctx context.Context, wakeUpCommon chan<- struct{}) {
+	m.snapper.Run(ctx, wakeUpCommon)
+}
+
+func modePushFromConfig(g *config.Global, in *config.PushJob) (*modePush, error) {
+	m := &modePush{}
+	fsf, err := filters.DatasetMapFilterFromConfig(in.Filesystems)
+	if err != nil {
+		return nil, errors.Wrap(err, "cannnot build filesystem filter")
+	}
+	m.fsfilter = fsf
+
+	if m.snapper, err = snapper.FromConfig(g, fsf, &in.Snapshotting); err != nil {
+		return nil, errors.Wrap(err, "cannot build snapper")
+	}
+
+	return m, nil
+}
+
+func activeSide(g *config.Global, in *config.ActiveJob, mode activeMode) (j *ActiveSide, err error) {
+
+	j = &ActiveSide{mode: mode}
 	j.name = in.Name
 	j.promRepStateSecs = prometheus.NewHistogramVec(prometheus.HistogramOpts{
 		Namespace: "zrepl",
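The point of the activeMode interface is that the ActiveSide driver no longer cares where sender and receiver live or what triggers an invocation. Purely as an illustration (not part of this commit), a hypothetical pull mode could satisfy the same interface by swapping the endpoints and replacing the snapper with a ticker. The sketch assumes the same package and imports as the code above, and assumes the endpoint constructors behave as they are used elsewhere in this diff; treat those details as assumptions.

// Hypothetical sketch, not in this commit: a pull mode behind the same
// activeMode interface. Assumes endpoint.NewRemote yields a replication.Sender
// and endpoint.NewReceiver accepts a local root dataset, as used elsewhere
// in this diff.
type modePull struct {
	rootDataset *zfs.DatasetPath
	interval    time.Duration
}

func (m *modePull) SenderReceiver(client *streamrpc.Client) (replication.Sender, replication.Receiver, error) {
	sender := endpoint.NewRemote(client)                 // remote side sends
	receiver, err := endpoint.NewReceiver(m.rootDataset) // local side receives
	return sender, receiver, err
}

func (m *modePull) Type() Type { return TypePull }

func (m *modePull) RunPeriodic(ctx context.Context, wakeUpCommon chan<- struct{}) {
	t := time.NewTicker(m.interval)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			select {
			case wakeUpCommon <- struct{}{}: // trigger one replication invocation
			case <-ctx.Done():
				return
			}
		case <-ctx.Done():
			return
		}
	}
}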
@@ -56,12 +95,6 @@ func PushFromConfig(g *config.Global, in *config.PushJob) (j *Push, err error) {
 		return nil, errors.Wrap(err, "cannot build client")
 	}
 
-	fsf, err := filters.DatasetMapFilterFromConfig(in.Filesystems)
-	if err != nil {
-		return nil, errors.Wrap(err, "cannnot build filesystem filter")
-	}
-	j.fsfilter = fsf
-
 	j.promPruneSecs = prometheus.NewHistogramVec(prometheus.HistogramOpts{
 		Namespace: "zrepl",
 		Subsystem: "pruning",
@@ -74,26 +107,22 @@ func PushFromConfig(g *config.Global, in *config.PushJob) (j *Push, err error) {
 		return nil, err
 	}
 
-	if j.snapper, err = snapper.FromConfig(g, fsf, &in.Snapshotting); err != nil {
-		return nil, errors.Wrap(err, "cannot build snapper")
-	}
-
 	return j, nil
 }
 
-func (j *Push) RegisterMetrics(registerer prometheus.Registerer) {
+func (j *ActiveSide) RegisterMetrics(registerer prometheus.Registerer) {
 	registerer.MustRegister(j.promRepStateSecs)
 	registerer.MustRegister(j.promPruneSecs)
 	registerer.MustRegister(j.promBytesReplicated)
 }
 
-func (j *Push) Name() string { return j.name }
+func (j *ActiveSide) Name() string { return j.name }
 
-type PushStatus struct {
+type ActiveSideStatus struct {
 	Replication *replication.Report
 }
 
-func (j *Push) Status() *Status {
+func (j *ActiveSide) Status() *Status {
 	rep := func() *replication.Replication {
 		j.mtx.Lock()
 		defer j.mtx.Unlock()
@@ -102,26 +131,25 @@ func (j *Push) Status() *Status {
 		}
 		return j.replication
 	}()
-	s := &PushStatus{}
+	s := &ActiveSideStatus{}
+	t := j.mode.Type()
 	if rep == nil {
-		return &Status{Type: TypePush, JobSpecific: s}
+		return &Status{Type: t, JobSpecific: s}
 	}
 	s.Replication = rep.Report()
-	return &Status{Type: TypePush, JobSpecific: s}
+	return &Status{Type: t, JobSpecific: s}
 }
 
-func (j *Push) Run(ctx context.Context) {
+func (j *ActiveSide) Run(ctx context.Context) {
 	log := GetLogger(ctx)
+	ctx = logging.WithSubsystemLoggers(ctx, log)
 
 	defer log.Info("job exiting")
 
-	snapshotsTaken := make(chan struct{})
-	{
-		ctx, cancel := context.WithCancel(ctx)
-		defer cancel()
-		ctx = logging.WithSubsystemLoggers(ctx, log)
-		go j.snapper.Run(ctx, snapshotsTaken)
-	}
+	periodicDone := make(chan struct{})
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+	go j.mode.RunPeriodic(ctx, periodicDone)
 
 	invocationCount := 0
 outer:
@@ -133,7 +161,7 @@ outer:
 			break outer
 
 		case <-WaitWakeup(ctx):
-		case <-snapshotsTaken:
+		case <-periodicDone:
 		}
 		invocationCount++
 		invLog := log.WithField("invocation", invocationCount)
@@ -141,7 +169,7 @@ outer:
 	}
 }
 
-func (j *Push) do(ctx context.Context) {
+func (j *ActiveSide) do(ctx context.Context) {
 
 	log := GetLogger(ctx)
 	ctx = logging.WithSubsystemLoggers(ctx, log)
@@ -152,8 +180,7 @@ func (j *Push) do(ctx context.Context) {
 	}
 	defer client.Close(ctx)
 
-	sender := endpoint.NewSender(j.fsfilter)
-	receiver := endpoint.NewRemote(client)
+	sender, receiver, err := j.mode.SenderReceiver(client)
 
 	j.mtx.Lock()
 	j.replication = replication.NewReplication(j.promRepStateSecs, j.promBytesReplicated)
@@ -2,8 +2,8 @@ package job
 
 import (
 	"fmt"
-	"github.com/zrepl/zrepl/config"
 	"github.com/pkg/errors"
+	"github.com/zrepl/zrepl/config"
 )
 
 func JobsFromConfig(c *config.Config) ([]Job, error) {
@@ -19,19 +19,31 @@ func JobsFromConfig(c *config.Config) ([]Job, error) {
 }
 
 func buildJob(c *config.Global, in config.JobEnum) (j Job, err error) {
+	cannotBuildJob := func(e error, name string) (Job, error) {
+		return nil, errors.Wrapf(err, "cannot build job %q", name)
+	}
 	switch v := in.Ret.(type) {
 	case *config.SinkJob:
-		j, err = SinkFromConfig(c, v)
+		m, err := modeSinkFromConfig(c, v)
 		if err != nil {
-			return nil, errors.Wrapf(err, "cannot build job %q", v.Name)
+			return cannotBuildJob(err, v.Name)
+		}
+		j, err = passiveSideFromConfig(c, &v.PassiveJob, m)
+		if err != nil {
+			return cannotBuildJob(err, v.Name)
 		}
 	case *config.PushJob:
-		j, err = PushFromConfig(c, v)
+		m, err := modePushFromConfig(c, v)
 		if err != nil {
-			return nil, errors.Wrapf(err, "cannot build job %q", v.Name)
+			return cannotBuildJob(err, v.Name)
+		}
+		j, err = activeSide(c, &v.ActiveJob, m)
+		if err != nil {
+			return cannotBuildJob(err, v.Name)
 		}
 	default:
 		panic(fmt.Sprintf("implementation error: unknown job type %T", v))
 	}
-	return j, err
+	return j, nil
 
 }
@@ -59,6 +59,8 @@ const (
 	TypeInternal Type = "internal"
 	TypePush     Type = "push"
 	TypeSink     Type = "sink"
+	TypePull     Type = "pull"
+	TypeSource   Type = "source"
 )
 
 type Status struct {
@@ -101,11 +103,11 @@ func (s *Status) UnmarshalJSON(in []byte) (err error) {
 	}
 	switch s.Type {
 	case TypePush:
-		var st PushStatus
+		var st ActiveSideStatus
 		err = json.Unmarshal(jobJSON, &st)
 		s.JobSpecific = &st
 	case TypeSink:
-		var st SinkStatus
+		var st PassiveStatus
 		err = json.Unmarshal(jobJSON, &st)
 		s.JobSpecific = &st
 	case TypeInternal:
@@ -13,43 +13,79 @@ import (
 	"github.com/zrepl/zrepl/zfs"
 )
 
-type Sink struct {
+type PassiveSide struct {
+	mode    passiveMode
 	name    string
 	l       serve.ListenerFactory
 	rpcConf *streamrpc.ConnConfig
+}
+
+type passiveMode interface {
+	ConnHandleFunc(ctx context.Context, conn serve.AuthenticatedConn) streamrpc.HandlerFunc
+	Type() Type
+}
+
+type modeSink struct {
 	rootDataset *zfs.DatasetPath
 }
 
-func SinkFromConfig(g *config.Global, in *config.SinkJob) (s *Sink, err error) {
+func (m *modeSink) Type() Type { return TypeSink }
 
-	s = &Sink{name: in.Name}
+func (m *modeSink) ConnHandleFunc(ctx context.Context, conn serve.AuthenticatedConn) streamrpc.HandlerFunc {
+	log := GetLogger(ctx)
+
+	clientRootStr := path.Join(m.rootDataset.ToString(), conn.ClientIdentity())
+	clientRoot, err := zfs.NewDatasetPath(clientRootStr)
+	if err != nil {
+		log.WithError(err).
+			WithField("client_identity", conn.ClientIdentity()).
+			Error("cannot build client filesystem map (client identity must be a valid ZFS FS name")
+	}
+	log.WithField("client_root", clientRoot).Debug("client root")
+
+	local, err := endpoint.NewReceiver(clientRoot)
+	if err != nil {
+		log.WithError(err).Error("unexpected error: cannot convert mapping to filter")
+		return nil
+	}
+
+	h := endpoint.NewHandler(local)
+	return h.Handle
+}
+
+func modeSinkFromConfig(g *config.Global, in *config.SinkJob) (m *modeSink, err error) {
+	m = &modeSink{}
+	m.rootDataset, err = zfs.NewDatasetPath(in.RootDataset)
+	if err != nil {
+		return nil, errors.New("root dataset is not a valid zfs filesystem path")
+	}
+	if m.rootDataset.Length() <= 0 {
+		return nil, errors.New("root dataset must not be empty") // duplicates error check of receiver
+	}
+	return m, nil
+}
+
+func passiveSideFromConfig(g *config.Global, in *config.PassiveJob, mode passiveMode) (s *PassiveSide, err error) {
+
+	s = &PassiveSide{mode: mode, name: in.Name}
 	if s.l, s.rpcConf, err = serve.FromConfig(g, in.Serve); err != nil {
 		return nil, errors.Wrap(err, "cannot build server")
 	}
 
-	s.rootDataset, err = zfs.NewDatasetPath(in.RootDataset)
-	if err != nil {
-		return nil, errors.New("root dataset is not a valid zfs filesystem path")
-	}
-	if s.rootDataset.Length() <= 0 {
-		return nil, errors.New("root dataset must not be empty") // duplicates error check of receiver
-	}
-
 	return s, nil
 }
 
-func (j *Sink) Name() string { return j.name }
+func (j *PassiveSide) Name() string { return j.name }
 
-type SinkStatus struct {}
+type PassiveStatus struct {}
 
-func (*Sink) Status() *Status {
-	return &Status{Type: TypeSink} // FIXME SinkStatus
+func (s *PassiveSide) Status() *Status {
+	return &Status{Type: s.mode.Type()} // FIXME PassiveStatus
 }
 
-func (*Sink) RegisterMetrics(registerer prometheus.Registerer) {}
+func (*PassiveSide) RegisterMetrics(registerer prometheus.Registerer) {}
 
-func (j *Sink) Run(ctx context.Context) {
+func (j *PassiveSide) Run(ctx context.Context) {
 
 	log := GetLogger(ctx)
 	defer log.Info("job exiting")
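The same trick works on the passive side: ConnHandleFunc is the only per-connection behavior a passiveMode has to supply. Purely as an illustration (not part of this commit), a hypothetical source mode could serve a filtered sender instead of a receiver. The sketch assumes the same package and imports as the code above; endpoint.NewSender is used as shown in modePush, while passing a sender to endpoint.NewHandler is an assumption.

// Hypothetical sketch, not in this commit: a source mode behind the same
// passiveMode interface.
type modeSource struct {
	fsfilter endpoint.FSFilter // which local filesystems a client may replicate
}

func (m *modeSource) Type() Type { return TypeSource }

func (m *modeSource) ConnHandleFunc(ctx context.Context, conn serve.AuthenticatedConn) streamrpc.HandlerFunc {
	// A source serves snapshots, so the local endpoint is a sender,
	// not a receiver as in the sink mode above.
	sender := endpoint.NewSender(m.fsfilter)
	h := endpoint.NewHandler(sender) // assumption: NewHandler also accepts a sender
	return h.Handle
}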
@@ -74,10 +110,26 @@ outer:
 				log.WithError(res.err).Info("accept error")
 				continue
 			}
+			conn := res.conn
 			connId++
 			connLog := log.
 				WithField("connID", connId)
-			go j.handleConnection(WithLogger(ctx, connLog), res.conn)
+			connLog.
+				WithField("addr", conn.RemoteAddr()).
+				WithField("client_identity", conn.ClientIdentity()).
+				Info("handling connection")
+			go func() {
+				defer connLog.Info("finished handling connection")
+				defer conn.Close()
+				ctx := logging.WithSubsystemLoggers(ctx, connLog)
+				handleFunc := j.mode.ConnHandleFunc(ctx, conn)
+				if handleFunc == nil {
+					return
+				}
+				if err := streamrpc.ServeConn(ctx, conn, j.rpcConf, handleFunc); err != nil {
+					log.WithError(err).Error("error serving client")
+				}
+			}()
 
 		case <-ctx.Done():
 			break outer
@@ -87,39 +139,6 @@ outer:
 
 }
 
-func (j *Sink) handleConnection(ctx context.Context, conn serve.AuthenticatedConn) {
-	defer conn.Close()
-
-	log := GetLogger(ctx)
-	log.
-		WithField("addr", conn.RemoteAddr()).
-		WithField("client_identity", conn.ClientIdentity()).
-		Info("handling connection")
-	defer log.Info("finished handling connection")
-
-	clientRootStr := path.Join(j.rootDataset.ToString(), conn.ClientIdentity())
-	clientRoot, err := zfs.NewDatasetPath(clientRootStr)
-	if err != nil {
-		log.WithError(err).
-			WithField("client_identity", conn.ClientIdentity()).
-			Error("cannot build client filesystem map (client identity must be a valid ZFS FS name")
-	}
-	log.WithField("client_root", clientRoot).Debug("client root")
-
-	ctx = logging.WithSubsystemLoggers(ctx, log)
-
-	local, err := endpoint.NewReceiver(clientRoot)
-	if err != nil {
-		log.WithError(err).Error("unexpected error: cannot convert mapping to filter")
-		return
-	}
-
-	handler := endpoint.NewHandler(local)
-	if err := streamrpc.ServeConn(ctx, conn, j.rpcConf, handler.Handle); err != nil {
-		log.WithError(err).Error("error serving client")
-	}
-}
-
 type acceptResult struct {
 	conn serve.AuthenticatedConn
 	err  error