zrepl/cmd/config_job_pull.go

package cmd

import (
	"context"
	"net"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/mitchellh/mapstructure"
	"github.com/pkg/errors"
	"github.com/problame/go-streamrpc"

	"github.com/zrepl/zrepl/cmd/endpoint"
	"github.com/zrepl/zrepl/replication"
)
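
// PullJob periodically pulls (replicates) snapshots from a remote sender
// and prunes the receiving side afterwards.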
type PullJob struct {
	Name     string
	Connect  streamrpc.Connecter
	Interval time.Duration
	Mapping  *DatasetMapFilter
	// constructed from mapping during parsing
	pruneFilter       *DatasetMapFilter
	SnapshotPrefix    string
	InitialReplPolicy replication.InitialReplPolicy
	Prune             PrunePolicy
	Debug             JobDebugSettings

	task *Task
	rep  *replication.Replication
}
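
// parsePullJob builds a PullJob from the raw config map: it parses the
// 'connect', 'interval', 'mapping', 'initial_repl_policy', 'snapshot_prefix',
// 'prune' and 'debug' sections and derives the prune filter by inverting
// the dataset mapping.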
func parsePullJob(c JobParsingContext, name string, i map[string]interface{}) (j *PullJob, err error) {

	var asMap struct {
		Connect           map[string]interface{}
		Interval          string
		Mapping           map[string]string
		InitialReplPolicy string `mapstructure:"initial_repl_policy"`
		Prune             map[string]interface{}
		SnapshotPrefix    string `mapstructure:"snapshot_prefix"`
		Debug             map[string]interface{}
	}

	if err = mapstructure.Decode(i, &asMap); err != nil {
		err = errors.Wrap(err, "mapstructure error")
		return nil, err
	}

	j = &PullJob{Name: name}

	j.Connect, err = parseSSHStdinserverConnecter(asMap.Connect)
	if err != nil {
		err = errors.Wrap(err, "cannot parse 'connect'")
		return nil, err
	}

	if j.Interval, err = parsePostitiveDuration(asMap.Interval); err != nil {
		err = errors.Wrap(err, "cannot parse 'interval'")
		return nil, err
	}

	j.Mapping, err = parseDatasetMapFilter(asMap.Mapping, false)
	if err != nil {
		err = errors.Wrap(err, "cannot parse 'mapping'")
		return nil, err
	}

	if j.pruneFilter, err = j.Mapping.InvertedFilter(); err != nil {
		err = errors.Wrap(err, "cannot automatically invert 'mapping' for prune job")
		return nil, err
	}

	j.InitialReplPolicy, err = parseInitialReplPolicy(asMap.InitialReplPolicy, replication.DEFAULT_INITIAL_REPL_POLICY)
	if err != nil {
		err = errors.Wrap(err, "cannot parse 'initial_repl_policy'")
		return
	}

	if j.SnapshotPrefix, err = parseSnapshotPrefix(asMap.SnapshotPrefix); err != nil {
		return
	}

	if j.Prune, err = parsePrunePolicy(asMap.Prune, false); err != nil {
		err = errors.Wrap(err, "cannot parse prune policy")
		return
	}

	if err = mapstructure.Decode(asMap.Debug, &j.Debug); err != nil {
		err = errors.Wrap(err, "cannot parse 'debug'")
		return
	}

	if j.Debug.Conn.ReadDump != "" || j.Debug.Conn.WriteDump != "" {
		logConnecter := logNetConnConnecter{
			Connecter: j.Connect,
			ReadDump:  j.Debug.Conn.ReadDump,
			WriteDump: j.Debug.Conn.WriteDump,
		}
		j.Connect = logConnecter
	}

	return
}
func (j *PullJob) JobName() string {
	return j.Name
}

func (j *PullJob) JobType() JobType { return JobTypePull }
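
// JobStart runs the pull-and-prune cycle every Interval. A run can also be
// triggered early by sending SIGUSR1; the loop exits when ctx is cancelled.
// A warning is logged if a single run takes longer than the configured interval.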
func (j *PullJob) JobStart(ctx context.Context) {

	log := ctx.Value(contextKeyLog).(Logger)
	defer log.Info("exiting")
	j.task = NewTask("main", j, log)

	// j.task is idle here

	usr1 := make(chan os.Signal, 1) // buffered, as required by signal.Notify
	signal.Notify(usr1, syscall.SIGUSR1)
	defer signal.Stop(usr1)

	ticker := time.NewTicker(j.Interval)
	for {
		begin := time.Now()
		j.doRun(ctx)
		duration := time.Now().Sub(begin)
		if duration > j.Interval {
			j.task.Log().
				WithField("actual_duration", duration).
				WithField("configured_interval", j.Interval).
				Warn("pull run took longer than configured interval")
		}
		select {
		case <-ctx.Done():
			j.task.Log().WithError(ctx.Err()).Info("context")
			return
		case <-ticker.C:
		case <-usr1:
		}
	}
}
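
// STREAMRPC_CONFIG holds the connection limits and progress timeouts for the
// streamrpc client created in doRun.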
var STREAMRPC_CONFIG = &streamrpc.ConnConfig{ // FIXME oversight and configurability
	RxHeaderMaxLen:       4096,
	RxStructuredMaxLen:   4096 * 4096,
	RxStreamMaxChunkSize: 4096 * 4096,
	TxChunkSize:          4096 * 4096,
	RxTimeout: streamrpc.Timeout{
		Progress: 10 * time.Second,
	},
	TxTimeout: streamrpc.Timeout{
		Progress: 10 * time.Second,
	},
}
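
// doRun performs one replication pass: it connects to the remote side via
// streamrpc, drives the replication from the remote sender into the local
// receiver, and then runs the pruner on the receiving side.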
func (j *PullJob) doRun(ctx context.Context) {

	j.task.Enter("run")
	defer j.task.Finish()

	// FIXME
	clientConf := &streamrpc.ClientConfig{
		ConnConfig: STREAMRPC_CONFIG,
	}

	//client, err := streamrpc.NewClient(j.Connect, clientConf)
	client, err := streamrpc.NewClient(&tcpConnecter{net.Dialer{
		Timeout: 10 * time.Second,
	}}, clientConf)
	if err != nil {
		j.task.Log().WithError(err).Error("error creating streamrpc client")
		return
	}
	defer client.Close()

	j.task.Enter("pull")

	sender := endpoint.NewRemote(client)

	puller, err := endpoint.NewReceiver(
		j.Mapping,
		NewPrefixFilter(j.SnapshotPrefix),
	)
	if err != nil {
		j.task.Log().WithError(err).Error("error creating receiver endpoint")
		j.task.Finish()
		return
	}

	ctx = replication.WithLogger(ctx, replicationLogAdaptor{j.task.Log().WithField("subsystem", "replication")})
	ctx = streamrpc.ContextWithLogger(ctx, streamrpcLogAdaptor{j.task.Log().WithField("subsystem", "rpc.protocol")})
	ctx = endpoint.WithLogger(ctx, j.task.Log().WithField("subsystem", "rpc.endpoint"))

	j.rep = replication.NewReplication()
	j.rep.Drive(ctx, sender, puller)

	client.Close()
	j.task.Finish()

	j.task.Enter("prune")
	pruner, err := j.Pruner(j.task, PrunePolicySideDefault, false)
	if err != nil {
		j.task.Log().WithError(err).Error("error creating pruner")
	} else {
		pruner.Run(ctx)
	}
	j.task.Finish()
}
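
// Report returns the report of the current or most recent replication run.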
func (j *PullJob) Report() *replication.Report {
	return j.rep.Report()
}

func (j *PullJob) JobStatus(ctxt context.Context) (*JobStatus, error) {
	return &JobStatus{Tasks: []*TaskStatus{j.task.Status()}}, nil
}
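
// Pruner constructs a Pruner for the receiving side, using the inverted
// mapping as the dataset filter together with the job's snapshot prefix
// and prune policy.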
func (j *PullJob) Pruner(task *Task, side PrunePolicySide, dryRun bool) (p Pruner, err error) {
	p = Pruner{
		task,
		time.Now(),
		dryRun,
		j.pruneFilter,
		j.SnapshotPrefix,
		j.Prune,
	}
	return
}