Mirror of https://github.com/zrepl/zrepl.git (synced 2024-11-24 17:35:01 +01:00)
client/signal: Revert "add signal 'snapshot', rename existing signal 'wakeup' to 'replication'"
This was merged to master prematurely as the job components are not decoupled well enough
for these signals to be useful yet.
This reverts commit 2c8c2cfa14.
closes #452
parent ee2336a24b
commit b2c6e51a43
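For orientation, and not part of the patch itself: the wakeup mechanism this revert restores is a small context-channel pattern. The daemon attaches a per-job channel to the job's context, keeps the returned trigger func in a registry, and the job's run loop blocks on Wait(ctx). A minimal, self-contained sketch of that pattern, mirroring the shapes visible in the diff below (the package main wrapper and timings are illustrative only):

// Sketch of the context-channel signal pattern restored by this revert.
// The shapes mirror daemon/job/wakeup; main() is illustrative only.
package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

type contextKey int

const contextKeyWakeup contextKey = iota

// Wait returns the wakeup channel carried by ctx, or a channel that
// never fires if ctx carries none.
func Wait(ctx context.Context) <-chan struct{} {
    wc, ok := ctx.Value(contextKeyWakeup).(chan struct{})
    if !ok {
        wc = make(chan struct{})
    }
    return wc
}

var AlreadyWokenUp = errors.New("already woken up")

// Context attaches a fresh wakeup channel to ctx and returns the
// trigger func that the daemon stores per job name.
func Context(ctx context.Context) (context.Context, func() error) {
    wc := make(chan struct{})
    wuf := func() error {
        select {
        case wc <- struct{}{}: // delivered: the job was waiting
            return nil
        default: // no receiver ready: report instead of queueing
            return AlreadyWokenUp
        }
    }
    return context.WithValue(ctx, contextKeyWakeup, wc), wuf
}

func main() {
    ctx, wakeup := Context(context.Background())
    go func() {
        <-Wait(ctx) // a job's run loop blocks here
        fmt.Println("job woken up")
    }()
    time.Sleep(50 * time.Millisecond) // let the fake job reach Wait
    fmt.Println("signal:", wakeup())  // what the control handler invokes
    time.Sleep(50 * time.Millisecond)
}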
@@ -11,8 +11,8 @@ import (
 )
 
 var SignalCmd = &cli.Subcommand{
-    Use:   "signal [replication|reset|snapshot] JOB",
-    Short: "run a job replication, abort its current invocation, run a snapshot job",
+    Use:   "signal [wakeup|reset] JOB",
+    Short: "wake up a job from wait state or abort its current invocation",
     Run: func(ctx context.Context, subcommand *cli.Subcommand, args []string) error {
         return runSignalCmd(subcommand.Config(), args)
     },
@@ -20,7 +20,7 @@ var SignalCmd = &cli.Subcommand{
 
 func runSignalCmd(config *config.Config, args []string) error {
     if len(args) != 2 {
-        return errors.Errorf("Expected 2 arguments: [replication|reset|snapshot] JOB")
+        return errors.Errorf("Expected 2 arguments: [wakeup|reset] JOB")
     }
 
     httpc, err := controlHttpClient(config.Global.Control.SockPath)
@@ -40,7 +40,7 @@ jobs:
 
 # This job pushes to the local sink defined in job `backuppool_sink`.
 # We trigger replication manually from the command line / udev rules using
-#   `zrepl signal replication push_to_drive`
+#   `zrepl signal wakeup push_to_drive`
 - type: push
   name: push_to_drive
   connect:
@@ -143,12 +143,10 @@ func (j *controlJob) Run(ctx context.Context) {
 
             var err error
             switch req.Op {
-            case "replication":
-                err = j.jobs.doreplication(req.Name)
+            case "wakeup":
+                err = j.jobs.wakeup(req.Name)
             case "reset":
                 err = j.jobs.reset(req.Name)
-            case "snapshot":
-                err = j.jobs.dosnapshot(req.Name)
             default:
                 err = fmt.Errorf("operation %q is invalid", req.Op)
             }
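The dispatch restored above can be read in isolation: the control handler maps the operation string to a per-job registry of trigger funcs. A sketch under that reading; the registry type and job name here are illustrative, not zrepl's actual types:

// Standalone sketch of op-string dispatch over a per-job trigger registry.
package main

import "fmt"

type jobsRegistry struct {
    wakeups map[string]func() error
    resets  map[string]func() error
}

func (r *jobsRegistry) signal(op, job string) error {
    var m map[string]func() error
    switch op {
    case "wakeup":
        m = r.wakeups
    case "reset":
        m = r.resets
    default:
        return fmt.Errorf("operation %q is invalid", op)
    }
    f, ok := m[job]
    if !ok {
        return fmt.Errorf("job %s does not exist", job)
    }
    return f()
}

func main() {
    r := &jobsRegistry{
        wakeups: map[string]func() error{
            "push_to_drive": func() error { fmt.Println("replication triggered"); return nil },
        },
        resets: map[string]func() error{},
    }
    fmt.Println(r.signal("wakeup", "push_to_drive"))   // <nil>
    fmt.Println(r.signal("snapshot", "push_to_drive")) // rejected after this revert
}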
@@ -20,9 +20,8 @@ import (
 
     "github.com/zrepl/zrepl/config"
     "github.com/zrepl/zrepl/daemon/job"
-    "github.com/zrepl/zrepl/daemon/job/doreplication"
-    "github.com/zrepl/zrepl/daemon/job/dosnapshot"
     "github.com/zrepl/zrepl/daemon/job/reset"
+    "github.com/zrepl/zrepl/daemon/job/wakeup"
     "github.com/zrepl/zrepl/daemon/logging"
     "github.com/zrepl/zrepl/logger"
     "github.com/zrepl/zrepl/version"
@@ -132,19 +131,17 @@ type jobs struct {
     wg sync.WaitGroup
 
     // m protects all fields below it
-    m              sync.RWMutex
-    doreplications map[string]doreplication.Func // by Job.Name
-    resets         map[string]reset.Func         // by Job.Name
-    dosnapshots    map[string]dosnapshot.Func    // by Job.Name
-    jobs           map[string]job.Job
+    m       sync.RWMutex
+    wakeups map[string]wakeup.Func // by Job.Name
+    resets  map[string]reset.Func  // by Job.Name
+    jobs    map[string]job.Job
 }
 
 func newJobs() *jobs {
     return &jobs{
-        doreplications: make(map[string]doreplication.Func),
-        resets:         make(map[string]reset.Func),
-        dosnapshots:    make(map[string]dosnapshot.Func),
-        jobs:           make(map[string]job.Job),
+        wakeups: make(map[string]wakeup.Func),
+        resets:  make(map[string]reset.Func),
+        jobs:    make(map[string]job.Job),
     }
 }
@@ -193,11 +190,11 @@ func (s *jobs) status() map[string]*job.Status {
     return ret
 }
 
-func (s *jobs) doreplication(job string) error {
+func (s *jobs) wakeup(job string) error {
     s.m.RLock()
     defer s.m.RUnlock()
 
-    wu, ok := s.doreplications[job]
+    wu, ok := s.wakeups[job]
     if !ok {
         return errors.Errorf("Job %s does not exist", job)
     }
@@ -215,17 +212,6 @@ func (s *jobs) reset(job string) error {
     return wu()
 }
 
-func (s *jobs) dosnapshot(job string) error {
-    s.m.RLock()
-    defer s.m.RUnlock()
-
-    wu, ok := s.dosnapshots[job]
-    if !ok {
-        return errors.Errorf("Job %s does not exist", job)
-    }
-    return wu()
-}
-
 const (
     jobNamePrometheus = "_prometheus"
     jobNameControl    = "_control"
@@ -256,12 +242,10 @@ func (s *jobs) start(ctx context.Context, j job.Job, internal bool) {
 
     s.jobs[jobName] = j
     ctx = zfscmd.WithJobID(ctx, j.Name())
-    ctx, doreplication := doreplication.Context(ctx)
+    ctx, wakeup := wakeup.Context(ctx)
     ctx, resetFunc := reset.Context(ctx)
-    ctx, dosnapshotFunc := dosnapshot.Context(ctx)
-    s.doreplications[jobName] = doreplication
+    s.wakeups[jobName] = wakeup
     s.resets[jobName] = resetFunc
-    s.dosnapshots[jobName] = dosnapshotFunc
 
     s.wg.Add(1)
     go func() {
@@ -14,8 +14,8 @@ import (
     "github.com/zrepl/zrepl/util/envconst"
 
     "github.com/zrepl/zrepl/config"
-    "github.com/zrepl/zrepl/daemon/job/doreplication"
     "github.com/zrepl/zrepl/daemon/job/reset"
+    "github.com/zrepl/zrepl/daemon/job/wakeup"
     "github.com/zrepl/zrepl/daemon/pruner"
     "github.com/zrepl/zrepl/daemon/snapper"
     "github.com/zrepl/zrepl/endpoint"
@@ -89,7 +89,7 @@ type activeMode interface {
     SenderReceiver() (logic.Sender, logic.Receiver)
     Type() Type
     PlannerPolicy() logic.PlannerPolicy
-    RunPeriodic(ctx context.Context, replicationCommon chan<- struct{})
+    RunPeriodic(ctx context.Context, wakeUpCommon chan<- struct{})
     SnapperReport() *snapper.Report
     ResetConnectBackoff()
 }
@@ -131,8 +131,8 @@ func (m *modePush) Type() Type { return TypePush }
 
 func (m *modePush) PlannerPolicy() logic.PlannerPolicy { return *m.plannerPolicy }
 
-func (m *modePush) RunPeriodic(ctx context.Context, replicationCommon chan<- struct{}) {
-    m.snapper.Run(ctx, replicationCommon)
+func (m *modePush) RunPeriodic(ctx context.Context, wakeUpCommon chan<- struct{}) {
+    m.snapper.Run(ctx, wakeUpCommon)
 }
 
 func (m *modePush) SnapperReport() *snapper.Report {
@@ -214,10 +214,10 @@ func (*modePull) Type() Type { return TypePull }
 
 func (m *modePull) PlannerPolicy() logic.PlannerPolicy { return *m.plannerPolicy }
 
-func (m *modePull) RunPeriodic(ctx context.Context, replicationCommon chan<- struct{}) {
+func (m *modePull) RunPeriodic(ctx context.Context, wakeUpCommon chan<- struct{}) {
     if m.interval.Manual {
         GetLogger(ctx).Info("manual pull configured, periodic pull disabled")
-        // "waiting for wakeup replications" is printed in common ActiveSide.do
+        // "waiting for wakeups" is printed in common ActiveSide.do
         return
     }
     t := time.NewTicker(m.interval.Interval)
@@ -226,12 +226,12 @@ func (m *modePull) RunPeriodic(ctx context.Context, replicationCommon chan<- str
         select {
         case <-t.C:
             select {
-            case replicationCommon <- struct{}{}:
+            case wakeUpCommon <- struct{}{}:
             default:
                 GetLogger(ctx).
                     WithField("pull_interval", m.interval).
                     Warn("pull job took longer than pull interval")
-                replicationCommon <- struct{}{} // block anyways, to queue up the wakeup replication
+                wakeUpCommon <- struct{}{} // block anyways, to queue up the wakeup
             }
         case <-ctx.Done():
             return
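The loop above keeps the "non-blocking send first, then block" idiom, so a pull that overruns its interval is followed by at most one queued invocation. A standalone demo of just that idiom, with made-up timings and no zrepl types:

// Demo: try a non-blocking send; on failure, warn and block so exactly
// one wakeup queues up behind a slow consumer.
package main

import (
    "fmt"
    "time"
)

func main() {
    wakeups := make(chan struct{}) // unbuffered, like wakeUpCommon
    go func() {
        for range wakeups {
            time.Sleep(300 * time.Millisecond) // slow consumer, like a long pull invocation
        }
    }()
    for i := 0; i < 3; i++ {
        select {
        case wakeups <- struct{}{}: // fast path: the consumer was idle
        default:
            fmt.Println("consumer busy, blocking to queue up one wakeup")
            wakeups <- struct{}{} // block anyways, as RunPeriodic does
        }
    }
}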
@@ -435,13 +435,13 @@ func (j *ActiveSide) Run(ctx context.Context) {
     invocationCount := 0
 outer:
     for {
-        log.Info("wait for replications")
+        log.Info("wait for wakeups")
         select {
         case <-ctx.Done():
             log.WithError(ctx.Err()).Info("context")
             break outer
 
-        case <-doreplication.Wait(ctx):
+        case <-wakeup.Wait(ctx):
             j.mode.ResetConnectBackoff()
         case <-periodicDone:
         }
@@ -1,35 +0,0 @@
-package doreplication
-
-import (
-    "context"
-    "errors"
-)
-
-type contextKey int
-
-const contextKeyReplication contextKey = iota
-
-func Wait(ctx context.Context) <-chan struct{} {
-    wc, ok := ctx.Value(contextKeyReplication).(chan struct{})
-    if !ok {
-        wc = make(chan struct{})
-    }
-    return wc
-}
-
-type Func func() error
-
-var AlreadyReplicating = errors.New("already replicating")
-
-func Context(ctx context.Context) (context.Context, Func) {
-    wc := make(chan struct{})
-    wuf := func() error {
-        select {
-        case wc <- struct{}{}:
-            return nil
-        default:
-            return AlreadyReplicating
-        }
-    }
-    return context.WithValue(ctx, contextKeyReplication, wc), wuf
-}
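The deleted package above and the restored wakeup package share one property worth noting: a signal is delivered only if the job loop is currently blocked in Wait; otherwise the trigger func returns an error instead of queueing. A tiny demo of that property (names are stand-ins):

// Demo: with an unbuffered channel and no receiver, the trigger reports
// an error rather than stacking signals.
package main

import (
    "errors"
    "fmt"
)

var errAlreadySignalled = errors.New("already replicating")

func main() {
    wc := make(chan struct{}) // unbuffered per-job channel, as in Context()
    trigger := func() error {
        select {
        case wc <- struct{}{}: // only succeeds if a job loop is receiving
            return nil
        default:
            return errAlreadySignalled
        }
    }
    // no goroutine is blocked on <-wc, so the signal is rejected:
    fmt.Println(trigger()) // already replicating
}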
@@ -9,12 +9,12 @@ import (
     "github.com/pkg/errors"
     "github.com/prometheus/client_golang/prometheus"
 
-    "github.com/zrepl/zrepl/daemon/job/doreplication"
     "github.com/zrepl/zrepl/daemon/logging/trace"
     "github.com/zrepl/zrepl/util/nodefault"
 
     "github.com/zrepl/zrepl/config"
     "github.com/zrepl/zrepl/daemon/filters"
+    "github.com/zrepl/zrepl/daemon/job/wakeup"
     "github.com/zrepl/zrepl/daemon/pruner"
     "github.com/zrepl/zrepl/daemon/snapper"
     "github.com/zrepl/zrepl/endpoint"
@@ -112,13 +112,13 @@ func (j *SnapJob) Run(ctx context.Context) {
     invocationCount := 0
 outer:
     for {
-        log.Info("wait for replications")
+        log.Info("wait for wakeups")
         select {
         case <-ctx.Done():
             log.WithError(ctx.Err()).Info("context")
             break outer
 
-        case <-doreplication.Wait(ctx):
+        case <-wakeup.Wait(ctx):
         case <-periodicDone:
         }
         invocationCount++
@@ -1,4 +1,4 @@
-package dosnapshot
+package wakeup
 
 import (
     "context"
@@ -7,10 +7,10 @@ import (
 
 type contextKey int
 
-const contextKeyDosnapshot contextKey = iota
+const contextKeyWakeup contextKey = iota
 
 func Wait(ctx context.Context) <-chan struct{} {
-    wc, ok := ctx.Value(contextKeyDosnapshot).(chan struct{})
+    wc, ok := ctx.Value(contextKeyWakeup).(chan struct{})
     if !ok {
         wc = make(chan struct{})
     }
@@ -19,7 +19,7 @@ func Wait(ctx context.Context) <-chan struct{} {
 
 type Func func() error
 
-var AlreadyDosnapshot = errors.New("already snapshotting")
+var AlreadyWokenUp = errors.New("already woken up")
 
 func Context(ctx context.Context) (context.Context, Func) {
     wc := make(chan struct{})
@@ -28,8 +28,8 @@ func Context(ctx context.Context) (context.Context, Func) {
         case wc <- struct{}{}:
             return nil
         default:
-            return AlreadyDosnapshot
+            return AlreadyWokenUp
         }
     }
-    return context.WithValue(ctx, contextKeyDosnapshot, wc), wuf
+    return context.WithValue(ctx, contextKeyWakeup, wc), wuf
 }
@@ -9,7 +9,6 @@ import (
 
     "github.com/pkg/errors"
 
-    "github.com/zrepl/zrepl/daemon/job/dosnapshot"
     "github.com/zrepl/zrepl/daemon/logging/trace"
 
     "github.com/zrepl/zrepl/config"
@@ -211,10 +210,6 @@ func syncUp(a args, u updater) state {
         return u(func(s *Snapper) {
             s.state = Planning
         }).sf()
-    case <-dosnapshot.Wait(a.ctx):
-        return u(func(s *Snapper) {
-            s.state = Planning
-        }).sf()
     case <-a.ctx.Done():
         return onMainCtxDone(a.ctx, u)
     }
@@ -383,10 +378,6 @@ func wait(a args, u updater) state {
         return u(func(snapper *Snapper) {
             snapper.state = Planning
         }).sf()
-    case <-dosnapshot.Wait(a.ctx):
-        return u(func(snapper *Snapper) {
-            snapper.state = Planning
-        }).sf()
     case <-a.ctx.Done():
         return onMainCtxDone(a.ctx, u)
     }
@@ -18,9 +18,9 @@ type PeriodicOrManual struct {
     s *Snapper
 }
 
-func (s *PeriodicOrManual) Run(ctx context.Context, replicationCommon chan<- struct{}) {
+func (s *PeriodicOrManual) Run(ctx context.Context, wakeUpCommon chan<- struct{}) {
     if s.s != nil {
-        s.s.Run(ctx, replicationCommon)
+        s.s.Run(ctx, wakeUpCommon)
     }
 }
@@ -32,7 +32,6 @@ We use the following annotations for classifying changes:
 0.4.0
 -----
 
-* |break| Change syntax to trigger a job replication, rename ``zrepl signal wakeup JOB`` to ``zrepl signal replication JOB``
 * |feature| support setting zfs send / recv flags in the config (send: ``-wLcepbS`` , recv: ``-ox`` ).
   Config docs :ref:`here <job-send-options>` and :ref:`here <job-recv-options>` .
 * |feature| parallel replication is now configurable (disabled by default, :ref:`config docs here <replication-option-concurrency>` ).
@@ -78,7 +78,7 @@ Job Type ``pull``
       ``$root_fs/$source_path``
    * - ``interval``
      - | Interval at which to pull from the source job (e.g. ``10m``).
-       | ``manual`` disables periodic pulling, replication then only happens on :ref:`replication <cli-signal-replication>`.
+       | ``manual`` disables periodic pulling, replication then only happens on :ref:`wakeup <cli-signal-wakeup>`.
    * - ``pruning``
      - |pruning-spec|
 
@@ -15,7 +15,7 @@ A filesystem that does not have snapshots by the snapshotter has lower priority
 
 For ``push`` jobs, replication is automatically triggered after all filesystems have been snapshotted.
 
-Note that the ``zrepl signal replication JOB`` subcommand does not trigger snapshotting.
+Note that the ``zrepl signal wakeup JOB`` subcommand does not trigger snapshotting.
 
 
 ::
@@ -38,7 +38,7 @@ There is also a ``manual`` snapshotting type, which covers the following use cas
 * Existing infrastructure for automatic snapshots: you only want to use this zrepl job for replication.
 * Handling snapshotting through a separate ``snap`` job.
 
-Note that you will have to trigger replication manually using the ``zrepl signal replication JOB`` subcommand in that case.
+Note that you will have to trigger replication manually using the ``zrepl signal wakeup JOB`` subcommand in that case.
 
 ::
 
@@ -55,8 +55,8 @@ Watch it Work
 =============
 
 Run ``zrepl status`` on the active side of the replication setup to monitor snaphotting, replication and pruning activity.
-To re-trigger replication (snapshots are separate!), use ``zrepl signal replication JOBNAME``.
-(refer to the example use case document if you are uncertain which job you want to start replication).
+To re-trigger replication (snapshots are separate!), use ``zrepl signal wakeup JOBNAME``.
+(refer to the example use case document if you are uncertain which job you want to wake up).
 
 You can also use basic UNIX tools to inspect see what's going on.
 If you like tmux, here is a handy script that works on FreeBSD: ::
@@ -13,7 +13,7 @@ CLI Overview
 The zrepl binary is self-documenting:
 run ``zrepl help`` for an overview of the available subcommands or ``zrepl SUBCOMMAND --help`` for information on available flags, etc.
 
-.. _cli-signal-replication:
+.. _cli-signal-wakeup:
 
 .. list-table::
    :widths: 30 70
@@ -29,7 +29,7 @@ CLI Overview
      - show job activity, or with ``--raw`` for JSON output
    * - ``zrepl stdinserver``
      - see :ref:`transport-ssh+stdinserver`
-   * - ``zrepl signal replication JOB``
+   * - ``zrepl signal wakeup JOB``
      - manually trigger replication + pruning of JOB
    * - ``zrepl signal reset JOB``
      - manually abort current replication + pruning of JOB