package pruner

import (
	"context"
	"fmt"
	"sort"
	"strings"
	"sync"
	"time"

	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/zrepl/zrepl/config"
	"github.com/zrepl/zrepl/logger"
	"github.com/zrepl/zrepl/pruning"
	"github.com/zrepl/zrepl/replication/logic/pdu"
	"github.com/zrepl/zrepl/util/envconst"
	"github.com/zrepl/zrepl/util/watchdog"
)
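
// History provides the replication cursor and the set of filesystems it can
// exist for; the pruner only queries it and never modifies anything through it.
//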
// Try to keep it compatible with github.com/zrepl/zrepl/endpoint.Endpoint
type History interface {
	ReplicationCursor(ctx context.Context, req *pdu.ReplicationCursorReq) (*pdu.ReplicationCursorRes, error)
	ListFilesystems(ctx context.Context, req *pdu.ListFilesystemReq) (*pdu.ListFilesystemRes, error)
}
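
// Target is the side on which snapshots are actually pruned: the pruner lists
// its filesystems and versions and asks it to destroy snapshots.
//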
// Try to keep it compatible with github.com/zrepl/zrepl/endpoint.Endpoint
type Target interface {
	ListFilesystems(ctx context.Context, req *pdu.ListFilesystemReq) (*pdu.ListFilesystemRes, error)
	ListFilesystemVersions(ctx context.Context, req *pdu.ListFilesystemVersionsReq) (*pdu.ListFilesystemVersionsRes, error)
	DestroySnapshots(ctx context.Context, req *pdu.DestroySnapshotsReq) (*pdu.DestroySnapshotsRes, error)
}

type Logger = logger.Logger

type contextKey int

const contextKeyLogger contextKey = 0

func WithLogger(ctx context.Context, log Logger) context.Context {
	return context.WithValue(ctx, contextKeyLogger, log)
}

func GetLogger(ctx context.Context) Logger {
	if l, ok := ctx.Value(contextKeyLogger).(Logger); ok {
		return l
	}
	return logger.NewNullLogger()
}

type args struct {
	ctx                            context.Context
	target                         Target
	receiver                       History
	rules                          []pruning.KeepRule
	retryWait                      time.Duration
	considerSnapAtCursorReplicated bool
	promPruneSecs                  prometheus.Observer
}
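
// Pruner holds the state of a single pruning invocation. It is constructed by
// one of the factories below and driven to completion by Prune().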
type Pruner struct {
	args args

	Progress watchdog.KeepAlive

	mtx sync.RWMutex

	state State

	// State PlanErr
	err error

	// State Exec
	execQueue *execQueue
}
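
// PrunerFactory builds the sender- and receiver-side pruners of a replication
// job from its pruning configuration.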
type PrunerFactory struct {
	senderRules                    []pruning.KeepRule
	receiverRules                  []pruning.KeepRule
	retryWait                      time.Duration
	considerSnapAtCursorReplicated bool
	promPruneSecs                  *prometheus.HistogramVec
}
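
// SinglePrunerFactory builds pruners for jobs that prune snapshots on a single
// side only, without any replication being involved.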
type SinglePrunerFactory struct {
	keepRules     []pruning.KeepRule
	retryWait     time.Duration
	promPruneSecs *prometheus.HistogramVec
}
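
// checkContainsKeep1 returns an error unless rules is empty (keep everything)
// or contains a KeepLastN rule, i.e. unless the most recent snapshots are
// guaranteed to survive pruning.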
func checkContainsKeep1(rules []pruning.KeepRule) error {
	if len(rules) == 0 {
		return nil // no keep rules means keep all - ok
	}
	for _, e := range rules {
		switch e.(type) {
		case *pruning.KeepLastN:
			return nil
		}
	}
	return errors.New("sender keep rules must contain last_n or be empty so that the last snapshot is definitely kept")
}

func NewSinglePrunerFactory(in config.PruningLocal, promPruneSecs *prometheus.HistogramVec) (*SinglePrunerFactory, error) {
	rules, err := pruning.RulesFromConfig(in.Keep)
	if err != nil {
		return nil, errors.Wrap(err, "cannot build pruning rules")
	}
	for _, r := range in.Keep {
		if _, ok := r.Ret.(*config.PruneKeepNotReplicated); ok {
			// rule NotReplicated for a local pruner doesn't make sense
			// because no replication happens with that job type
			return nil, fmt.Errorf("single-site pruner cannot support `not_replicated` keep rule")
		}
	}
	f := &SinglePrunerFactory{
		keepRules:     rules,
		retryWait:     envconst.Duration("ZREPL_PRUNER_RETRY_INTERVAL", 10*time.Second),
		promPruneSecs: promPruneSecs,
	}
	return f, nil
}
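
// NewPrunerFactory builds a PrunerFactory from the sender/receiver pruning
// configuration of a job.
//
// Minimal usage sketch; pruningConf, promPruneSecs, ctx, target and history
// are placeholders for values the calling job provides (target must implement
// Target, history must implement History):
//
//	f, err := NewPrunerFactory(pruningConf, promPruneSecs)
//	if err != nil {
//		return err
//	}
//	f.BuildSenderPruner(ctx, target, history).Prune()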
func NewPrunerFactory(in config.PruningSenderReceiver, promPruneSecs *prometheus.HistogramVec) (*PrunerFactory, error) {
	keepRulesReceiver, err := pruning.RulesFromConfig(in.KeepReceiver)
	if err != nil {
		return nil, errors.Wrap(err, "cannot build receiver pruning rules")
	}

	keepRulesSender, err := pruning.RulesFromConfig(in.KeepSender)
	if err != nil {
		return nil, errors.Wrap(err, "cannot build sender pruning rules")
	}

	considerSnapAtCursorReplicated := false
	for _, r := range in.KeepSender {
		knr, ok := r.Ret.(*config.PruneKeepNotReplicated)
		if !ok {
			continue
		}
		considerSnapAtCursorReplicated = considerSnapAtCursorReplicated || !knr.KeepSnapshotAtCursor
	}
	f := &PrunerFactory{
		senderRules:                    keepRulesSender,
		receiverRules:                  keepRulesReceiver,
		retryWait:                      envconst.Duration("ZREPL_PRUNER_RETRY_INTERVAL", 10*time.Second),
		considerSnapAtCursorReplicated: considerSnapAtCursorReplicated,
		promPruneSecs:                  promPruneSecs,
	}
	return f, nil
}
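
// BuildSenderPruner returns a Pruner that applies the sender-side keep rules;
// it destroys snapshots via target and consults receiver for the replication
// cursor to decide which snapshots count as replicated.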
func (f *PrunerFactory) BuildSenderPruner(ctx context.Context, target Target, receiver History) *Pruner {
	p := &Pruner{
		args: args{
			WithLogger(ctx, GetLogger(ctx).WithField("prune_side", "sender")),
			target,
			receiver,
			f.senderRules,
			f.retryWait,
			f.considerSnapAtCursorReplicated,
			f.promPruneSecs.WithLabelValues("sender"),
		},
		state: Plan,
	}
	return p
}

func (f *PrunerFactory) BuildReceiverPruner(ctx context.Context, target Target, receiver History) *Pruner {
	p := &Pruner{
		args: args{
			WithLogger(ctx, GetLogger(ctx).WithField("prune_side", "receiver")),
			target,
			receiver,
			f.receiverRules,
			f.retryWait,
			false, // senseless here anyways
			f.promPruneSecs.WithLabelValues("receiver"),
		},
		state: Plan,
	}
	return p
}
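
// BuildSinglePruner returns a Pruner that applies the configured keep rules to
// a single, local side.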
func (f *SinglePrunerFactory) BuildSinglePruner(ctx context.Context, target Target, receiver History) *Pruner {
	p := &Pruner{
		args: args{
			ctx,
			target,
			receiver,
			f.keepRules,
			f.retryWait,
			false, // considerSnapAtCursorReplicated is not relevant for local pruning
			f.promPruneSecs.WithLabelValues("local"),
		},
		state: Plan,
	}
	return p
}

//go:generate enumer -type=State
type State int

const (
	Plan State = 1 << iota
	PlanErr
	Exec
	ExecErr
	Done
)

type updater func(func(*Pruner))
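
// Prune runs one pruning attempt (planning followed by execution) and returns
// when it has finished; progress can be observed concurrently through Report.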
func (p *Pruner) Prune() {
	p.prune(p.args)
}

func (p *Pruner) prune(args args) {
	u := func(f func(*Pruner)) {
		p.mtx.Lock()
		defer p.mtx.Unlock()
		f(p)
	}
	// TODO support automatic retries
	// It is advisable to merge this code with package replication/driver before doing that.
	// That will likely require re-modelling struct fs like replication/driver.attempt,
	// including figuring out how to resume a plan after being interrupted by network errors.
	// The non-retrying code in this package should move straight to replication/logic.
	doOneAttempt(&args, u)
}
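
// Report is a point-in-time snapshot of a pruning invocation's progress, as
// returned by Pruner.Report().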
type Report struct {
	State              string
	Error              string
	Pending, Completed []FSReport
}

type FSReport struct {
	Filesystem                string
	SnapshotList, DestroyList []SnapshotReport
	SkipReason                FSSkipReason
	LastError                 string
}

type SnapshotReport struct {
	Name       string
	Replicated bool
	Date       time.Time
}

func (p *Pruner) Report() *Report {
	p.mtx.Lock()
	defer p.mtx.Unlock()

	r := Report{State: p.state.String()}

	if p.err != nil {
		r.Error = p.err.Error()
	}

	if p.execQueue != nil {
		r.Pending, r.Completed = p.execQueue.Report()
	}

	return &r
}

func (p *Pruner) State() State {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	return p.state
}
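
// fs is the pruner's per-filesystem planning and execution state.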
type fs struct {
	path string

	// permanent error during planning
	planErr        error
	planErrContext string

	// if != "", the fs was skipped for planning and the field
	// contains the reason
	skipReason FSSkipReason

	// snapshots presented by target
	// (type snapshot)
	snaps []pruning.Snapshot
	// destroy list returned by pruning.PruneSnapshots(snaps)
	// (type snapshot)
	destroyList []pruning.Snapshot

	mtx sync.RWMutex

	// only during Exec state, also used by execQueue
	execErrLast error
}
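
// FSSkipReason explains why a filesystem was skipped during planning.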
type FSSkipReason string

const (
	NotSkipped                   = ""
	SkipPlaceholder              = "filesystem is placeholder"
	SkipNoCorrespondenceOnSender = "filesystem has no correspondence on sender"
)

func (r FSSkipReason) NotSkipped() bool {
	return r == NotSkipped
}

func (f *fs) Report() FSReport {
	f.mtx.Lock()
	defer f.mtx.Unlock()

	r := FSReport{}
	r.Filesystem = f.path
	r.SkipReason = f.skipReason
	if !r.SkipReason.NotSkipped() {
		return r
	}

	if f.planErr != nil {
		r.LastError = f.planErr.Error()
	} else if f.execErrLast != nil {
		r.LastError = f.execErrLast.Error()
	}

	r.SnapshotList = make([]SnapshotReport, len(f.snaps))
	for i, snap := range f.snaps {
		r.SnapshotList[i] = snap.(snapshot).Report()
	}

	r.DestroyList = make([]SnapshotReport, len(f.destroyList))
	for i, snap := range f.destroyList {
		r.DestroyList[i] = snap.(snapshot).Report()
	}

	return r
}

type snapshot struct {
	replicated bool
	date       time.Time
	fsv        *pdu.FilesystemVersion
}

func (s snapshot) Report() SnapshotReport {
	return SnapshotReport{
		Name:       s.Name(),
		Replicated: s.Replicated(),
		Date:       s.Date(),
	}
}

var _ pruning.Snapshot = snapshot{}

func (s snapshot) Name() string { return s.fsv.Name }

func (s snapshot) Replicated() bool { return s.replicated }

func (s snapshot) Date() time.Time { return s.date }
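
// doOneAttempt performs one planning pass over all filesystems presented by
// the target, followed by one execution pass over the resulting destroy lists.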
func doOneAttempt(a *args, u updater) {

	ctx, target, receiver := a.ctx, a.target, a.receiver
	var ka *watchdog.KeepAlive
	u(func(pruner *Pruner) {
		ka = &pruner.Progress
	})

	sfssres, err := receiver.ListFilesystems(ctx, &pdu.ListFilesystemReq{})
	if err != nil {
		u(func(p *Pruner) {
			p.state = PlanErr
			p.err = err
		})
		return
	}
	sfss := make(map[string]*pdu.Filesystem)
	for _, sfs := range sfssres.GetFilesystems() {
		sfss[sfs.GetPath()] = sfs
	}

	tfssres, err := target.ListFilesystems(ctx, &pdu.ListFilesystemReq{})
	if err != nil {
		u(func(p *Pruner) {
			p.state = PlanErr
			p.err = err
		})
		return
	}
	tfss := tfssres.GetFilesystems()

	pfss := make([]*fs, len(tfss))
tfss_loop:
	for i, tfs := range tfss {

		l := GetLogger(ctx).WithField("fs", tfs.Path)
		l.Debug("plan filesystem")

		pfs := &fs{
			path: tfs.Path,
		}
		pfss[i] = pfs

		if tfs.GetIsPlaceholder() {
			pfs.skipReason = SkipPlaceholder
			l.WithField("skip_reason", pfs.skipReason).Debug("skipping filesystem")
			continue
		} else if sfs := sfss[tfs.GetPath()]; sfs == nil {
			pfs.skipReason = SkipNoCorrespondenceOnSender
			l.WithField("skip_reason", pfs.skipReason).WithField("sfs", sfs.GetPath()).Debug("skipping filesystem")
			continue
		}

		pfsPlanErrAndLog := func(err error, message string) {
			t := fmt.Sprintf("%T", err)
			pfs.planErr = err
			pfs.planErrContext = message
			l.WithField("orig_err_type", t).WithError(err).Error(fmt.Sprintf("%s: plan error, skipping filesystem", message))
		}

		tfsvsres, err := target.ListFilesystemVersions(ctx, &pdu.ListFilesystemVersionsReq{Filesystem: tfs.Path})
		if err != nil {
			pfsPlanErrAndLog(err, "cannot list filesystem versions")
			continue tfss_loop
		}
		tfsvs := tfsvsres.GetVersions()
		// no progress here since we could run in a live-lock (must have used target AND receiver before progress)

		pfs.snaps = make([]pruning.Snapshot, 0, len(tfsvs))

		rcReq := &pdu.ReplicationCursorReq{
			Filesystem: tfs.Path,
			Op: &pdu.ReplicationCursorReq_Get{
				Get: &pdu.ReplicationCursorReq_GetOp{},
			},
		}
		rc, err := receiver.ReplicationCursor(ctx, rcReq)
		if err != nil {
			pfsPlanErrAndLog(err, "cannot get replication cursor bookmark")
			continue tfss_loop
		}
		ka.MadeProgress()
		if rc.GetNotexist() {
			err := errors.New("replication cursor bookmark does not exist (one successful replication is required before pruning works)")
			pfsPlanErrAndLog(err, "")
			continue tfss_loop
		}

		// scan from older to newer, all snapshots older than cursor are interpreted as replicated
		sort.Slice(tfsvs, func(i, j int) bool {
			return tfsvs[i].CreateTXG < tfsvs[j].CreateTXG
		})
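
		// Determine whether the snapshot the replication cursor points to still
		// exists on the target. If it does not (e.g. only the cursor bookmark is
		// left), preCursor starts out false and every snapshot is treated as
		// unreplicated.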
		haveCursorSnapshot := false
		for _, tfsv := range tfsvs {
			if tfsv.Type != pdu.FilesystemVersion_Snapshot {
				continue
			}
			if tfsv.Guid == rc.GetGuid() {
				haveCursorSnapshot = true
			}
		}
		preCursor := haveCursorSnapshot
		for _, tfsv := range tfsvs {
			if tfsv.Type != pdu.FilesystemVersion_Snapshot {
				continue
			}
			creation, err := tfsv.CreationAsTime()
			if err != nil {
				err := fmt.Errorf("%s: %s", tfsv.RelName(), err)
				pfsPlanErrAndLog(err, "fs version with invalid creation date")
				continue tfss_loop
			}
			// note that we cannot use CreateTXG because target and receiver could be on different pools
			atCursor := tfsv.Guid == rc.GetGuid()
			preCursor = preCursor && !atCursor
			pfs.snaps = append(pfs.snaps, snapshot{
				replicated: preCursor || (a.considerSnapAtCursorReplicated && atCursor),
				date:       creation,
				fsv:        tfsv,
			})
		}
		if preCursor {
			pfsPlanErrAndLog(fmt.Errorf("replication cursor not found in prune target filesystem versions"), "")
			continue tfss_loop
		}

		// Apply prune rules
		pfs.destroyList = pruning.PruneSnapshots(pfs.snaps, a.rules)
		ka.MadeProgress()
	}

	u(func(pruner *Pruner) {
		pruner.Progress.MadeProgress()
		pruner.execQueue = newExecQueue(len(pfss))
		for _, pfs := range pfss {
			pruner.execQueue.Put(pfs, nil, false)
		}
		pruner.state = Exec
	})

	for {
		var pfs *fs
		u(func(pruner *Pruner) {
			pfs = pruner.execQueue.Pop()
		})
		if pfs == nil {
			break
		}
		doOneAttemptExec(a, u, pfs)
	}

	var rep *Report
	{
		// must not hold lock for report
		var pruner *Pruner
		u(func(p *Pruner) {
			pruner = p
		})
		rep = pruner.Report()
	}
	u(func(p *Pruner) {
		if len(rep.Pending) > 0 {
			panic("queue should not have pending items at this point")
		}
		hadErr := false
		for _, fsr := range rep.Completed {
			hadErr = hadErr || fsr.SkipReason.NotSkipped() && fsr.LastError != ""
		}
		if hadErr {
			p.state = ExecErr
		} else {
			p.state = Done
		}
	})
}

// attempts to exec pfs, puts it back into the queue with the result
func doOneAttemptExec(a *args, u updater, pfs *fs) {

	destroyList := make([]*pdu.FilesystemVersion, len(pfs.destroyList))
	for i := range destroyList {
		destroyList[i] = pfs.destroyList[i].(snapshot).fsv
		GetLogger(a.ctx).
			WithField("fs", pfs.path).
			WithField("destroy_snap", destroyList[i].Name).
			Debug("policy destroys snapshot")
	}
	req := pdu.DestroySnapshotsReq{
		Filesystem: pfs.path,
		Snapshots:  destroyList,
	}
	GetLogger(a.ctx).WithField("fs", pfs.path).Debug("destroying snapshots")
	res, err := a.target.DestroySnapshots(a.ctx, &req)
	if err != nil {
		u(func(pruner *Pruner) {
			pruner.execQueue.Put(pfs, err, false)
		})
		return
	}
	// check if all snapshots were destroyed
	destroyResults := make(map[string]*pdu.DestroySnapshotRes)
	for _, fsres := range res.Results {
		destroyResults[fsres.Snapshot.Name] = fsres
	}
	err = nil
	destroyFails := make([]*pdu.DestroySnapshotRes, 0)
	for _, reqDestroy := range destroyList {
		res, ok := destroyResults[reqDestroy.Name]
		if !ok {
			err = fmt.Errorf("missing destroy-result for %s", reqDestroy.RelName())
			break
		} else if res.Error != "" {
			destroyFails = append(destroyFails, res)
		}
	}
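
	// Aggregate the per-snapshot failures into a single error message;
	// if every failure carries the same message, report it only once.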
	if err == nil && len(destroyFails) > 0 {
		names := make([]string, len(destroyFails))
		pairs := make([]string, len(destroyFails))
		allSame := true
		lastMsg := destroyFails[0].Error
		for i := 0; i < len(destroyFails); i++ {
			allSame = allSame && destroyFails[i].Error == lastMsg
			relname := destroyFails[i].Snapshot.RelName()
			names[i] = relname
			pairs[i] = fmt.Sprintf("(%s: %s)", relname, destroyFails[i].Error)
		}
		if allSame {
			err = fmt.Errorf("destroys failed %s: %s",
				strings.Join(names, ", "), lastMsg)
		} else {
			err = fmt.Errorf("destroys failed: %s", strings.Join(pairs, ", "))
		}
	}
	u(func(pruner *Pruner) {
		pruner.execQueue.Put(pfs, err, err == nil)
	})
	if err != nil {
		GetLogger(a.ctx).WithError(err).Error("target could not destroy snapshots")
		return
	}
}