2020-05-10 15:06:44 +02:00
|
|
|
package endpoint
|
|
|
|
|
|
|
|
import (
|
|
|
|
"context"
|
|
|
|
"fmt"
|
|
|
|
"sync"
|
|
|
|
|
|
|
|
"github.com/prometheus/client_golang/prometheus"
|
2020-08-31 16:04:00 +02:00
|
|
|
|
2024-10-18 19:21:17 +02:00
|
|
|
"github.com/zrepl/zrepl/internal/daemon/logging/trace"
|
|
|
|
"github.com/zrepl/zrepl/internal/util/chainlock"
|
2020-05-10 15:06:44 +02:00
|
|
|
)
|
|
|
|
|
2020-06-27 23:53:33 +02:00
|
|
|
// abstractionsCacheMetrics holds the Prometheus metrics for the
// process-wide abstractions cache.
var abstractionsCacheMetrics struct {
	// count mirrors len(abstractionsCache.abstractions); updated by
	// Put, InvalidateFSCache and GetAndDeleteByJobIDAndFS.
	count prometheus.Gauge
}
|
|
|
|
|
|
|
|
func init() {
|
2020-06-27 23:53:33 +02:00
|
|
|
abstractionsCacheMetrics.count = prometheus.NewGauge(prometheus.GaugeOpts{
|
2020-05-10 15:06:44 +02:00
|
|
|
Namespace: "zrepl",
|
|
|
|
Subsystem: "endpoint",
|
2020-06-27 23:53:33 +02:00
|
|
|
Name: "abstractions_cache_entry_count",
|
|
|
|
Help: "number of abstractions tracked in the abstractionsCache data structure",
|
2020-05-10 15:06:44 +02:00
|
|
|
})
|
|
|
|
}
|
|
|
|
|
2020-06-27 23:53:33 +02:00
|
|
|
// abstractionsCacheSingleton is the process-wide abstractions cache instance.
var abstractionsCacheSingleton = newAbstractionsCache()
|
2020-05-10 15:06:44 +02:00
|
|
|
|
2020-06-27 23:53:33 +02:00
|
|
|
// AbstractionsCacheInvalidate drops all cached abstractions for filesystem fs
// from the process-wide cache so they are re-read from disk on next access.
func AbstractionsCacheInvalidate(fs string) {
	abstractionsCacheSingleton.InvalidateFSCache(fs)
}
|
|
|
|
|
2020-06-27 23:53:33 +02:00
|
|
|
// abstractionsCacheDidLoadFSState tracks, per filesystem, whether the
// on-disk abstractions have been loaded into the in-memory cache.
type abstractionsCacheDidLoadFSState int

const (
	// not loaded (or invalidated); the next reader becomes the loader
	abstractionsCacheDidLoadFSStateNo abstractionsCacheDidLoadFSState = iota // 0-value has meaning
	// a load is in flight; other readers wait on didLoadFSChanged
	abstractionsCacheDidLoadFSStateInProgress
	// on-disk abstractions have been merged into the cache
	abstractionsCacheDidLoadFSStateDone
)
|
|
|
|
|
2020-06-27 23:53:33 +02:00
|
|
|
// abstractionsCache is an in-memory store of endpoint abstractions,
// lazily populated per filesystem from on-disk state and updated via
// Put / InvalidateFSCache. All fields are protected by mtx.
type abstractionsCache struct {
	mtx chainlock.L
	// flat list of all cached abstractions, across all filesystems and jobs
	abstractions []Abstraction
	// per-filesystem load state; absent key == ...StateNo (0-value has meaning)
	didLoadFS map[string]abstractionsCacheDidLoadFSState
	// signaled whenever didLoadFS changes; derived from mtx in newAbstractionsCache
	didLoadFSChanged *sync.Cond
}
|
|
|
|
|
2020-06-27 23:53:33 +02:00
|
|
|
func newAbstractionsCache() *abstractionsCache {
|
|
|
|
c := &abstractionsCache{
|
|
|
|
didLoadFS: make(map[string]abstractionsCacheDidLoadFSState),
|
2020-05-10 15:06:44 +02:00
|
|
|
}
|
|
|
|
c.didLoadFSChanged = c.mtx.NewCond()
|
|
|
|
return c
|
|
|
|
}
|
|
|
|
|
2020-06-27 23:53:33 +02:00
|
|
|
func (s *abstractionsCache) Put(a Abstraction) {
|
2020-05-10 15:06:44 +02:00
|
|
|
defer s.mtx.Lock().Unlock()
|
|
|
|
|
|
|
|
var zeroJobId JobID
|
|
|
|
if a.GetJobID() == nil {
|
|
|
|
panic("abstraction must not have nil job id")
|
|
|
|
} else if *a.GetJobID() == zeroJobId {
|
|
|
|
panic(fmt.Sprintf("abstraction must not have zero-value job id: %s", a))
|
|
|
|
}
|
|
|
|
|
|
|
|
s.abstractions = append(s.abstractions, a)
|
2020-06-27 23:53:33 +02:00
|
|
|
abstractionsCacheMetrics.count.Set(float64(len(s.abstractions)))
|
2020-05-10 15:06:44 +02:00
|
|
|
}
|
|
|
|
|
2020-06-27 23:53:33 +02:00
|
|
|
func (s *abstractionsCache) InvalidateFSCache(fs string) {
|
2020-05-10 15:06:44 +02:00
|
|
|
// FIXME: O(n)
|
|
|
|
newAbs := make([]Abstraction, 0, len(s.abstractions))
|
|
|
|
for _, a := range s.abstractions {
|
|
|
|
if a.GetFS() != fs {
|
|
|
|
newAbs = append(newAbs, a)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s.abstractions = newAbs
|
2020-06-27 23:53:33 +02:00
|
|
|
abstractionsCacheMetrics.count.Set(float64(len(s.abstractions)))
|
2020-05-10 15:06:44 +02:00
|
|
|
|
2020-06-27 23:53:33 +02:00
|
|
|
s.didLoadFS[fs] = abstractionsCacheDidLoadFSStateNo
|
2020-05-10 15:06:44 +02:00
|
|
|
s.didLoadFSChanged.Broadcast()
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
// GetAndDeleteByJobIDAndFS removes from the cache, and returns, every
// abstraction that belongs to jobID and fs, whose type is in types, and
// for which keep returns false.
//
// - logs errors in getting on-disk abstractions
// - only fetches on-disk abstractions once, but every time from the in-memory store
//
// That means that for precise results, all abstractions created by the endpoint must be .Put into this cache.
func (s *abstractionsCache) GetAndDeleteByJobIDAndFS(ctx context.Context, jobID JobID, fs string, types AbstractionTypeSet, keep func(a Abstraction) bool) (ret []Abstraction) {
	defer s.mtx.Lock().Unlock()
	defer trace.WithSpanFromStackUpdateCtx(&ctx)()
	// Zero-value arguments would silently match nothing or the wrong
	// entries; fail loudly instead.
	var zeroJobId JobID
	if jobID == zeroJobId {
		panic("must not pass zero-value job id")
	}
	if fs == "" {
		panic("must not pass zero-value fs")
	}

	// Ensure the on-disk abstractions for fs have been merged into
	// s.abstractions (at most one load per fs; may temporarily drop the
	// lock — see tryLoadOnDiskAbstractions).
	s.tryLoadOnDiskAbstractions(ctx, fs)

	// FIXME O(n)
	// Partition the cache: matches go to ret, everything else stays.
	var remaining []Abstraction
	for _, a := range s.abstractions {
		aJobId := *a.GetJobID()
		aFS := a.GetFS()
		if aJobId == jobID && aFS == fs && types[a.GetType()] && !keep(a) {
			ret = append(ret, a)
		} else {
			remaining = append(remaining, a)
		}
	}
	s.abstractions = remaining
	abstractionsCacheMetrics.count.Set(float64(len(s.abstractions)))

	return ret
}
|
|
|
|
|
|
|
|
// tryLoadOnDiskAbstractions ensures the on-disk abstractions for fs are
// listed and merged into s.abstractions at most once. Concurrent callers
// for the same fs wait on didLoadFSChanged until the in-flight load
// completes; on error, the state is reset so a later call retries.
//
// caller must hold s.mtx
func (s *abstractionsCache) tryLoadOnDiskAbstractions(ctx context.Context, fs string) {
	for s.didLoadFS[fs] != abstractionsCacheDidLoadFSStateDone {
		if s.didLoadFS[fs] == abstractionsCacheDidLoadFSStateInProgress {
			// Another goroutine is loading this fs; sleep until the
			// state changes, then re-check from the top.
			s.didLoadFSChanged.Wait()
			continue
		}
		if s.didLoadFS[fs] != abstractionsCacheDidLoadFSStateNo {
			panic(fmt.Sprintf("unreachable: %v", s.didLoadFS[fs]))
		}

		// We are the loader for fs. The deferred Broadcast is safe even
		// though it is registered inside the loop: this iteration always
		// returns below, so it fires right after the state is finalized.
		s.didLoadFS[fs] = abstractionsCacheDidLoadFSStateInProgress
		defer s.didLoadFSChanged.Broadcast()

		// Drop s.mtx around the (slow) listing so other cache users are
		// not blocked; DropWhile re-acquires it before we touch state again.
		var onDiskAbs []Abstraction
		var err error
		s.mtx.DropWhile(func() {
			onDiskAbs, err = s.tryLoadOnDiskAbstractionsImpl(ctx, fs) // no shadow
		})

		if err != nil {
			// Reset to ...No so the next caller retries the load.
			s.didLoadFS[fs] = abstractionsCacheDidLoadFSStateNo
			getLogger(ctx).WithField("fs", fs).WithError(err).Error("cannot list abstractions for filesystem")
		} else {
			s.didLoadFS[fs] = abstractionsCacheDidLoadFSStateDone
			s.abstractions = append(s.abstractions, onDiskAbs...)
			getLogger(ctx).WithField("fs", fs).WithField("abstractions", onDiskAbs).Debug("loaded step abstractions for filesystem")
		}
		return
	}
}
|
|
|
|
|
|
|
|
// tryLoadOnDiskAbstractionsImpl lists the hold- and bookmark-based
// abstractions currently on disk for filesystem fs, across all jobs.
//
// caller should _not hold s.mtx
func (s *abstractionsCache) tryLoadOnDiskAbstractionsImpl(ctx context.Context, fs string) ([]Abstraction, error) {
	defer trace.WithSpanFromStackUpdateCtx(&ctx)()

	q := ListZFSHoldsAndBookmarksQuery{
		// Restrict the listing to the single filesystem being cached.
		FS: ListZFSHoldsAndBookmarksQueryFilesystemFilter{
			FS: &fs,
		},
		// nil: do not restrict by job — per-job filtering happens later
		// in GetAndDeleteByJobIDAndFS.
		JobID: nil,
		What: AbstractionTypeSet{
			AbstractionStepHold:                           true,
			AbstractionTentativeReplicationCursorBookmark: true,
			AbstractionReplicationCursorBookmarkV2:        true,
			AbstractionLastReceivedHold:                   true,
		},
		Concurrency: 1,
	}
	abs, absErrs, err := ListAbstractions(ctx, q)
	if err != nil {
		return nil, err
	}
	// Per-abstraction listing errors fail the whole load so the caller can
	// retry later. NOTE(review): an earlier comment here claimed absErrs
	// were "safe to ignore", but the code returns them — confirm intent.
	if len(absErrs) > 0 {
		return nil, ListAbstractionsErrors(absErrs)
	}
	return abs, nil
}
|
|
|
|
|
2020-06-27 23:53:33 +02:00
|
|
|
func (s *abstractionsCache) TryBatchDestroy(ctx context.Context, jobId JobID, fs string, types AbstractionTypeSet, keep func(a Abstraction) bool, check func(willDestroy []Abstraction)) {
|
2020-05-10 15:06:44 +02:00
|
|
|
// no s.mtx, we only use the public interface in this function
|
|
|
|
|
|
|
|
defer trace.WithSpanFromStackUpdateCtx(&ctx)()
|
|
|
|
|
2020-06-27 23:53:33 +02:00
|
|
|
obsoleteAbs := s.GetAndDeleteByJobIDAndFS(ctx, jobId, fs, types, keep)
|
2020-05-10 15:06:44 +02:00
|
|
|
|
|
|
|
if check != nil {
|
|
|
|
check(obsoleteAbs)
|
|
|
|
}
|
|
|
|
|
|
|
|
hadErr := false
|
|
|
|
for res := range BatchDestroy(ctx, obsoleteAbs) {
|
|
|
|
if res.DestroyErr != nil {
|
|
|
|
hadErr = true
|
|
|
|
getLogger(ctx).
|
|
|
|
WithField("abstraction", res.Abstraction).
|
|
|
|
WithError(res.DestroyErr).
|
2020-06-27 23:53:33 +02:00
|
|
|
Error("cannot destroy abstraction")
|
2020-05-10 15:06:44 +02:00
|
|
|
} else {
|
|
|
|
getLogger(ctx).
|
|
|
|
WithField("abstraction", res.Abstraction).
|
2020-06-27 23:53:33 +02:00
|
|
|
Info("destroyed abstraction")
|
2020-05-10 15:06:44 +02:00
|
|
|
}
|
|
|
|
}
|
|
|
|
if hadErr {
|
|
|
|
s.InvalidateFSCache(fs)
|
|
|
|
}
|
|
|
|
|
|
|
|
}
|