zrepl/daemon/job/job.go
Christian Schwarz 10a14a8c50 [#307] add package trace, integrate it with logging, and adopt it throughout zrepl
package trace:

- introduce the concept of tasks and spans, tracked as a linked list within ctx (a minimal sketch of the idea follows this list)
    - see package-level docs for an overview of the concepts
    - **main feature 1**: unique stack of task and span IDs
        - makes it easy to follow a series of log entries in concurrent code
    - **main feature 2**: ability to produce a chrome://tracing-compatible trace file
        - either via an env variable or a `zrepl pprof` subcommand
        - this is not a CPU profile; we already have Go's pprof for that
        - but it is very useful for visually inspecting where the
          replication / snapshotter / pruner spends its time
          (fixes #307)
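
A minimal, self-contained sketch of the linked-list idea follows. Every name in it (`withSpan`, `stack`, the `$` separator) is an illustrative stand-in, not package trace's actual API:

```go
package main

import (
	"context"
	"fmt"
)

// node is one element of the ID stack; the stack lives in ctx as a
// linked list, so pushing a span never mutates the parent's context.
type node struct {
	parent *node
	id     string
}

type stackKey struct{}

// withSpan pushes a task/span ID onto the stack carried by ctx.
func withSpan(ctx context.Context, id string) context.Context {
	parent, _ := ctx.Value(stackKey{}).(*node)
	return context.WithValue(ctx, stackKey{}, &node{parent: parent, id: id})
}

// stack renders the IDs root-first; logging this with every entry is
// what makes interleaved concurrent output easy to follow.
func stack(ctx context.Context) string {
	out := ""
	for n, _ := ctx.Value(stackKey{}).(*node); n != nil; n = n.parent {
		if out == "" {
			out = n.id
		} else {
			out = n.id + "$" + out
		}
	}
	return out
}

func main() {
	ctx := withSpan(context.Background(), "job-push")
	ctx = withSpan(ctx, "replication")
	ctx = withSpan(ctx, "step")
	fmt.Println(stack(ctx)) // job-push$replication$step
}
```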

usage in package daemon/logging:

- goal: every log entry should have a trace field with the ID stack from package trace

- make `logging.GetLogger(ctx, Subsys)` the authoritative `logger.Logger` factory function (see the sketch below)
    - the context carries a linked list of injected fields which
      `logging.GetLogger` adds to the logger it returns
    - `logging.GetLogger` also uses package `trace` to get the
      task-and-span-stack and injects it into the returned logger's fields
2020-05-19 11:30:02 +02:00
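
As a rough illustration of the bullet points above, here is a self-contained sketch; `WithInjectedField` and the map standing in for a logger are made up for this example, and zrepl's real `logging.GetLogger` additionally injects the task-and-span stack from package `trace`:

```go
package main

import (
	"context"
	"fmt"
)

// field is one element of the linked list of injected fields.
type field struct {
	parent *field
	key    string
	value  interface{}
}

type fieldsKey struct{}

// WithInjectedField prepends a key/value pair to the list in ctx.
func WithInjectedField(ctx context.Context, k string, v interface{}) context.Context {
	parent, _ := ctx.Value(fieldsKey{}).(*field)
	return context.WithValue(ctx, fieldsKey{}, &field{parent: parent, key: k, value: v})
}

// GetLogger folds the injected fields into the fields of the logger
// it returns (a plain map here, for brevity).
func GetLogger(ctx context.Context, subsys string) map[string]interface{} {
	fields := map[string]interface{}{"subsystem": subsys}
	for f, _ := ctx.Value(fieldsKey{}).(*field); f != nil; f = f.parent {
		fields[f.key] = f.value
	}
	return fields
}

func main() {
	ctx := WithInjectedField(context.Background(), "job", "push-to-backups")
	fmt.Println(GetLogger(ctx, "job"))
	// Output: map[job:push-to-backups subsystem:job]
}
```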


package job

import (
	"context"
	"encoding/json"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/zrepl/zrepl/daemon/logging"
	"github.com/zrepl/zrepl/endpoint"
	"github.com/zrepl/zrepl/logger"
	"github.com/zrepl/zrepl/zfs"
)

type Logger = logger.Logger
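
// GetLogger returns a Logger for the job subsystem, derived from ctx
// by package daemon/logging (injected fields plus the trace stack).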
func GetLogger(ctx context.Context) Logger {
	return logging.GetLogger(ctx, logging.SubsysJob)
}
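
// Job is the common interface implemented by all job types the
// daemon can run (see Type below).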
type Job interface {
	Name() string
	Run(ctx context.Context)
	Status() *Status
	RegisterMetrics(registerer prometheus.Registerer)
	// Jobs that return a subtree of the dataset hierarchy
	// must return the root of that subtree as rfs and ok = true
	OwnedDatasetSubtreeRoot() (rfs *zfs.DatasetPath, ok bool)
	SenderConfig() *endpoint.SenderConfig
}

type Type string

const (
	TypeInternal Type = "internal"
	TypeSnap     Type = "snap"
	TypePush     Type = "push"
	TypeSink     Type = "sink"
	TypePull     Type = "pull"
	TypeSource   Type = "source"
)

type Status struct {
	Type        Type
	JobSpecific interface{}
}

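// MarshalJSON encodes Status as a two-key JSON object: the job type
// under "type", and the job-specific status under a key named after
// that type, e.g. {"type":"push","push":{...}}.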
func (s *Status) MarshalJSON() ([]byte, error) {
	typeJSON, err := json.Marshal(s.Type)
	if err != nil {
		return nil, err
	}
	jobJSON, err := json.Marshal(s.JobSpecific)
	if err != nil {
		return nil, err
	}
	m := map[string]json.RawMessage{
		"type":         typeJSON,
		string(s.Type): jobJSON,
	}
	return json.Marshal(m)
}

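// UnmarshalJSON reads "type" first, then decodes the job-specific
// status from the key named after that type into the matching struct.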
func (s *Status) UnmarshalJSON(in []byte) (err error) {
	var m map[string]json.RawMessage
	if err := json.Unmarshal(in, &m); err != nil {
		return err
	}
	tJSON, ok := m["type"]
	if !ok {
		return fmt.Errorf("field 'type' not found")
	}
	if err := json.Unmarshal(tJSON, &s.Type); err != nil {
		return err
	}
	key := string(s.Type)
	jobJSON, ok := m[key]
	if !ok {
		return fmt.Errorf("field '%s' not found", key)
	}
	switch s.Type {
	case TypeSnap:
		var st SnapJobStatus
		err = json.Unmarshal(jobJSON, &st)
		s.JobSpecific = &st
	case TypePull:
		fallthrough
	case TypePush:
		var st ActiveSideStatus
		err = json.Unmarshal(jobJSON, &st)
		s.JobSpecific = &st
	case TypeSource:
		fallthrough
	case TypeSink:
		var st PassiveStatus
		err = json.Unmarshal(jobJSON, &st)
		s.JobSpecific = &st
	case TypeInternal:
		// internal jobs do not report specifics
	default:
		err = fmt.Errorf("unknown job type '%s'", key)
	}
	return err
}