mirror of https://github.com/zrepl/zrepl.git
commit 10a14a8c50
package trace:
- introduce the concept of tasks and spans, tracked as a linked list within ctx
  - see the package-level docs for an overview of the concepts
- **main feature 1**: unique stack of task and span IDs
  - makes it easy to follow a series of log entries in concurrent code
- **main feature 2**: ability to produce a chrome://tracing-compatible trace file
  - either via an env variable or a `zrepl pprof` subcommand
  - this is not a CPU profile, we already have go pprof for that
  - but it is very useful to visually inspect where the replication / snapshotter / pruner spends its time
  (fixes #307)

usage in package daemon/logging:
- goal: every log entry should have a trace field with the ID stack from package trace
- make `logging.GetLogger(ctx, Subsys)` the authoritative `logger.Logger` factory function
  - the context carries a linked list of injected fields which `logging.GetLogger` adds to the logger it returns
  - `logging.GetLogger` also uses package `trace` to get the task-and-span stack and injects it into the returned logger's fields
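To make the logging integration concrete, the following is a minimal, self-contained sketch of the idea only; it is not zrepl's implementation, and the names withSpan, getLogger, and spanKey are illustrative assumptions. It shows a stack of task/span IDs carried in the context and injected by a GetLogger-style factory:

// Illustrative sketch only (not zrepl's code): a context-carried stack of
// task/span IDs that a GetLogger-style factory injects into every log line.
package main

import (
	"context"
	"fmt"
	"log"
	"os"
	"strings"
)

type spanKey struct{} // context key for the ID stack (hypothetical)

// withSpan returns a child context whose ID stack has id pushed on top.
func withSpan(ctx context.Context, id string) context.Context {
	parent, _ := ctx.Value(spanKey{}).([]string)
	stack := append(append([]string(nil), parent...), id) // copy, don't alias
	return context.WithValue(ctx, spanKey{}, stack)
}

// getLogger plays the role of logging.GetLogger(ctx, subsys): it reads the
// task-and-span stack from ctx and injects it into the returned logger.
func getLogger(ctx context.Context, subsys string) *log.Logger {
	stack, _ := ctx.Value(spanKey{}).([]string)
	prefix := fmt.Sprintf("[%s][trace=%s] ", subsys, strings.Join(stack, "$"))
	return log.New(os.Stdout, prefix, 0)
}

func main() {
	ctx := withSpan(context.Background(), "task#replication") // task
	ctx = withSpan(ctx, "span#plan")                          // nested span
	getLogger(ctx, "repl").Println("planning done")
	// prints: [repl][trace=task#replication$span#plan] planning done
}

Keeping the stack in the context means each goroutine sees its own immutable prefix, which is what makes interleaved log output from concurrent code easy to follow.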
213 lines · 5.3 KiB · Go
package client

import (
	"context"
	"encoding/json"
	"fmt"
	"os"

	"github.com/pkg/errors"
	"github.com/spf13/pflag"

	"github.com/zrepl/zrepl/cli"
	"github.com/zrepl/zrepl/config"
	"github.com/zrepl/zrepl/daemon/filters"
	"github.com/zrepl/zrepl/zfs"
)

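// TestCmd is the parent `zrepl test` subcommand; it groups the filter,
// placeholder, and resume-token test helpers defined below.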
var TestCmd = &cli.Subcommand{
	Use: "test",
	SetupSubcommands: func() []*cli.Subcommand {
		return []*cli.Subcommand{testFilter, testPlaceholder, testDecodeResumeToken}
	},
}

var testFilterArgs struct {
	job   string
	all   bool
	input string
}

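// testFilter checks a job's filesystems filter against either a single
// filesystem name (--input) or all local filesystems (--all).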
var testFilter = &cli.Subcommand{
	Use:   "filesystems --job JOB [--all | --input INPUT]",
	Short: "test filesystems filter specified in push or source job",
	SetupFlags: func(f *pflag.FlagSet) {
		f.StringVar(&testFilterArgs.job, "job", "", "the name of the push or source job")
		f.StringVar(&testFilterArgs.input, "input", "", "a filesystem name to test against the job's filters")
		f.BoolVar(&testFilterArgs.all, "all", false, "test all local filesystems")
	},
	Run: runTestFilterCmd,
}

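// runTestFilterCmd resolves the filesystems filter of the named push, source,
// or snap job and prints an ACCEPT/REJECT/ERROR line per tested filesystem.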
func runTestFilterCmd(ctx context.Context, subcommand *cli.Subcommand, args []string) error {

	if testFilterArgs.job == "" {
		return fmt.Errorf("must specify --job flag")
	}
	if !(testFilterArgs.all != (testFilterArgs.input != "")) { // xor
		return fmt.Errorf("must set one: --all or --input")
	}

	conf := subcommand.Config()

	var confFilter config.FilesystemsFilter
	job, err := conf.Job(testFilterArgs.job)
	if err != nil {
		return err
	}
	switch j := job.Ret.(type) {
	case *config.SourceJob:
		confFilter = j.Filesystems
	case *config.PushJob:
		confFilter = j.Filesystems
	case *config.SnapJob:
		confFilter = j.Filesystems
	default:
		return fmt.Errorf("job type %T does not have filesystems filter", j)
	}

	f, err := filters.DatasetMapFilterFromConfig(confFilter)
	if err != nil {
		return fmt.Errorf("filter invalid: %s", err)
	}

	var fsnames []string
	if testFilterArgs.input != "" {
		fsnames = []string{testFilterArgs.input}
	} else {
		out, err := zfs.ZFSList(ctx, []string{"name"})
		if err != nil {
			return fmt.Errorf("could not list ZFS filesystems: %s", err)
		}
		for _, row := range out {
			fsnames = append(fsnames, row[0])
		}
	}

	fspaths := make([]*zfs.DatasetPath, len(fsnames))
	for i, fsname := range fsnames {
		path, err := zfs.NewDatasetPath(fsname)
		if err != nil {
			return err
		}
		fspaths[i] = path
	}

	hadFilterErr := false
	for _, in := range fspaths {
		var res string
		var errStr string
		pass, err := f.Filter(in)
		if err != nil {
			res = "ERROR"
			errStr = err.Error()
			hadFilterErr = true
		} else if pass {
			res = "ACCEPT"
		} else {
			res = "REJECT"
		}
		fmt.Printf("%s\t%s\t%s\n", res, in.ToString(), errStr)
	}

	if hadFilterErr {
		return fmt.Errorf("filter errors occurred")
	}
	return nil
}

var testPlaceholderArgs struct {
	ds  string
	all bool
}

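// testPlaceholder lists the placeholder state of received filesystems as
// recorded in the zfs property named by zfs.PlaceholderPropertyName.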
var testPlaceholder = &cli.Subcommand{
	Use:   "placeholder [--all | --dataset DATASET]",
	Short: fmt.Sprintf("list received placeholder filesystems (zfs property %q)", zfs.PlaceholderPropertyName),
	Example: `
	placeholder --all
	placeholder --dataset path/to/sink/clientident/fs`,
	NoRequireConfig: true,
	SetupFlags: func(f *pflag.FlagSet) {
		f.StringVar(&testPlaceholderArgs.ds, "dataset", "", "dataset path (not required to exist)")
		f.BoolVar(&testPlaceholderArgs.all, "all", false, "list tab-separated placeholder status of all filesystems")
	},
	Run: runTestPlaceholder,
}

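// runTestPlaceholder prints a tab-separated placeholder report for either all
// local filesystems (--all) or a single dataset (--dataset).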
func runTestPlaceholder(ctx context.Context, subcommand *cli.Subcommand, args []string) error {

	var checkDPs []*zfs.DatasetPath

	// all actions first
	if testPlaceholderArgs.all {
		out, err := zfs.ZFSList(ctx, []string{"name"})
		if err != nil {
			return errors.Wrap(err, "could not list ZFS filesystems")
		}
		for _, row := range out {
			dp, err := zfs.NewDatasetPath(row[0])
			if err != nil {
				panic(err)
			}
			checkDPs = append(checkDPs, dp)
		}
	} else {
		dp, err := zfs.NewDatasetPath(testPlaceholderArgs.ds)
		if err != nil {
			return err
		}
		if dp.Empty() {
			return fmt.Errorf("must specify --dataset DATASET or --all")
		}
		checkDPs = append(checkDPs, dp)
	}

	fmt.Printf("IS_PLACEHOLDER\tDATASET\tzrepl:placeholder\n")
	for _, dp := range checkDPs {
		ph, err := zfs.ZFSGetFilesystemPlaceholderState(ctx, dp)
		if err != nil {
			return errors.Wrap(err, "cannot get placeholder state")
		}
		if !ph.FSExists {
			panic("placeholder state inconsistent: filesystem " + ph.FS + " must exist in this context")
		}
		is := "yes"
		if !ph.IsPlaceholder {
			is = "no"
		}
		fmt.Printf("%s\t%s\t%s\n", is, dp.ToString(), ph.RawLocalPropertyValue)
	}
	return nil
}

var testDecodeResumeTokenArgs struct {
	token string
}

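// testDecodeResumeToken decodes a ZFS receive_resume_token passed via --token.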
var testDecodeResumeToken = &cli.Subcommand{
	Use:   "decoderesumetoken --token TOKEN",
	Short: "decode resume token",
	SetupFlags: func(f *pflag.FlagSet) {
		f.StringVar(&testDecodeResumeTokenArgs.token, "token", "", "the resume token obtained from the receive_resume_token property")
	},
	Run: runTestDecodeResumeTokenCmd,
}

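// runTestDecodeResumeTokenCmd parses the --token value with
// zfs.ParseResumeToken and writes the result to stdout as indented JSON.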
func runTestDecodeResumeTokenCmd(ctx context.Context, subcommand *cli.Subcommand, args []string) error {
	if testDecodeResumeTokenArgs.token == "" {
		return fmt.Errorf("token argument must be specified")
	}
	token, err := zfs.ParseResumeToken(ctx, testDecodeResumeTokenArgs.token)
	if err != nil {
		return err
	}
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	if err := enc.Encode(&token); err != nil {
		panic(err)
	}
	return nil
}