cmd: configurable logrus formatters

We lost the nice context-stack [jobname][taskname][...] at the beginning
of each log line when switching to logrus.

Define some field names that identify these contexts.
Write a human-friendly formatter that presents these fields in the same
style as the pre-logrus solution.

Write some other formatters for logfmt and json output along the way.

Limit ourselves to stdout logging for now.
This commit is contained in:
Christian Schwarz 2017-09-23 11:24:36 +02:00
parent 3ff9e6d2f7
commit 9465b593f9
10 changed files with 187 additions and 101 deletions

View File

@ -45,7 +45,7 @@ func (a *IntervalAutosnap) Run(ctx context.Context, didSnaps chan struct{}) {
a.log.Debug("examine filesystem state")
for i, d := range ds {
l := a.log.WithField("filesystem", d.ToString())
l := a.log.WithField(logFSField, d.ToString())
fsvs, err := zfs.ZFSListFilesystemVersions(d, &PrefixSnapshotFilter{a.Prefix})
if err != nil {
@ -129,7 +129,7 @@ func (a *IntervalAutosnap) doSnapshots(didSnaps chan struct{}) {
suffix := time.Now().In(time.UTC).Format("20060102_150405_000")
snapname := fmt.Sprintf("%s%s", a.Prefix, suffix)
a.log.WithField("filesystem", d.ToString()).
a.log.WithField(logFSField, d.ToString()).
WithField("snapname", snapname).
Info("create snapshot")

View File

@ -90,7 +90,7 @@ func (j *LocalJob) JobStart(ctx context.Context) {
// All local datasets will be passed to its Map() function,
// but only those for which a mapping exists will actually be pulled.
// We can pay this small performance penalty for now.
handler := NewHandler(log.WithField("task", "handler"), localPullACL{}, &PrefixSnapshotFilter{j.SnapshotPrefix})
handler := NewHandler(log.WithField(logTaskField, "handler"), localPullACL{}, &PrefixSnapshotFilter{j.SnapshotPrefix})
registerEndpoints(local, handler)
@ -112,7 +112,7 @@ func (j *LocalJob) JobStart(ctx context.Context) {
}
makeCtx := func(parent context.Context, taskName string) (ctx context.Context) {
return context.WithValue(parent, contextKeyLog, log.WithField("task", taskName))
return context.WithValue(parent, contextKeyLog, log.WithField(logTaskField, taskName))
}
var snapCtx, plCtx, prCtx, pullCtx context.Context
snapCtx = makeCtx(ctx, "autosnap")

View File

@ -119,7 +119,7 @@ start:
log.Printf("starting pull")
pullLog := log.WithField("task", "pull")
pullLog := log.WithField(logTaskField, "pull")
err = doPull(PullContext{client, pullLog, j.Mapping, j.InitialReplPolicy})
if err != nil {
log.Printf("error doing pull: %s", err)
@ -128,7 +128,7 @@ start:
closeRPCWithTimeout(log, client, time.Second*10, "")
log.Printf("starting prune")
prunectx := context.WithValue(ctx, contextKeyLog, log.WithField("task", "prune"))
prunectx := context.WithValue(ctx, contextKeyLog, log.WithField(logTaskField, "prune"))
pruner, err := j.Pruner(PrunePolicySideDefault, false)
if err != nil {
log.Printf("error creating pruner: %s", err)

View File

@ -84,9 +84,9 @@ func (j *SourceJob) JobStart(ctx context.Context) {
return
}
snapContext := context.WithValue(ctx, contextKeyLog, log.WithField("task", "autosnap"))
prunerContext := context.WithValue(ctx, contextKeyLog, log.WithField("task", "prune"))
serveContext := context.WithValue(ctx, contextKeyLog, log.WithField("task", "serve"))
snapContext := context.WithValue(ctx, contextKeyLog, log.WithField(logTaskField, "autosnap"))
prunerContext := context.WithValue(ctx, contextKeyLog, log.WithField(logTaskField, "prune"))
serveContext := context.WithValue(ctx, contextKeyLog, log.WithField(logTaskField, "serve"))
didSnaps := make(chan struct{})
go j.serve(serveContext)

View File

@ -3,75 +3,101 @@ package cmd
import (
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
//"github.com/rifflock/lfshook"
"github.com/sirupsen/logrus"
"os"
)
type LoggingConfig struct {
Stdout struct {
Level logrus.Level
Level logrus.Level
Format LogFormat
}
//LFS lfshook.PathMap
}
func parseLogging(i interface{}) (c *LoggingConfig, err error) {
c = &LoggingConfig{}
c.Stdout.Level = logrus.WarnLevel
c.Stdout.Format = LogFormatHuman
if i == nil {
return c, nil
}
var asMap struct {
Mate string
Stdout map[string]string
LFS map[string]string
Stdout struct {
Level string
Format string
}
}
if err = mapstructure.Decode(i, &asMap); err != nil {
return nil, errors.Wrap(err, "mapstructure error")
}
//if asMap.LFS != nil {
// c.LFS = make(map[logrus.Level]string, len(asMap.LFS))
// for level_str, path := range asMap.LFS {
// level, err := logrus.ParseLevel(level_str)
// if err != nil {
// return nil, errors.Wrapf(err, "cannot parse level '%s'", level_str)
// }
// if len(path) <= 0 {
// return nil, errors.Errorf("path must be longer than 0")
// }
// c.LFS[level] = path
// }
//}
if asMap.Stdout != nil {
lvl, err := logrus.ParseLevel(asMap.Stdout["level"])
if asMap.Stdout.Level != "" {
lvl, err := logrus.ParseLevel(asMap.Stdout.Level)
if err != nil {
return nil, errors.Wrap(err, "cannot parse stdout log level")
}
c.Stdout.Level = lvl
}
if asMap.Stdout.Format != "" {
format, err := parseLogFormat(asMap.Stdout.Format)
if err != nil {
return nil, errors.Wrap(err, "cannot parse log format")
}
c.Stdout.Format = format
}
return c, nil
}
type LogFormat string
const (
LogFormatHuman LogFormat = "human"
LogFormatLogfmt LogFormat = "logfmt"
LogFormatJSON LogFormat = "json"
)
func (f LogFormat) Formatter() logrus.Formatter {
switch f {
case LogFormatHuman:
return HumanFormatter{}
case LogFormatLogfmt:
return &logrus.TextFormatter{}
case LogFormatJSON:
return &logrus.JSONFormatter{}
default:
panic("incomplete implementation")
}
}
var LogFormats []LogFormat = []LogFormat{LogFormatHuman, LogFormatLogfmt, LogFormatJSON}
func parseLogFormat(i interface{}) (f LogFormat, err error) {
var is string
switch j := i.(type) {
case string:
is = j
default:
return "", errors.Errorf("invalid log format: wrong type: %T", i)
}
for _, f := range LogFormats {
if string(f) == is {
return f, nil
}
}
return "", errors.Errorf("invalid log format: '%s'", is)
}
func (c *LoggingConfig) MakeLogrus() (l logrus.FieldLogger) {
log := logrus.New()
log.Out = nopWriter(0)
log.Level = logrus.DebugLevel
//log.Level = logrus.DebugLevel
//
//if len(c.LFS) > 0 {
// lfshook := lfshook.NewHook(c.LFS)
// log.Hooks.Add(lfshook)
//}
stdhook := NewStdHook()
log.Hooks.Add(stdhook)
log.Out = os.Stdout
log.Level = c.Stdout.Level
log.Formatter = c.Stdout.Format.Formatter()
return log

View File

@ -73,7 +73,7 @@ func (d *Daemon) Loop(ctx context.Context) {
for _, job := range d.conf.Jobs {
log.Printf("starting job %s", job.JobName())
logger := log.WithField("job", job.JobName())
logger := log.WithField(logJobField, job.JobName())
i++
jobCtx := context.WithValue(ctx, contextKeyLog, logger)
go func(j Job) {

View File

@ -2,52 +2,123 @@ package cmd
import (
"bytes"
"encoding/json"
"fmt"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"os"
"strings"
"time"
)
type CLIFormatter struct {
const (
logJobField string = "job"
logTaskField string = "task"
logFSField string = "filesystem"
logMapFromField string = "map_from"
logMapToField string = "map_to"
logIncFromField string = "inc_from"
logIncToField string = "inc_to"
)
type NoFormatter struct{}
func (f NoFormatter) Format(e *logrus.Entry) ([]byte, error) {
return []byte(e.Message), nil
}
func (f CLIFormatter) Format(e *logrus.Entry) (out []byte, err error) {
var buf bytes.Buffer
fmt.Fprintf(&buf, "%s\n", e.Message)
return buf.Bytes(), nil
}
type HumanFormatter struct{}
var stdhookStderrLevels []logrus.Level = []logrus.Level{
logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel, logrus.WarnLevel,
}
type Stdhook struct {
}
func NewStdHook() *Stdhook {
return &Stdhook{}
}
func (h *Stdhook) Levels() []logrus.Level {
// Accept all so we can filter the output later
return []logrus.Level{
logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel, logrus.WarnLevel,
logrus.InfoLevel, logrus.DebugLevel,
func (f HumanFormatter) shortLevel(l logrus.Level) string {
switch l {
case logrus.DebugLevel:
return "DBG"
case logrus.InfoLevel:
return "INF"
case logrus.WarnLevel:
return "WRN"
case logrus.ErrorLevel:
return "ERR"
case logrus.PanicLevel:
return "PNC"
}
panic("incomplete implementation")
}
func (h *Stdhook) Fire(entry *logrus.Entry) error {
s, err := entry.String()
if err != nil {
return err
}
for _, l := range stdhookStderrLevels {
if l == entry.Level {
fmt.Fprint(os.Stderr, s)
return nil
func (f HumanFormatter) Format(e *logrus.Entry) (out []byte, err error) {
var line bytes.Buffer
fmt.Fprintf(&line, "[%s]", f.shortLevel(e.Level))
prefixFields := []string{logJobField, logTaskField, logFSField}
prefixed := make(map[string]bool, len(prefixFields)+2)
for _, field := range prefixFields {
val, ok := e.Data[field].(string)
if ok {
fmt.Fprintf(&line, "[%s]", val)
prefixed[field] = true
} else {
break
}
}
fmt.Fprint(os.Stdout, s)
return nil
// even more prefix fields
mapFrom, mapFromOk := e.Data[logMapFromField].(string)
mapTo, mapToOk := e.Data[logMapToField].(string)
if mapFromOk && mapToOk {
fmt.Fprintf(&line, "[%s => %s]", mapFrom, mapTo)
prefixed[logMapFromField], prefixed[logMapToField] = true, true
}
incFrom, incFromOk := e.Data[logIncFromField].(string)
incTo, incToOk := e.Data[logIncToField].(string)
if incFromOk && incToOk {
fmt.Fprintf(&line, "[%s => %s]", incFrom, incTo)
prefixed[logIncFromField], prefixed[logIncToField] = true, true
}
fmt.Fprintf(&line, ": %s", e.Message)
for field, value := range e.Data {
if prefixed[field] {
continue
}
if strings.ContainsAny(field, " \t") {
return nil, errors.Errorf("field must not contain whitespace: '%s'", field)
}
fmt.Fprintf(&line, " %s=\"%s\"", field, value)
}
fmt.Fprintf(&line, "\n")
return line.Bytes(), nil
}
type JSONFormatter struct{}
func (f JSONFormatter) Format(e *logrus.Entry) ([]byte, error) {
data := make(logrus.Fields, len(e.Data)+3)
for k, v := range e.Data {
switch v := v.(type) {
case error:
// Otherwise errors are ignored by `encoding/json`
// https://github.com/sirupsen/logrus/issues/137
data[k] = v.Error()
default:
_, err := json.Marshal(v)
if err != nil {
return nil, errors.Errorf("field is not JSON encodable: %s", k)
}
data[k] = v
}
}
data["msg"] = e.Message
data["time"] = e.Time.Format(time.RFC3339)
data["level"] = e.Level
return json.Marshal(data)
}
type nopWriter int

View File

@ -2,7 +2,6 @@ package cmd
import (
"context"
"encoding/json"
"fmt"
"github.com/zrepl/zrepl/zfs"
"time"
@ -45,7 +44,7 @@ func (p *Pruner) Run(ctx context.Context) (r []PruneResult, err error) {
for _, fs := range filesystems {
log := log.WithField("filesystem", fs.ToString())
log := log.WithField(logFSField, fs.ToString())
fsversions, err := zfs.ZFSListFilesystemVersions(fs, &PrefixSnapshotFilter{p.SnapshotPrefix})
if err != nil {
@ -57,26 +56,16 @@ func (p *Pruner) Run(ctx context.Context) (r []PruneResult, err error) {
continue
}
dbgj, err := json.Marshal(fsversions)
if err != nil {
panic(err)
}
log.WithField("fsversions", string(dbgj)).Debug()
keep, remove, err := p.PrunePolicy.Prune(fs, fsversions)
if err != nil {
log.WithError(err).Error("error evaluating prune policy")
continue
}
dbgj, err = json.Marshal(keep)
if err != nil {
panic(err)
}
log.WithField("keep", string(dbgj)).Debug()
dbgj, err = json.Marshal(remove)
log.WithField("remove", string(dbgj)).Debug()
log.WithField("fsversions", fsversions).
WithField("keep", keep).
WithField("remove", remove).
Debug("prune policy debug dump")
r = append(r, PruneResult{fs, fsversions, keep, remove})

View File

@ -90,8 +90,8 @@ func doPull(pull PullContext) (err error) {
if localFs == nil {
continue
}
log.WithField("map_remote", remoteFilesystems[fs].ToString()).
WithField("map_local", localFs.ToString()).Debug()
log.WithField(logMapFromField, remoteFilesystems[fs].ToString()).
WithField(logMapToField, localFs.ToString()).Debug()
m := RemoteLocalMapping{remoteFilesystems[fs], localFs}
replMapping[m.Local.ToString()] = m
localTraversal.Add(m.Local)
@ -107,7 +107,7 @@ func doPull(pull PullContext) (err error) {
log.Info("start per-filesystem sync")
localTraversal.WalkTopDown(func(v zfs.DatasetPathVisit) bool {
log := log.WithField("filesystem", v.Path.ToString())
log := log.WithField(logFSField, v.Path.ToString())
if v.FilledIn {
if _, exists := localFilesystemState[v.Path.ToString()]; exists {
@ -129,8 +129,8 @@ func doPull(pull PullContext) (err error) {
panic("internal inconsistency: replMapping should contain mapping for any path that was not filled in by WalkTopDown()")
}
log = log.WithField("map_remote", m.Remote.ToString()).
WithField("map_local", m.Local.ToString())
log = log.WithField(logMapToField, m.Remote.ToString()).
WithField(logMapFromField, m.Local.ToString())
log.Debug("examing local filesystem state")
localState, localExists := localFilesystemState[m.Local.ToString()]
@ -245,7 +245,7 @@ func doPull(pull PullContext) (err error) {
from, to := diff.IncrementalPath[i], diff.IncrementalPath[i+1]
log, _ := log.WithField("inc_from", from.Name).WithField("inc_to", to.Name), 0
log, _ := log.WithField(logIncFromField, from.Name).WithField(logIncToField, to.Name), 0
log.Debug("requesting incremental snapshot stream")
r := IncrementalTransferRequest{

View File

@ -65,7 +65,7 @@ func init() {
func testCmdGlobalInit(cmd *cobra.Command, args []string) {
log := logrus.New()
log.Formatter = CLIFormatter{}
log.Formatter = NoFormatter{}
testCmdGlobal.log = log
var err error