format source tree using goimports

Christian Schwarz 2019-03-22 19:41:12 +01:00
parent 5324f29693
commit afed762774
93 changed files with 585 additions and 463 deletions

View File

@ -30,6 +30,9 @@ generate: #not part of the build, must do that manually
protoc -I=replication/logic/pdu --go_out=plugins=grpc:replication/logic/pdu replication/logic/pdu/pdu.proto
go generate -x ./...
format:
goimports -srcdir . -local 'github.com/zrepl/zrepl' -w $(shell find . -type f -name '*.go' -not -path "./vendor/*")
build:
@echo "INFO: In case of missing dependencies, run 'make vendordeps'"
$(GO_BUILD) -o "$(ARTIFACTDIR)/zrepl"
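For context: in addition to applying gofmt's alignment rules, goimports rewrites each file's import block so that standard-library imports form the first group, third-party modules the second, and, because of the -local flag above, packages under github.com/zrepl/zrepl a trailing third group. The sketch below is illustrative only (the file name, package name, run function, and --config flag are invented for this example; the imported packages are real ones used elsewhere in this commit) and shows the layout that goimports -local 'github.com/zrepl/zrepl' -w would leave behind:

// example.go: illustrative only, not part of the zrepl tree.
package example

import (
	// group 1: standard library
	"fmt"
	"os"

	// group 2: third-party modules
	"github.com/pkg/errors"
	"github.com/spf13/pflag"

	// group 3: packages under the -local prefix github.com/zrepl/zrepl
	"github.com/zrepl/zrepl/cli"
	"github.com/zrepl/zrepl/config"
)

// run parses a hypothetical --config flag; its only purpose is to give every import a use.
func run(args []string) error {
	flags := pflag.NewFlagSet("example", pflag.ContinueOnError)
	configPath := flags.String("config", "", "path to a zrepl config file")
	if err := flags.Parse(args); err != nil {
		return errors.Wrap(err, "parse flags")
	}
	var sub cli.Subcommand
	var cfg *config.Config
	fmt.Fprintf(os.Stdout, "config=%q subcommand=%q cfg=%v\n", *configPath, sub.Use, cfg)
	return nil
}

Most of the hunks below are exactly this kind of mechanical change: import blocks regrouped, struct fields and map literals realigned, and spacing around operators normalized, with no change in behavior.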

View File

@ -11,7 +11,9 @@
package main
import (
"fmt"
_ "fmt"
_ "github.com/alvaroloes/enumer"
_ "github.com/golang/protobuf/protoc-gen-go"
_ "golang.org/x/tools/cmd/stringer"

View File

@ -2,10 +2,12 @@ package cli
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/zrepl/zrepl/config"
"os"
)
var rootArgs struct {
@ -40,15 +42,15 @@ func init() {
}
type Subcommand struct {
Use string
Short string
Example string
NoRequireConfig bool
Run func(subcommand *Subcommand, args []string) error
SetupFlags func(f *pflag.FlagSet)
SetupSubcommands func() []*Subcommand
Use string
Short string
Example string
NoRequireConfig bool
Run func(subcommand *Subcommand, args []string) error
SetupFlags func(f *pflag.FlagSet)
SetupSubcommands func() []*Subcommand
config *config.Config
config *config.Config
configErr error
}
@ -93,8 +95,8 @@ func AddSubcommand(s *Subcommand) {
func addSubcommandToCobraCmd(c *cobra.Command, s *Subcommand) {
cmd := cobra.Command{
Use: s.Use,
Short: s.Short,
Use: s.Use,
Short: s.Short,
Example: s.Example,
}
if s.SetupSubcommands == nil {
@ -110,7 +112,6 @@ func addSubcommandToCobraCmd(c *cobra.Command, s *Subcommand) {
c.AddCommand(&cmd)
}
func Run() {
if err := rootCmd.Execute(); err != nil {
os.Exit(1)

View File

@ -3,33 +3,35 @@ package client
import (
"encoding/json"
"fmt"
"os"
"github.com/kr/pretty"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/zrepl/yaml-config"
"github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/job"
"github.com/zrepl/zrepl/daemon/logging"
"github.com/zrepl/zrepl/logger"
"os"
)
var configcheckArgs struct {
format string
what string
what string
}
var ConfigcheckCmd = &cli.Subcommand{
Use: "configcheck",
Use: "configcheck",
Short: "check if config can be parsed without errors",
SetupFlags: func(f *pflag.FlagSet) {
f.StringVar(&configcheckArgs.format, "format", "", "dump parsed config object [pretty|yaml|json]")
f.StringVar(&configcheckArgs.what, "what", "all", "what to print [all|config|jobs|logging]")
},
Run: func(subcommand *cli.Subcommand, args []string) error {
formatMap := map[string]func(interface{}) {
"": func(i interface{}) {},
formatMap := map[string]func(interface{}){
"": func(i interface{}) {},
"pretty": func(i interface{}) { pretty.Println(i) },
"json": func(i interface{}) {
json.NewEncoder(os.Stdout).Encode(subcommand.Config())
@ -71,12 +73,11 @@ var ConfigcheckCmd = &cli.Subcommand{
}
}
whatMap := map[string]func() {
whatMap := map[string]func(){
"all": func() {
o := struct {
config *config.Config
jobs []job.Job
config *config.Config
jobs []job.Job
logging *logger.Outlets
}{
subcommand.Config(),
@ -109,4 +110,3 @@ var ConfigcheckCmd = &cli.Subcommand{
}
},
}

View File

@ -4,10 +4,11 @@ import (
"bytes"
"context"
"encoding/json"
"github.com/pkg/errors"
"io"
"net"
"net/http"
"github.com/pkg/errors"
)
func controlHttpClient(sockpath string) (client http.Client, err error) {

View File

@ -6,6 +6,7 @@ import (
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/zrepl/zrepl/zfs"
"github.com/zrepl/zrepl/cli"

View File

@ -2,11 +2,12 @@ package client
import (
"errors"
"log"
"os"
"github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon"
"log"
"os"
)
var pprofArgs struct {

View File

@ -2,6 +2,7 @@ package client
import (
"github.com/pkg/errors"
"github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon"
@ -28,10 +29,10 @@ func runSignalCmd(config *config.Config, args []string) error {
err = jsonRequestResponse(httpc, daemon.ControlJobEndpointSignal,
struct {
Name string
Op string
Op string
}{
Name: args[1],
Op: args[0],
Op: args[0],
},
struct{}{},
)

View File

@ -2,15 +2,6 @@ package client
import (
"fmt"
"github.com/gdamore/tcell/termbox"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/zrepl/yaml-config"
"github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/daemon"
"github.com/zrepl/zrepl/daemon/job"
"github.com/zrepl/zrepl/daemon/pruner"
"github.com/zrepl/zrepl/replication/report"
"io"
"math"
"net/http"
@ -19,18 +10,29 @@ import (
"strings"
"sync"
"time"
"github.com/gdamore/tcell/termbox"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/zrepl/yaml-config"
"github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/daemon"
"github.com/zrepl/zrepl/daemon/job"
"github.com/zrepl/zrepl/daemon/pruner"
"github.com/zrepl/zrepl/replication/report"
)
type byteProgressMeasurement struct {
time time.Time
val int64
val int64
}
type bytesProgressHistory struct {
last *byteProgressMeasurement // pointer as poor man's optional
last *byteProgressMeasurement // pointer as poor man's optional
changeCount int
lastChange time.Time
bpsAvg float64
lastChange time.Time
bpsAvg float64
}
func (p *bytesProgressHistory) Update(currentVal int64) (bytesPerSecondAvg int64, changeCount int) {
@ -38,7 +40,7 @@ func (p *bytesProgressHistory) Update(currentVal int64) (bytesPerSecondAvg int64
if p.last == nil {
p.last = &byteProgressMeasurement{
time: time.Now(),
val: currentVal,
val: currentVal,
}
return 0, 0
}
@ -48,18 +50,17 @@ func (p *bytesProgressHistory) Update(currentVal int64) (bytesPerSecondAvg int64
p.lastChange = time.Now()
}
if time.Now().Sub(p.lastChange) > 3 * time.Second {
if time.Now().Sub(p.lastChange) > 3*time.Second {
p.last = nil
return 0, 0
}
deltaV := currentVal - p.last.val;
deltaV := currentVal - p.last.val
deltaT := time.Now().Sub(p.last.time)
rate := float64(deltaV) / deltaT.Seconds()
factor := 0.3
p.bpsAvg = (1-factor) * p.bpsAvg + factor * rate
p.bpsAvg = (1-factor)*p.bpsAvg + factor*rate
p.last.time = time.Now()
p.last.val = currentVal
@ -119,7 +120,7 @@ func wrap(s string, width int) string {
rem = len(s)
}
if idx := strings.IndexAny(s, "\n\r"); idx != -1 && idx < rem {
rem = idx+1
rem = idx + 1
}
untilNewline := strings.TrimRight(s[:rem], "\n\r")
s = s[rem:]
@ -135,12 +136,12 @@ func wrap(s string, width int) string {
func (t *tui) printfDrawIndentedAndWrappedIfMultiline(format string, a ...interface{}) {
whole := fmt.Sprintf(format, a...)
width, _ := termbox.Size()
if !strings.ContainsAny(whole, "\n\r") && t.x + len(whole) <= width {
if !strings.ContainsAny(whole, "\n\r") && t.x+len(whole) <= width {
t.printf(format, a...)
} else {
t.addIndent(1)
t.newline()
t.write(wrap(whole, width - INDENT_MULTIPLIER*t.indent))
t.write(wrap(whole, width-INDENT_MULTIPLIER*t.indent))
t.addIndent(-1)
}
}
@ -159,7 +160,6 @@ func (t *tui) addIndent(indent int) {
t.moveLine(0, 0)
}
var statusFlags struct {
Raw bool
}
@ -180,7 +180,7 @@ func runStatus(s *cli.Subcommand, args []string) error {
}
if statusFlags.Raw {
resp, err := httpc.Get("http://unix"+daemon.ControlJobEndpointStatus)
resp, err := httpc.Get("http://unix" + daemon.ControlJobEndpointStatus)
if err != nil {
return err
}
@ -390,7 +390,7 @@ func (t *tui) renderReplicationReport(rep *report.Report, history *bytesProgress
t.newline()
t.addIndent(1)
for i, a := range rep.Attempts[:len(rep.Attempts)-1] {
t.printfDrawIndentedAndWrappedIfMultiline("#%d: %s (failed at %s) (ran %s)", i + 1, a.State, a.FinishAt, a.FinishAt.Sub(a.StartAt))
t.printfDrawIndentedAndWrappedIfMultiline("#%d: %s (failed at %s) (ran %s)", i+1, a.State, a.FinishAt, a.FinishAt.Sub(a.StartAt))
t.newline()
}
t.addIndent(-1)
@ -462,7 +462,7 @@ func (t *tui) renderPrunerReport(r *pruner.Report) {
*pruner.FSReport
completed bool
}
all := make([]commonFS, 0, len(r.Pending) + len(r.Completed))
all := make([]commonFS, 0, len(r.Pending)+len(r.Completed))
for i := range r.Pending {
all = append(all, commonFS{&r.Pending[i], false})
}
@ -471,7 +471,8 @@ func (t *tui) renderPrunerReport(r *pruner.Report) {
}
switch state {
case pruner.Plan: fallthrough
case pruner.Plan:
fallthrough
case pruner.PlanErr:
return
}
@ -499,7 +500,7 @@ func (t *tui) renderPrunerReport(r *pruner.Report) {
t.write("[")
t.write(times("=", progress))
t.write(">")
t.write(times("-", 80 - progress))
t.write(times("-", 80-progress))
t.write("]")
t.printf(" %d/%d snapshots", completedDestroyCount, totalDestroyCount)
t.newline()
@ -519,9 +520,9 @@ func (t *tui) renderPrunerReport(r *pruner.Report) {
if fs.LastError != "" {
if strings.ContainsAny(fs.LastError, "\r\n") {
t.printf("ERROR:")
t.printfDrawIndentedAndWrappedIfMultiline("%s\n", fs.LastError)
t.printfDrawIndentedAndWrappedIfMultiline("%s\n", fs.LastError)
} else {
t.printfDrawIndentedAndWrappedIfMultiline("ERROR: %s\n", fs.LastError)
t.printfDrawIndentedAndWrappedIfMultiline("ERROR: %s\n", fs.LastError)
}
t.newline()
continue
@ -531,7 +532,7 @@ func (t *tui) renderPrunerReport(r *pruner.Report) {
len(fs.DestroyList), len(fs.SnapshotList))
if fs.completed {
t.printf( "Completed %s\n", pruneRuleActionStr)
t.printf("Completed %s\n", pruneRuleActionStr)
continue
}
@ -560,7 +561,6 @@ func rightPad(str string, length int, pad string) string {
return str + times(pad, length-len(str))
}
func leftPad(str string, length int, pad string) string {
if len(str) > length {
return str[len(str)-length:]
@ -584,7 +584,7 @@ func (t *tui) drawBar(length int, bytes, totalBytes int64, changeCount int) {
t.write("[")
t.write(times("=", completedLength))
t.write( string(arrowPositions[changeCount%len(arrowPositions)]))
t.write(string(arrowPositions[changeCount%len(arrowPositions)]))
t.write(times("-", length-completedLength))
t.write("]")
}

View File

@ -1,13 +1,15 @@
package client
import (
"github.com/zrepl/zrepl/cli"
"os"
"github.com/problame/go-netssh"
"github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/config"
"context"
"errors"
"github.com/problame/go-netssh"
"github.com/zrepl/zrepl/config"
"log"
"path"
)

View File

@ -2,15 +2,17 @@ package client
import (
"fmt"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/filters"
"github.com/zrepl/zrepl/zfs"
)
var TestCmd = &cli.Subcommand {
var TestCmd = &cli.Subcommand{
Use: "test",
SetupSubcommands: func() []*cli.Subcommand {
return []*cli.Subcommand{testFilter, testPlaceholder}
@ -18,13 +20,13 @@ var TestCmd = &cli.Subcommand {
}
var testFilterArgs struct {
job string
all bool
job string
all bool
input string
}
var testFilter = &cli.Subcommand{
Use: "filesystems --job JOB [--all | --input INPUT]",
Use: "filesystems --job JOB [--all | --input INPUT]",
Short: "test filesystems filter specified in push or source job",
SetupFlags: func(f *pflag.FlagSet) {
f.StringVar(&testFilterArgs.job, "job", "", "the name of the push or source job")
@ -51,8 +53,10 @@ func runTestFilterCmd(subcommand *cli.Subcommand, args []string) error {
return err
}
switch j := job.Ret.(type) {
case *config.SourceJob: confFilter = j.Filesystems
case *config.PushJob: confFilter = j.Filesystems
case *config.SourceJob:
confFilter = j.Filesystems
case *config.PushJob:
confFilter = j.Filesystems
default:
return fmt.Errorf("job type %T does not have filesystems filter", j)
}

View File

@ -2,23 +2,25 @@ package client
import (
"fmt"
"os"
"github.com/spf13/pflag"
"github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon"
"github.com/zrepl/zrepl/version"
"os"
)
var versionArgs struct {
Show string
Config *config.Config
Show string
Config *config.Config
ConfigErr error
}
var VersionCmd = &cli.Subcommand{
Use: "version",
Short: "print version of zrepl binary and running daemon",
Use: "version",
Short: "print version of zrepl binary and running daemon",
NoRequireConfig: true,
SetupFlags: func(f *pflag.FlagSet) {
f.StringVar(&versionArgs.Show, "show", "", "version info to show (client|daemon)")

View File

@ -2,8 +2,6 @@ package config
import (
"fmt"
"github.com/pkg/errors"
"github.com/zrepl/yaml-config"
"io/ioutil"
"log/syslog"
"os"
@ -11,6 +9,9 @@ import (
"regexp"
"strconv"
"time"
"github.com/pkg/errors"
"github.com/zrepl/yaml-config"
)
type Config struct {
@ -34,11 +35,16 @@ type JobEnum struct {
func (j JobEnum) Name() string {
var name string
switch v := j.Ret.(type) {
case *SnapJob: name = v.Name
case *PushJob: name = v.Name
case *SinkJob: name = v.Name
case *PullJob: name = v.Name
case *SourceJob: name = v.Name
case *SnapJob:
name = v.Name
case *PushJob:
name = v.Name
case *SinkJob:
name = v.Name
case *PullJob:
name = v.Name
case *SourceJob:
name = v.Name
default:
panic(fmt.Sprintf("unknown job type %T", v))
}
@ -46,38 +52,38 @@ func (j JobEnum) Name() string {
}
type ActiveJob struct {
Type string `yaml:"type"`
Name string `yaml:"name"`
Connect ConnectEnum `yaml:"connect"`
Pruning PruningSenderReceiver `yaml:"pruning"`
Debug JobDebugSettings `yaml:"debug,optional"`
Type string `yaml:"type"`
Name string `yaml:"name"`
Connect ConnectEnum `yaml:"connect"`
Pruning PruningSenderReceiver `yaml:"pruning"`
Debug JobDebugSettings `yaml:"debug,optional"`
}
type PassiveJob struct {
Type string `yaml:"type"`
Name string `yaml:"name"`
Serve ServeEnum `yaml:"serve"`
Debug JobDebugSettings `yaml:"debug,optional"`
Type string `yaml:"type"`
Name string `yaml:"name"`
Serve ServeEnum `yaml:"serve"`
Debug JobDebugSettings `yaml:"debug,optional"`
}
type SnapJob struct {
Type string `yaml:"type"`
Name string `yaml:"name"`
Pruning PruningLocal `yaml:"pruning"`
Debug JobDebugSettings `yaml:"debug,optional"`
Snapshotting SnapshottingEnum `yaml:"snapshotting"`
Filesystems FilesystemsFilter `yaml:"filesystems"`
Type string `yaml:"type"`
Name string `yaml:"name"`
Pruning PruningLocal `yaml:"pruning"`
Debug JobDebugSettings `yaml:"debug,optional"`
Snapshotting SnapshottingEnum `yaml:"snapshotting"`
Filesystems FilesystemsFilter `yaml:"filesystems"`
}
type PushJob struct {
ActiveJob `yaml:",inline"`
Snapshotting SnapshottingEnum `yaml:"snapshotting"`
Filesystems FilesystemsFilter `yaml:"filesystems"`
ActiveJob `yaml:",inline"`
Snapshotting SnapshottingEnum `yaml:"snapshotting"`
Filesystems FilesystemsFilter `yaml:"filesystems"`
}
type PullJob struct {
ActiveJob `yaml:",inline"`
RootFS string `yaml:"root_fs"`
RootFS string `yaml:"root_fs"`
Interval PositiveDurationOrManual `yaml:"interval"`
}
@ -118,9 +124,9 @@ type SinkJob struct {
}
type SourceJob struct {
PassiveJob `yaml:",inline"`
Snapshotting SnapshottingEnum `yaml:"snapshotting"`
Filesystems FilesystemsFilter `yaml:"filesystems"`
PassiveJob `yaml:",inline"`
Snapshotting SnapshottingEnum `yaml:"snapshotting"`
Filesystems FilesystemsFilter `yaml:"filesystems"`
}
type FilesystemsFilter map[string]bool
@ -130,8 +136,8 @@ type SnapshottingEnum struct {
}
type SnapshottingPeriodic struct {
Type string `yaml:"type"`
Prefix string `yaml:"prefix"`
Type string `yaml:"type"`
Prefix string `yaml:"prefix"`
Interval time.Duration `yaml:"interval,positive"`
}
@ -191,7 +197,7 @@ type ConnectEnum struct {
}
type ConnectCommon struct {
Type string `yaml:"type"`
Type string `yaml:"type"`
}
type TCPConnect struct {
@ -223,8 +229,8 @@ type SSHStdinserverConnect struct {
}
type LocalConnect struct {
ConnectCommon `yaml:",inline"`
ListenerName string `yaml:"listener_name"`
ConnectCommon `yaml:",inline"`
ListenerName string `yaml:"listener_name"`
ClientIdentity string `yaml:"client_identity"`
}
@ -233,7 +239,7 @@ type ServeEnum struct {
}
type ServeCommon struct {
Type string `yaml:"type"`
Type string `yaml:"type"`
}
type TCPServe struct {
@ -253,12 +259,12 @@ type TLSServe struct {
}
type StdinserverServer struct {
ServeCommon `yaml:",inline"`
ServeCommon `yaml:",inline"`
ClientIdentities []string `yaml:"client_identities"`
}
type LocalServe struct {
ServeCommon `yaml:",inline"`
ServeCommon `yaml:",inline"`
ListenerName string `yaml:"listener_name"`
}
@ -267,8 +273,8 @@ type PruningEnum struct {
}
type PruneKeepNotReplicated struct {
Type string `yaml:"type"`
KeepSnapshotAtCursor bool `yaml:"keep_snapshot_at_cursor,optional,default=true"`
Type string `yaml:"type"`
KeepSnapshotAtCursor bool `yaml:"keep_snapshot_at_cursor,optional,default=true"`
}
type PruneKeepLastN struct {
@ -277,8 +283,8 @@ type PruneKeepLastN struct {
}
type PruneKeepRegex struct { // FIXME rename to KeepRegex
Type string `yaml:"type"`
Regex string `yaml:"regex"`
Type string `yaml:"type"`
Regex string `yaml:"regex"`
Negate bool `yaml:"negate,optional,default=false"`
}
@ -301,7 +307,7 @@ type StdoutLoggingOutlet struct {
type SyslogLoggingOutlet struct {
LoggingOutletCommon `yaml:",inline"`
Facility *SyslogFacility `yaml:"facility,optional,fromdefaults"`
RetryInterval time.Duration `yaml:"retry_interval,positive,default=10s"`
RetryInterval time.Duration `yaml:"retry_interval,positive,default=10s"`
}
type TCPLoggingOutlet struct {
@ -392,7 +398,7 @@ func (t *ConnectEnum) UnmarshalYAML(u func(interface{}, bool) error) (err error)
"tcp": &TCPConnect{},
"tls": &TLSConnect{},
"ssh+stdinserver": &SSHStdinserverConnect{},
"local": &LocalConnect{},
"local": &LocalConnect{},
})
return
}
@ -402,7 +408,7 @@ func (t *ServeEnum) UnmarshalYAML(u func(interface{}, bool) error) (err error) {
"tcp": &TCPServe{},
"tls": &TLSServe{},
"stdinserver": &StdinserverServer{},
"local" : &LocalServe{},
"local": &LocalServe{},
})
return
}
@ -420,7 +426,7 @@ func (t *PruningEnum) UnmarshalYAML(u func(interface{}, bool) error) (err error)
func (t *SnapshottingEnum) UnmarshalYAML(u func(interface{}, bool) error) (err error) {
t.Ret, err = enumUnmarshal(u, map[string]interface{}{
"periodic": &SnapshottingPeriodic{},
"manual": &SnapshottingManual{},
"manual": &SnapshottingManual{},
})
return
}
@ -448,31 +454,51 @@ func (t *SyslogFacility) UnmarshalYAML(u func(interface{}, bool) error) (err err
}
var level syslog.Priority
switch s {
case "kern": level = syslog.LOG_KERN
case "user": level = syslog.LOG_USER
case "mail": level = syslog.LOG_MAIL
case "daemon": level = syslog.LOG_DAEMON
case "auth": level = syslog.LOG_AUTH
case "syslog": level = syslog.LOG_SYSLOG
case "lpr": level = syslog.LOG_LPR
case "news": level = syslog.LOG_NEWS
case "uucp": level = syslog.LOG_UUCP
case "cron": level = syslog.LOG_CRON
case "authpriv": level = syslog.LOG_AUTHPRIV
case "ftp": level = syslog.LOG_FTP
case "local0": level = syslog.LOG_LOCAL0
case "local1": level = syslog.LOG_LOCAL1
case "local2": level = syslog.LOG_LOCAL2
case "local3": level = syslog.LOG_LOCAL3
case "local4": level = syslog.LOG_LOCAL4
case "local5": level = syslog.LOG_LOCAL5
case "local6": level = syslog.LOG_LOCAL6
case "local7": level = syslog.LOG_LOCAL7
case "kern":
level = syslog.LOG_KERN
case "user":
level = syslog.LOG_USER
case "mail":
level = syslog.LOG_MAIL
case "daemon":
level = syslog.LOG_DAEMON
case "auth":
level = syslog.LOG_AUTH
case "syslog":
level = syslog.LOG_SYSLOG
case "lpr":
level = syslog.LOG_LPR
case "news":
level = syslog.LOG_NEWS
case "uucp":
level = syslog.LOG_UUCP
case "cron":
level = syslog.LOG_CRON
case "authpriv":
level = syslog.LOG_AUTHPRIV
case "ftp":
level = syslog.LOG_FTP
case "local0":
level = syslog.LOG_LOCAL0
case "local1":
level = syslog.LOG_LOCAL1
case "local2":
level = syslog.LOG_LOCAL2
case "local3":
level = syslog.LOG_LOCAL3
case "local4":
level = syslog.LOG_LOCAL4
case "local5":
level = syslog.LOG_LOCAL5
case "local6":
level = syslog.LOG_LOCAL6
case "local7":
level = syslog.LOG_LOCAL7
default:
return fmt.Errorf("invalid syslog level: %q", s)
}
*t = SyslogFacility(level)
return nil
return nil
}
var ConfigFileDefaultLocations = []string{

View File

@ -2,11 +2,12 @@ package config
import (
"fmt"
"log/syslog"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zrepl/yaml-config"
"log/syslog"
"testing"
)
func testValidGlobalSection(t *testing.T, s string) *Config {
@ -24,7 +25,7 @@ jobs:
`
_, err := ParseConfigBytes([]byte(jobdef))
require.NoError(t, err)
return testValidConfig(t, s + jobdef)
return testValidConfig(t, s+jobdef)
}
func TestOutletTypes(t *testing.T) {
@ -71,7 +72,7 @@ global:
- type: prometheus
listen: ':9091'
`)
assert.Equal(t, ":9091", conf.Global.Monitoring[0].Ret.(*PrometheusMonitoring).Listen)
assert.Equal(t, ":9091", conf.Global.Monitoring[0].Ret.(*PrometheusMonitoring).Listen)
}
func TestSyslogLoggingOutletFacility(t *testing.T) {

View File

@ -2,6 +2,7 @@ package config
import (
"testing"
"github.com/stretchr/testify/assert"
)
@ -36,4 +37,4 @@ jobs:
- type: last_n
count: 1
`)
}
}

View File

@ -2,9 +2,10 @@ package config
import (
"fmt"
"github.com/stretchr/testify/assert"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestSnapshotting(t *testing.T) {
@ -37,7 +38,7 @@ jobs:
interval: 10m
`
fillSnapshotting := func(s string) string {return fmt.Sprintf(tmpl, s)}
fillSnapshotting := func(s string) string { return fmt.Sprintf(tmpl, s) }
var c *Config
t.Run("manual", func(t *testing.T) {
@ -51,7 +52,7 @@ jobs:
snp := c.Jobs[0].Ret.(*PushJob).Snapshotting.Ret.(*SnapshottingPeriodic)
assert.Equal(t, "periodic", snp.Type)
assert.Equal(t, 10*time.Minute, snp.Interval)
assert.Equal(t, "zrepl_" , snp.Prefix)
assert.Equal(t, "zrepl_", snp.Prefix)
})
}

View File

@ -11,9 +11,9 @@ import (
type RetentionIntervalList []RetentionInterval
type PruneGrid struct {
Type string `yaml:"type"`
Grid RetentionIntervalList `yaml:"grid"`
Regex string `yaml:"regex"`
Type string `yaml:"type"`
Grid RetentionIntervalList `yaml:"grid"`
Regex string `yaml:"regex"`
}
type RetentionInterval struct {

View File

@ -12,6 +12,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/zrepl/zrepl/daemon/job"
"github.com/zrepl/zrepl/daemon/nethelpers"
"github.com/zrepl/zrepl/logger"
@ -43,24 +44,24 @@ func (j *controlJob) Status() *job.Status { return &job.Status{Type: job.TypeInt
func (j *controlJob) OwnedDatasetSubtreeRoot() (p *zfs.DatasetPath, ok bool) { return nil, false }
var promControl struct {
requestBegin *prometheus.CounterVec
requestBegin *prometheus.CounterVec
requestFinished *prometheus.HistogramVec
}
func (j *controlJob) RegisterMetrics(registerer prometheus.Registerer) {
promControl.requestBegin = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "zrepl",
Subsystem: "control",
Name: "request_begin",
Help: "number of request we started to handle",
Namespace: "zrepl",
Subsystem: "control",
Name: "request_begin",
Help: "number of request we started to handle",
}, []string{"endpoint"})
promControl.requestFinished = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "zrepl",
Subsystem: "control",
Name: "request_finished",
Help: "time it took a request to finih",
Buckets: []float64{1e-6, 10e-6, 100e-6, 500e-6, 1e-3,10e-3, 100e-3, 200e-3,400e-3,800e-3, 1, 10, 20},
Namespace: "zrepl",
Subsystem: "control",
Name: "request_finished",
Help: "time it took a request to finih",
Buckets: []float64{1e-6, 10e-6, 100e-6, 500e-6, 1e-3, 10e-3, 100e-3, 200e-3, 400e-3, 800e-3, 1, 10, 20},
}, []string{"endpoint"})
registerer.MustRegister(promControl.requestBegin)
registerer.MustRegister(promControl.requestFinished)
@ -114,7 +115,7 @@ func (j *controlJob) Run(ctx context.Context) {
requestLogger{log: log, handler: jsonRequestResponder{func(decoder jsonDecoder) (interface{}, error) {
type reqT struct {
Name string
Op string
Op string
}
var req reqT
if decoder(&req) != nil {
@ -136,8 +137,8 @@ func (j *controlJob) Run(ctx context.Context) {
server := http.Server{
Handler: mux,
// control socket is local, 1s timeout should be more than sufficient, even on a loaded system
WriteTimeout: 1*time.Second,
ReadTimeout: 1*time.Second,
WriteTimeout: 1 * time.Second,
ReadTimeout: 1 * time.Second,
}
outer:

View File

@ -3,8 +3,16 @@ package daemon
import (
"context"
"fmt"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/job"
"github.com/zrepl/zrepl/daemon/job/reset"
@ -12,12 +20,6 @@ import (
"github.com/zrepl/zrepl/daemon/logging"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/version"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
)
func Run(conf *config.Config) error {
@ -74,12 +76,11 @@ func Run(conf *config.Config) error {
return errors.Errorf("unknown monitoring job #%d (type %T)", i, v)
}
if err != nil {
return errors.Wrapf(err,"cannot build monitorin gjob #%d", i)
return errors.Wrapf(err, "cannot build monitorin gjob #%d", i)
}
jobs.start(ctx, job, true)
}
log.Info("starting daemon")
// start regular jobs
@ -103,7 +104,7 @@ type jobs struct {
// m protects all fields below it
m sync.RWMutex
wakeups map[string]wakeup.Func // by Job.Name
resets map[string]reset.Func // by Job.Name
resets map[string]reset.Func // by Job.Name
jobs map[string]job.Job
}

View File

@ -2,10 +2,12 @@ package filters
import (
"fmt"
"strings"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/endpoint"
"github.com/zrepl/zrepl/zfs"
"strings"
)
type DatasetMapFilter struct {

View File

@ -1,8 +1,9 @@
package filters
import (
"github.com/zrepl/zrepl/zfs"
"strings"
"github.com/zrepl/zrepl/zfs"
)
type AnyFSVFilter struct{}
@ -17,7 +18,6 @@ func (AnyFSVFilter) Filter(t zfs.VersionType, name string) (accept bool, err err
return true, nil
}
type PrefixFilter struct {
prefix string
fstype zfs.VersionType

View File

@ -6,6 +6,7 @@ import (
"strings"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
)

View File

@ -6,6 +6,7 @@ import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/zfs"
)
@ -29,7 +30,6 @@ func WithLogger(ctx context.Context, l Logger) context.Context {
return context.WithValue(ctx, contextKeyLog, l)
}
type Job interface {
Name() string
Run(ctx context.Context)
@ -44,15 +44,15 @@ type Type string
const (
TypeInternal Type = "internal"
TypeSnap Type = "snap"
TypePush Type = "push"
TypeSink Type = "sink"
TypePull Type = "pull"
TypeSource Type = "source"
TypeSnap Type = "snap"
TypePush Type = "push"
TypeSink Type = "sink"
TypePull Type = "pull"
TypeSource Type = "source"
)
type Status struct {
Type Type
Type Type
JobSpecific interface{}
}
@ -65,8 +65,8 @@ func (s *Status) MarshalJSON() ([]byte, error) {
if err != nil {
return nil, err
}
m := map[string]json.RawMessage {
"type": typeJson,
m := map[string]json.RawMessage{
"type": typeJson,
string(s.Type): jobJSON,
}
return json.Marshal(m)
@ -94,12 +94,14 @@ func (s *Status) UnmarshalJSON(in []byte) (err error) {
var st SnapJobStatus
err = json.Unmarshal(jobJSON, &st)
s.JobSpecific = &st
case TypePull: fallthrough
case TypePull:
fallthrough
case TypePush:
var st ActiveSideStatus
err = json.Unmarshal(jobJSON, &st)
s.JobSpecific = &st
case TypeSource: fallthrough
case TypeSource:
fallthrough
case TypeSink:
var st PassiveStatus
err = json.Unmarshal(jobJSON, &st)

View File

@ -7,6 +7,7 @@ import (
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/filters"
"github.com/zrepl/zrepl/daemon/job/wakeup"

View File

@ -4,11 +4,13 @@ import (
"bytes"
"encoding/json"
"fmt"
"time"
"github.com/fatih/color"
"github.com/go-logfmt/logfmt"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/logger"
"time"
)
const (

View File

@ -4,12 +4,14 @@ import (
"bytes"
"context"
"crypto/tls"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/logger"
"io"
"log/syslog"
"net"
"time"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/logger"
)
type EntryFormatter interface {

View File

@ -7,7 +7,7 @@ import (
type Logger = logger.Logger
var DaemonCmd = &cli.Subcommand {
var DaemonCmd = &cli.Subcommand{
Use: "daemon",
Short: "run the zrepl daemon",
Run: func(subcommand *cli.Subcommand, args []string) error {

View File

@ -1,10 +1,11 @@
package nethelpers
import (
"github.com/pkg/errors"
"net"
"os"
"path/filepath"
"github.com/pkg/errors"
)
func PreparePrivateSockpath(sockpath string) error {

View File

@ -2,15 +2,17 @@ package daemon
import (
"context"
"net"
"net/http"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/job"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/rpc/dataconn/frameconn"
"github.com/zrepl/zrepl/zfs"
"net"
"net/http"
)
type prometheusJob struct {
@ -25,7 +27,7 @@ func newPrometheusJobFromConfig(in *config.PrometheusMonitoring) (*prometheusJob
}
var prom struct {
taskLogEntries *prometheus.CounterVec
taskLogEntries *prometheus.CounterVec
}
func init() {
@ -93,4 +95,3 @@ func (o prometheusJobOutlet) WriteEntry(entry logger.Entry) error {
prom.taskLogEntries.WithLabelValues(o.jobName, entry.Level.String()).Inc()
return nil
}

View File

@ -3,17 +3,19 @@ package pruner
import (
"context"
"fmt"
"sort"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/pruning"
"github.com/zrepl/zrepl/replication/logic/pdu"
"github.com/zrepl/zrepl/util/envconst"
"sort"
"strings"
"sync"
"time"
)
// Try to keep it compatible with gitub.com/zrepl/zrepl/endpoint.Endpoint
@ -53,7 +55,7 @@ type args struct {
rules []pruning.KeepRule
retryWait time.Duration
considerSnapAtCursorReplicated bool
promPruneSecs prometheus.Observer
promPruneSecs prometheus.Observer
}
type Pruner struct {
@ -64,7 +66,7 @@ type Pruner struct {
state State
// State PlanErr
err error
err error
// State Exec
execQueue *execQueue
@ -75,7 +77,7 @@ type PrunerFactory struct {
receiverRules []pruning.KeepRule
retryWait time.Duration
considerSnapAtCursorReplicated bool
promPruneSecs *prometheus.HistogramVec
promPruneSecs *prometheus.HistogramVec
}
type LocalPrunerFactory struct {
@ -137,11 +139,11 @@ func NewPrunerFactory(in config.PruningSenderReceiver, promPruneSecs *prometheus
considerSnapAtCursorReplicated = considerSnapAtCursorReplicated || !knr.KeepSnapshotAtCursor
}
f := &PrunerFactory{
senderRules: keepRulesSender,
receiverRules: keepRulesReceiver,
retryWait: envconst.Duration("ZREPL_PRUNER_RETRY_INTERVAL", 10 * time.Second),
senderRules: keepRulesSender,
receiverRules: keepRulesReceiver,
retryWait: envconst.Duration("ZREPL_PRUNER_RETRY_INTERVAL", 10*time.Second),
considerSnapAtCursorReplicated: considerSnapAtCursorReplicated,
promPruneSecs: promPruneSecs,
promPruneSecs: promPruneSecs,
}
return f, nil
}
@ -213,17 +215,17 @@ func (p *Pruner) Prune() {
func (p *Pruner) prune(args args) {
u := func(f func(*Pruner)) {
p.mtx.Lock()
defer p.mtx.Unlock()
f(p)
}
p.mtx.Lock()
defer p.mtx.Unlock()
f(p)
}
// TODO support automatic retries
// It is advisable to merge this code with package replication/driver before
// That will likely require re-modelling struct fs like replication/driver.attempt,
// including figuring out how to resume a plan after being interrupted by network errors
// The non-retrying code in this package should move straight to replication/logic.
doOneAttempt(&args, u)
}
}
type Report struct {
State string
@ -239,9 +241,9 @@ type FSReport struct {
}
type SnapshotReport struct {
Name string
Name string
Replicated bool
Date time.Time
Date time.Time
}
func (p *Pruner) Report() *Report {
@ -250,9 +252,9 @@ func (p *Pruner) Report() *Report {
r := Report{State: p.state.String()}
if p.err != nil {
r.Error = p.err.Error()
}
if p.err != nil {
r.Error = p.err.Error()
}
if p.execQueue != nil {
r.Pending, r.Completed = p.execQueue.Report()
@ -268,7 +270,7 @@ func (p *Pruner) State() State {
}
type fs struct {
path string
path string
// permanent error during planning
planErr error
@ -316,7 +318,7 @@ func (f *fs) Report() FSReport {
if f.planErr != nil {
r.LastError = f.planErr.Error()
} else if f.execErrLast != nil {
} else if f.execErrLast != nil {
r.LastError = f.execErrLast.Error()
}
@ -326,7 +328,7 @@ func (f *fs) Report() FSReport {
}
r.DestroyList = make([]SnapshotReport, len(f.destroyList))
for i, snap := range f.destroyList{
for i, snap := range f.destroyList {
r.DestroyList[i] = snap.(snapshot).Report()
}
@ -490,9 +492,9 @@ tfss_loop:
})
for {
var pfs *fs
var pfs *fs
u(func(pruner *Pruner) {
pfs = pruner.execQueue.Pop()
pfs = pruner.execQueue.Pop()
})
if pfs == nil {
break
@ -516,16 +518,15 @@ tfss_loop:
hadErr := false
for _, fsr := range rep.Completed {
hadErr = hadErr || fsr.SkipReason.NotSkipped() && fsr.LastError != ""
}
}
if hadErr {
p.state = ExecErr
} else {
p.state = Done
}
})
}
}
// attempts to exec pfs, puts it back into the queue with the result
func doOneAttemptExec(a *args, u updater, pfs *fs) {
@ -558,20 +559,20 @@ func doOneAttemptExec(a *args, u updater, pfs *fs) {
err = nil
destroyFails := make([]*pdu.DestroySnapshotRes, 0)
for _, reqDestroy := range destroyList {
res, ok := destroyResults[reqDestroy.Name]
if !ok {
err = fmt.Errorf("missing destroy-result for %s", reqDestroy.RelName())
break
} else if res.Error != "" {
destroyFails = append(destroyFails, res)
}
res, ok := destroyResults[reqDestroy.Name]
if !ok {
err = fmt.Errorf("missing destroy-result for %s", reqDestroy.RelName())
break
} else if res.Error != "" {
destroyFails = append(destroyFails, res)
}
}
if err == nil && len(destroyFails) > 0 {
names := make([]string, len(destroyFails))
pairs := make([]string, len(destroyFails))
allSame := true
lastMsg := destroyFails[0].Error
for i := 0; i < len(destroyFails); i++{
for i := 0; i < len(destroyFails); i++ {
allSame = allSame && destroyFails[i].Error == lastMsg
relname := destroyFails[i].Snapshot.RelName()
names[i] = relname

View File

@ -7,13 +7,13 @@ import (
)
type execQueue struct {
mtx sync.Mutex
mtx sync.Mutex
pending, completed []*fs
}
func newExecQueue(cap int) *execQueue {
q := execQueue{
pending: make([]*fs, 0, cap),
pending: make([]*fs, 0, cap),
completed: make([]*fs, 0, cap),
}
return &q
@ -55,7 +55,7 @@ func (q *execQueue) Pop() *fs {
return fs
}
func(q *execQueue) Put(fs *fs, err error, done bool) {
func (q *execQueue) Put(fs *fs, err error, done bool) {
fs.mtx.Lock()
fs.execErrLast = err
if done || err != nil {
@ -79,5 +79,4 @@ func(q *execQueue) Put(fs *fs, err error, done bool) {
})
q.mtx.Unlock()
}
}

View File

@ -1,18 +1,19 @@
package snapper
import (
"github.com/zrepl/zrepl/config"
"github.com/pkg/errors"
"time"
"context"
"github.com/zrepl/zrepl/daemon/filters"
"fmt"
"github.com/zrepl/zrepl/zfs"
"sort"
"github.com/zrepl/zrepl/logger"
"sync"
)
"time"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/filters"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/zfs"
)
//go:generate stringer -type=SnapState
type SnapState uint
@ -28,7 +29,7 @@ type snapProgress struct {
state SnapState
// SnapStarted, SnapDone, SnapError
name string
name string
startAt time.Time
// SnapDone
@ -44,13 +45,13 @@ type args struct {
prefix string
interval time.Duration
fsf *filters.DatasetMapFilter
snapshotsTaken chan<-struct{}
snapshotsTaken chan<- struct{}
}
type Snapper struct {
args args
mtx sync.Mutex
mtx sync.Mutex
state State
// set in state Plan, used in Waiting
@ -70,7 +71,7 @@ type Snapper struct {
type State uint
const (
SyncUp State = 1<<iota
SyncUp State = 1 << iota
SyncUpErrWait
Planning
Snapshotting
@ -81,13 +82,13 @@ const (
func (s State) sf() state {
m := map[State]state{
SyncUp: syncUp,
SyncUp: syncUp,
SyncUpErrWait: wait,
Planning: plan,
Snapshotting: snapshot,
Waiting: wait,
ErrorWait: wait,
Stopped: nil,
Planning: plan,
Snapshotting: snapshot,
Waiting: wait,
ErrorWait: wait,
Stopped: nil,
}
return m[s]
}
@ -123,9 +124,9 @@ func PeriodicFromConfig(g *config.Global, fsf *filters.DatasetMapFilter, in *con
}
args := args{
prefix: in.Prefix,
prefix: in.Prefix,
interval: in.Interval,
fsf: fsf,
fsf: fsf,
// ctx and log is set in Run()
}
@ -199,7 +200,7 @@ func syncUp(a args, u updater) state {
if err != nil {
return onErr(err, u)
}
u(func(s *Snapper){
u(func(s *Snapper) {
s.sleepUntil = syncPoint
})
t := time.NewTimer(syncPoint.Sub(time.Now()))
@ -386,4 +387,3 @@ func findSyncPoint(log Logger, fss []*zfs.DatasetPath, prefix string, interval t
return snaptimes[0].time, nil
}

View File

@ -3,6 +3,7 @@ package snapper
import (
"context"
"fmt"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/filters"
)
@ -17,7 +18,7 @@ type PeriodicOrManual struct {
s *Snapper
}
func (s *PeriodicOrManual) Run(ctx context.Context, wakeUpCommon chan <- struct{}) {
func (s *PeriodicOrManual) Run(ctx context.Context, wakeUpCommon chan<- struct{}) {
if s.s != nil {
s.s.Run(ctx, wakeUpCommon)
}

View File

@ -2,6 +2,7 @@ package endpoint
import (
"context"
"github.com/zrepl/zrepl/logger"
)

View File

@ -7,6 +7,7 @@ import (
"path"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/replication/logic/pdu"
"github.com/zrepl/zrepl/util/chainlock"
"github.com/zrepl/zrepl/zfs"

View File

@ -4,10 +4,11 @@ import (
"bytes"
"encoding/json"
"fmt"
"github.com/fatih/color"
"github.com/pkg/errors"
"sync"
"time"
"github.com/fatih/color"
"github.com/pkg/errors"
)
type Level int

View File

@ -2,10 +2,12 @@ package logger_test
import (
"fmt"
"github.com/kr/pretty"
"github.com/zrepl/zrepl/logger"
"testing"
"time"
"github.com/kr/pretty"
"github.com/zrepl/zrepl/logger"
)
type TestOutlet struct {

View File

@ -9,7 +9,7 @@ type stderrLogger struct {
Logger
}
type stderrLoggerOutlet struct {}
type stderrLoggerOutlet struct{}
func (stderrLoggerOutlet) WriteEntry(entry Entry) error {
fmt.Fprintf(os.Stderr, "%#v\n", entry)

View File

@ -2,12 +2,14 @@ package pruning
import (
"fmt"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/pruning/retentiongrid"
"regexp"
"sort"
"time"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/pruning/retentiongrid"
)
// KeepGrid fits snapshots that match a given regex into a retentiongrid.Grid,
@ -15,7 +17,7 @@ import (
// and deletes all snapshots that do not fit the grid specification.
type KeepGrid struct {
retentionGrid *retentiongrid.Grid
re *regexp.Regexp
re *regexp.Regexp
}
func NewKeepGrid(in *config.PruneGrid) (p *KeepGrid, err error) {

View File

@ -1,8 +1,9 @@
package pruning
import (
"github.com/stretchr/testify/assert"
"testing"
"github.com/stretchr/testify/assert"
)
func TestShallowCopySnapList(t *testing.T) {

View File

@ -1,8 +1,9 @@
package pruning
import (
"github.com/pkg/errors"
"sort"
"github.com/pkg/errors"
)
type KeepLastN struct {

View File

@ -1,9 +1,10 @@
package pruning
import (
"github.com/stretchr/testify/assert"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestKeepLastN(t *testing.T) {

View File

@ -5,7 +5,7 @@ import (
)
type KeepRegex struct {
expr *regexp.Regexp
expr *regexp.Regexp
negate bool
}

View File

@ -2,9 +2,11 @@ package pruning
import (
"fmt"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
"time"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
)
type KeepRule interface {

View File

@ -2,11 +2,12 @@ package retentiongrid
import (
"fmt"
"github.com/stretchr/testify/assert"
"strconv"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
type retentionIntervalStub struct {

View File

@ -10,11 +10,12 @@ import (
"sync"
"time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/zrepl/zrepl/replication/report"
"github.com/zrepl/zrepl/util/chainlock"
"github.com/zrepl/zrepl/util/envconst"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
type interval struct {

View File

@ -26,4 +26,4 @@ func debugPrefix(prefixFormat string, prefixFormatArgs ...interface{}) debugFunc
return func(format string, args ...interface{}) {
debug("%s: %s", prefix, fmt.Sprintf(format, args))
}
}
}

View File

@ -10,6 +10,7 @@ import (
"time"
"github.com/stretchr/testify/require"
"github.com/zrepl/zrepl/replication/report"
"github.com/stretchr/testify/assert"

View File

@ -97,7 +97,7 @@ func TestIncrementalPath_SnapshotsOnly(t *testing.T) {
})
// sender with earlier but also current version as sender is not a conflict
doTest(l("@c,3"), l("@a,1", "@b,2", "@c,3") , func(path []*FilesystemVersion, conflict error) {
doTest(l("@c,3"), l("@a,1", "@b,2", "@c,3"), func(path []*FilesystemVersion, conflict error) {
t.Logf("path: %#v", path)
t.Logf("conflict: %#v", conflict)
assert.Empty(t, path)

View File

@ -2,8 +2,9 @@ package pdu
import (
"fmt"
"github.com/zrepl/zrepl/zfs"
"time"
"github.com/zrepl/zrepl/zfs"
)
func (v *FilesystemVersion) RelName() string {

View File

@ -1,9 +1,10 @@
package pdu
import (
"github.com/stretchr/testify/assert"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func TestFilesystemVersion_RelName(t *testing.T) {
@ -18,24 +19,24 @@ func TestFilesystemVersion_RelName(t *testing.T) {
tcs := []TestCase{
{
In: FilesystemVersion{
Type: FilesystemVersion_Snapshot,
Name: "foobar",
Type: FilesystemVersion_Snapshot,
Name: "foobar",
Creation: creat,
},
Out: "@foobar",
},
{
In: FilesystemVersion{
Type: FilesystemVersion_Bookmark,
Name: "foobar",
Type: FilesystemVersion_Bookmark,
Name: "foobar",
Creation: creat,
},
Out: "#foobar",
},
{
In: FilesystemVersion{
Type: 2342,
Name: "foobar",
Type: 2342,
Name: "foobar",
Creation: creat,
},
Panic: true,
@ -58,7 +59,7 @@ func TestFilesystemVersion_RelName(t *testing.T) {
func TestFilesystemVersion_ZFSFilesystemVersion(t *testing.T) {
empty := &FilesystemVersion{}
_, err:= empty.ZFSFilesystemVersion()
_, err := empty.ZFSFilesystemVersion()
assert.Error(t, err)
dateInvalid := &FilesystemVersion{Creation: "foobar"}

View File

@ -26,7 +26,7 @@ type Endpoint interface {
ListFilesystems(ctx context.Context, req *pdu.ListFilesystemReq) (*pdu.ListFilesystemRes, error)
ListFilesystemVersions(ctx context.Context, req *pdu.ListFilesystemVersionsReq) (*pdu.ListFilesystemVersionsRes, error)
DestroySnapshots(ctx context.Context, req *pdu.DestroySnapshotsReq) (*pdu.DestroySnapshotsRes, error)
WaitForConnectivity(ctx context.Context) (error)
WaitForConnectivity(ctx context.Context) error
}
type Sender interface {
@ -107,7 +107,7 @@ type Filesystem struct {
sender Sender
receiver Receiver
Path string // compat
Path string // compat
receiverFS *pdu.Filesystem
promBytesReplicated prometheus.Counter // compat
}

View File

@ -7,6 +7,7 @@ import (
"strings"
"github.com/golang/protobuf/proto"
"github.com/zrepl/zrepl/replication/logic/pdu"
"github.com/zrepl/zrepl/rpc/dataconn/stream"
"github.com/zrepl/zrepl/transport"
@ -214,13 +215,12 @@ func (c *Client) ReqRecv(ctx context.Context, req *pdu.ReceiveReq, streamCopier
return res.res, cause
}
func (c *Client) ReqPing(ctx context.Context, req *pdu.PingReq) (*pdu.PingRes, error) {
conn, err := c.getWire(ctx)
if err != nil {
return nil, err
}
defer c.putWire(conn)
defer c.putWire(conn)
if err := c.send(ctx, conn, EndpointPing, req, nil); err != nil {
return nil, err
@ -232,4 +232,4 @@ func (c *Client) ReqPing(ctx context.Context, req *pdu.PingReq) (*pdu.PingRes, e
}
return &res, nil
}
}

View File

@ -6,6 +6,7 @@ import (
"fmt"
"github.com/golang/protobuf/proto"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/replication/logic/pdu"
"github.com/zrepl/zrepl/rpc/dataconn/stream"

View File

@ -12,6 +12,7 @@ import (
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/zrepl/zrepl/rpc/dataconn/base2bufpool"
"github.com/zrepl/zrepl/rpc/dataconn/timeoutconn"
)

View File

@ -3,8 +3,8 @@ package frameconn
import "sync"
type shutdownFSM struct {
mtx sync.Mutex
state shutdownFSMState
mtx sync.Mutex
state shutdownFSMState
}
type shutdownFSMState uint32
@ -16,7 +16,7 @@ const (
func newShutdownFSM() *shutdownFSM {
fsm := &shutdownFSM{
state: shutdownStateOpen,
state: shutdownStateOpen,
}
return fsm
}
@ -34,4 +34,3 @@ func (f *shutdownFSM) IsShuttingDown() bool {
defer f.mtx.Unlock()
return f.state != shutdownStateOpen
}

View File

@ -19,4 +19,3 @@ func TestIsPublicFrameType(t *testing.T) {
assert.True(t, IsPublicFrameType(255))
assert.False(t, IsPublicFrameType(rstFrameType))
}

View File

@ -5,6 +5,7 @@ import (
"time"
"github.com/stretchr/testify/assert"
"github.com/zrepl/zrepl/rpc/dataconn/frameconn"
)

View File

@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/rpc/dataconn/heartbeatconn"
"github.com/zrepl/zrepl/util/socketpair"

View File

@ -11,6 +11,7 @@ import (
netssh "github.com/problame/go-netssh"
"github.com/zrepl/yaml-config"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/transport"
transportconfig "github.com/zrepl/zrepl/transport/fromconfig"

View File

@ -85,7 +85,7 @@ func (CloseWrite) receiver(wire transport.Wire) {
// consume half the test data, then detect an error, send it and CloseWrite
r := io.LimitReader(wire, int64(5 * len(closeWriteTestSendData)/3))
r := io.LimitReader(wire, int64(5*len(closeWriteTestSendData)/3))
_, err := io.Copy(ioutil.Discard, r)
noerror(err)
@ -103,7 +103,7 @@ func (CloseWrite) receiver(wire transport.Wire) {
// io.Copy masks io.EOF to nil, and we expect io.EOF from the client's Close() call
log.Panicf("unexpected error returned from reading conn: %s", err)
}
closeErr := wire.Close()
log.Printf("closeErr=%T %s", closeErr, closeErr)

View File

@ -10,6 +10,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zrepl/zrepl/util/socketpair"
)

View File

@ -18,11 +18,12 @@ import (
"net"
"time"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/transport"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/transport"
)
type Logger = logger.Logger

View File

@ -5,10 +5,11 @@ package pdu
import (
fmt "fmt"
math "math"
proto "github.com/golang/protobuf/proto"
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
math "math"
)
// Reference imports to suppress errors if they are not otherwise used.

View File

@ -25,8 +25,9 @@ package netadaptor
import (
"context"
"fmt"
"github.com/zrepl/zrepl/logger"
"net"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/transport"
)

View File

@ -12,6 +12,7 @@ import (
"google.golang.org/grpc"
"github.com/google/uuid"
"github.com/zrepl/zrepl/replication/logic"
"github.com/zrepl/zrepl/replication/logic/pdu"
"github.com/zrepl/zrepl/rpc/dataconn"
@ -158,7 +159,7 @@ func (c *Client) WaitForConnectivity(ctx context.Context) error {
time.Sleep(envconst.Duration("ZREPL_RPC_DATACONN_PING_SLEEP", 1*time.Second))
continue
}
// it's not a dial timeout,
// it's not a dial timeout,
checkRes(data, dataErr, loggers.Data, &dataOk)
return
}

View File

@ -115,4 +115,3 @@ package rpc
// - remove the comments //
// - vim: set virtualedit+=all
// - vim: set ft=text

View File

@ -4,8 +4,8 @@ import (
"context"
"time"
"github.com/zrepl/zrepl/transport"
"github.com/zrepl/zrepl/rpc/transportmux"
"github.com/zrepl/zrepl/transport"
"github.com/zrepl/zrepl/util/envconst"
)

View File

@ -49,10 +49,10 @@ func (l *demuxListener) Accept(ctx context.Context) (*transport.AuthConn, error)
return res.conn, res.err
}
type demuxAddr struct {}
type demuxAddr struct{}
func (demuxAddr) Network() string { return "demux" }
func (demuxAddr) String() string { return "demux" }
func (demuxAddr) String() string { return "demux" }
func (l *demuxListener) Addr() net.Addr {
return demuxAddr{}
@ -64,7 +64,7 @@ func (l *demuxListener) Close() error { return nil } // TODO
// This is a protocol constant, changing it breaks the wire protocol.
const LabelLen = 64
func padLabel(out []byte, label string) (error) {
func padLabel(out []byte, label string) error {
if len(label) > LabelLen {
return fmt.Errorf("label %q exceeds max length (is %d, max %d)", label, len(label), LabelLen)
}
@ -153,7 +153,7 @@ func Demux(ctx context.Context, rawListener transport.AuthenticatedListener, lab
type labeledConnecter struct {
label []byte
transport.Connecter
transport.Connecter
}
func (c labeledConnecter) Connect(ctx context.Context) (transport.Wire, error) {
@ -202,4 +202,3 @@ func MuxConnecter(rawConnecter transport.Connecter, labels []string, timeout tim
}
return ret, nil
}

View File

@ -17,7 +17,7 @@ import (
type HandshakeMessage struct {
ProtocolVersion int
Extensions []string
Extensions []string
}
// A HandshakeError describes what went wrong during the handshake.
@ -25,7 +25,7 @@ type HandshakeMessage struct {
type HandshakeError struct {
msg string
// If not nil, the underlying IO error that caused the handshake to fail.
IOError error
IOError error
isAcceptError bool
}
@ -36,10 +36,10 @@ func (e HandshakeError) Error() string { return e.msg }
// Like with net.OpErr (Go issue 6163), a client failing to handshake
// should be a temporary Accept error toward the Listener .
func (e HandshakeError) Temporary() bool {
if e.isAcceptError {
if e.isAcceptError {
return true
}
te, ok := e.IOError.(interface{ Temporary() bool });
te, ok := e.IOError.(interface{ Temporary() bool })
return ok && te.Temporary()
}
@ -52,11 +52,11 @@ func (e HandshakeError) Timeout() bool {
return false
}
func hsErr(format string, args... interface{}) *HandshakeError {
func hsErr(format string, args ...interface{}) *HandshakeError {
return &HandshakeError{msg: fmt.Sprintf(format, args...)}
}
func hsIOErr(err error, format string, args... interface{}) *HandshakeError {
func hsIOErr(err error, format string, args ...interface{}) *HandshakeError {
return &HandshakeError{IOError: err, msg: fmt.Sprintf(format, args...)}
}
@ -145,7 +145,7 @@ func (m *HandshakeMessage) DecodeReader(r io.Reader, maxLen int) error {
if exts[len(exts)-1] != "" {
return hsErr("unexpected data trailing after last extension newline")
}
m.Extensions = exts[0:len(exts)-1]
m.Extensions = exts[0 : len(exts)-1]
return nil
}
@ -160,7 +160,7 @@ const HandshakeMessageMaxLen = 16 * 4096
func DoHandshakeVersion(conn net.Conn, deadline time.Time, version int) *HandshakeError {
ours := HandshakeMessage{
ProtocolVersion: version,
Extensions: nil,
Extensions: nil,
}
hsb, err := ours.Encode()
if err != nil {

View File

@ -3,13 +3,15 @@ package versionhandshake
import (
"bytes"
"fmt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zrepl/zrepl/util/socketpair"
"io"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zrepl/zrepl/util/socketpair"
)
func TestHandshakeMessage_Encode(t *testing.T) {
@ -23,8 +25,6 @@ func TestHandshakeMessage_Encode(t *testing.T) {
enc := string(encB)
t.Logf("enc: %s", enc)
assert.False(t, strings.ContainsAny(enc[0:10], " "))
assert.True(t, enc[10] == ' ')
@ -45,7 +45,7 @@ func TestHandshakeMessage_Encode(t *testing.T) {
func TestHandshakeMessage_Encode_InvalidProtocolVersion(t *testing.T) {
for _, pv := range []int{-1, 0, 10000, 10001} {
for _, pv := range []int{-1, 0, 10000, 10001} {
t.Logf("testing invalid protocol version = %v", pv)
msg := HandshakeMessage{
ProtocolVersion: pv,
@ -68,7 +68,7 @@ func TestHandshakeMessage_DecodeReader(t *testing.T) {
require.NoError(t, err)
out := HandshakeMessage{}
err = out.DecodeReader(bytes.NewReader([]byte(enc)), 4 * 4096)
err = out.DecodeReader(bytes.NewReader([]byte(enc)), 4*4096)
assert.NoError(t, err)
assert.Equal(t, 2342, out.ProtocolVersion)
assert.Equal(t, 2, len(out.Extensions))

View File

@ -4,12 +4,13 @@ import (
"context"
"net"
"time"
"github.com/zrepl/zrepl/transport"
)
type HandshakeConnecter struct {
connecter transport.Connecter
timeout time.Duration
timeout time.Duration
}
func (c HandshakeConnecter) Connect(ctx context.Context) (transport.Wire, error) {
@ -31,17 +32,17 @@ func (c HandshakeConnecter) Connect(ctx context.Context) (transport.Wire, error)
func Connecter(connecter transport.Connecter, timeout time.Duration) HandshakeConnecter {
return HandshakeConnecter{
connecter: connecter,
timeout: timeout,
timeout: timeout,
}
}
// wrapper type that performs a a protocol version handshake before returning the connection
type HandshakeListener struct {
l transport.AuthenticatedListener
l transport.AuthenticatedListener
timeout time.Duration
}
func (l HandshakeListener) Addr() (net.Addr) { return l.l.Addr() }
func (l HandshakeListener) Addr() net.Addr { return l.l.Addr() }
func (l HandshakeListener) Close() error { return l.l.Close() }

View File

@ -4,7 +4,9 @@ package fromconfig
import (
"fmt"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/transport"
"github.com/zrepl/zrepl/transport/local"
@ -13,10 +15,10 @@ import (
"github.com/zrepl/zrepl/transport/tls"
)
func ListenerFactoryFromConfig(g *config.Global, in config.ServeEnum) (transport.AuthenticatedListenerFactory,error) {
func ListenerFactoryFromConfig(g *config.Global, in config.ServeEnum) (transport.AuthenticatedListenerFactory, error) {
var (
l transport.AuthenticatedListenerFactory
l transport.AuthenticatedListenerFactory
err error
)
switch v := in.Ret.(type) {
@ -35,7 +37,6 @@ func ListenerFactoryFromConfig(g *config.Global, in config.ServeEnum) (transport
return l, err
}
func ConnecterFromConfig(g *config.Global, in config.ConnectEnum) (transport.Connecter, error) {
var (
connecter transport.Connecter

View File

@ -3,12 +3,13 @@ package local
import (
"context"
"fmt"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/transport"
)
type LocalConnecter struct {
listenerName string
listenerName string
clientIdentity string
}
@ -26,4 +27,3 @@ func (c *LocalConnecter) Connect(dialCtx context.Context) (transport.Wire, error
l := GetLocalListener(c.listenerName)
return l.Connect(dialCtx, c.clientIdentity)
}

View File

@ -3,20 +3,21 @@ package local
import (
"context"
"fmt"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/util/socketpair"
"net"
"sync"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/transport"
"github.com/zrepl/zrepl/util/socketpair"
)
var localListeners struct {
m map[string]*LocalListener // listenerName -> listener
m map[string]*LocalListener // listenerName -> listener
init sync.Once
mtx sync.Mutex
mtx sync.Mutex
}
func GetLocalListener(listenerName string) (*LocalListener) {
func GetLocalListener(listenerName string) *LocalListener {
localListeners.init.Do(func() {
localListeners.m = make(map[string]*LocalListener)
@ -36,12 +37,12 @@ func GetLocalListener(listenerName string) (*LocalListener) {
type connectRequest struct {
clientIdentity string
callback chan connectResult
callback chan connectResult
}
type connectResult struct {
conn transport.Wire
err error
err error
}
type LocalListener struct {
@ -60,7 +61,7 @@ func (l *LocalListener) Connect(dialCtx context.Context, clientIdentity string)
// place request
req := connectRequest{
clientIdentity: clientIdentity,
callback: make(chan connectResult),
callback: make(chan connectResult),
}
select {
case l.connects <- req:
@ -70,7 +71,7 @@ func (l *LocalListener) Connect(dialCtx context.Context, clientIdentity string)
// wait for listener response
select {
case connRes := <- req.callback:
case connRes := <-req.callback:
conn, err = connRes.conn, connRes.err
case <-dialCtx.Done():
close(req.callback) // sending to the channel afterwards will panic, the listener has to catch this
@ -88,7 +89,7 @@ func (localAddr) Network() string { return "local" }
func (a localAddr) String() string { return a.S }
func (l *LocalListener) Addr() (net.Addr) { return localAddr{"<listening>"} }
func (l *LocalListener) Addr() net.Addr { return localAddr{"<listening>"} }
func (l *LocalListener) Accept(ctx context.Context) (*transport.AuthConn, error) {
respondToRequest := func(req connectRequest, res connectResult) (err error) {
@ -163,12 +164,12 @@ func (l *LocalListener) Close() error {
return nil
}
func LocalListenerFactoryFromConfig(g *config.Global, in *config.LocalServe) (transport.AuthenticatedListenerFactory,error) {
func LocalListenerFactoryFromConfig(g *config.Global, in *config.LocalServe) (transport.AuthenticatedListenerFactory, error) {
if in.ListenerName == "" {
return nil, fmt.Errorf("ListenerName must not be empty")
}
listenerName := in.ListenerName
lf := func() (transport.AuthenticatedListener,error) {
lf := func() (transport.AuthenticatedListener, error) {
return GetLocalListener(listenerName), nil
}
return lf, nil

View File

@ -2,12 +2,14 @@ package ssh
import (
"context"
"time"
"github.com/jinzhu/copier"
"github.com/pkg/errors"
"github.com/problame/go-netssh"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/transport"
"time"
)
type SSHStdinserverConnecter struct {

View File

@ -1,19 +1,21 @@
package ssh
import (
"github.com/problame/go-netssh"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/nethelpers"
"github.com/zrepl/zrepl/transport"
"context"
"fmt"
"net"
"path"
"context"
"github.com/pkg/errors"
"sync/atomic"
"github.com/pkg/errors"
"github.com/problame/go-netssh"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/nethelpers"
"github.com/zrepl/zrepl/transport"
)
func MultiStdinserverListenerFactoryFromConfig(g *config.Global, in *config.StdinserverServer) (transport.AuthenticatedListenerFactory,error) {
func MultiStdinserverListenerFactoryFromConfig(g *config.Global, in *config.StdinserverServer) (transport.AuthenticatedListenerFactory, error) {
for _, ci := range in.ClientIdentities {
if err := transport.ValidateClientIdentity(ci); err != nil {
@ -24,7 +26,7 @@ func MultiStdinserverListenerFactoryFromConfig(g *config.Global, in *config.Stdi
clientIdentities := in.ClientIdentities
sockdir := g.Serve.StdinServer.SockDir
lf := func() (transport.AuthenticatedListener,error) {
lf := func() (transport.AuthenticatedListener, error) {
return multiStdinserverListenerFromClientIdentities(sockdir, clientIdentities)
}
@ -33,13 +35,13 @@ func MultiStdinserverListenerFactoryFromConfig(g *config.Global, in *config.Stdi
type multiStdinserverAcceptRes struct {
conn *transport.AuthConn
err error
err error
}
type MultiStdinserverListener struct {
listeners []*stdinserverListener
accepts chan multiStdinserverAcceptRes
closed int32
accepts chan multiStdinserverAcceptRes
closed int32
}
// client identities must be validated
@ -48,7 +50,7 @@ func multiStdinserverListenerFromClientIdentities(sockdir string, cis []string)
var err error
for _, ci := range cis {
sockpath := path.Join(sockdir, ci)
l := &stdinserverListener{clientIdentity: ci}
l := &stdinserverListener{clientIdentity: ci}
if err = nethelpers.PreparePrivateSockpath(sockpath); err != nil {
break
}
@ -66,7 +68,7 @@ func multiStdinserverListenerFromClientIdentities(sockdir string, cis []string)
return &MultiStdinserverListener{listeners: listeners}, nil
}
func (m *MultiStdinserverListener) Accept(ctx context.Context) (*transport.AuthConn, error){
func (m *MultiStdinserverListener) Accept(ctx context.Context) (*transport.AuthConn, error) {
if m.accepts == nil {
m.accepts = make(chan multiStdinserverAcceptRes, len(m.listeners))
@ -80,7 +82,7 @@ func (m *MultiStdinserverListener) Accept(ctx context.Context) (*transport.AuthC
}
}
res := <- m.accepts
res := <-m.accepts
return res.conn, res.err
}
@ -116,7 +118,7 @@ func (m *MultiStdinserverListener) Close() error {
// a single stdinserverListener (part of multiStdinserverListener)
type stdinserverListener struct {
l *netssh.Listener
l *netssh.Listener
clientIdentity string
}
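
Beyond the import and whitespace changes, the MultiStdinserverListener hunks describe a fan-in: every per-client-identity listener pushes its accept results into one shared channel so a single Accept call can serve them all. A sketch of that shape built on plain net.Listener (the types and the one-goroutine-per-listener loop are assumptions for illustration, not the actual implementation):

package sshsketch

import (
	"context"
	"net"
)

type acceptRes struct {
	conn net.Conn
	err  error
}

// multiListener fans several listeners into one channel; the per-listener
// goroutines are started lazily on the first Accept call.
type multiListener struct {
	listeners []net.Listener
	accepts   chan acceptRes
}

func (m *multiListener) Accept(ctx context.Context) (net.Conn, error) {
	if m.accepts == nil {
		m.accepts = make(chan acceptRes, len(m.listeners))
		for _, l := range m.listeners {
			go func(l net.Listener) {
				for {
					conn, err := l.Accept()
					m.accepts <- acceptRes{conn, err}
					if err != nil {
						return
					}
				}
			}(l)
		}
	}
	select {
	case res := <-m.accepts:
		return res.conn, res.err
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}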

View File

@ -1,15 +1,17 @@
package tcp
import (
"github.com/zrepl/zrepl/config"
"net"
"github.com/pkg/errors"
"context"
"net"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/transport"
)
type ipMapEntry struct {
ip net.IP
ip net.IP
ident string
}
@ -25,7 +27,7 @@ func ipMapFromConfig(clients map[string]string) (*ipMap, error) {
return nil, errors.Errorf("cannot parse client IP %q", clientIPString)
}
if err := transport.ValidateClientIdentity(clientIdent); err != nil {
return nil, errors.Wrapf(err,"invalid client identity for IP %q", clientIPString)
return nil, errors.Wrapf(err, "invalid client identity for IP %q", clientIPString)
}
entries = append(entries, ipMapEntry{clientIP, clientIdent})
}
@ -79,4 +81,3 @@ func (f *TCPAuthListener) Accept(ctx context.Context) (*transport.AuthConn, erro
}
return transport.NewAuthConn(nc, clientIdent), nil
}
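
The ipMap hunk shows how the TCP transport turns its clients map into identity lookups: each key must parse as an IP address and each value must be a valid client identity before an entry is added. A sketch of that loop, assuming the identity check is injected as a plain function (entry, entriesFromConfig and validateIdentity are names chosen here for illustration):

package tcpsketch

import (
	"net"

	"github.com/pkg/errors"
)

type entry struct {
	ip    net.IP
	ident string
}

// entriesFromConfig validates every IP/identity pair before it becomes a lookup entry.
func entriesFromConfig(clients map[string]string, validateIdentity func(string) error) ([]entry, error) {
	entries := make([]entry, 0, len(clients))
	for ipString, ident := range clients {
		ip := net.ParseIP(ipString)
		if ip == nil {
			return nil, errors.Errorf("cannot parse client IP %q", ipString)
		}
		if err := validateIdentity(ident); err != nil {
			return nil, errors.Wrapf(err, "invalid client identity for IP %q", ipString)
		}
		entries = append(entries, entry{ip, ident})
	}
	return entries, nil
}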

View File

@ -6,6 +6,7 @@ import (
"net"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/tlsconf"
"github.com/zrepl/zrepl/transport"

View File

@ -1,16 +1,18 @@
package tls
import (
"context"
"crypto/tls"
"crypto/x509"
"fmt"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/transport"
"github.com/zrepl/zrepl/tlsconf"
"net"
"time"
"context"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/tlsconf"
"github.com/zrepl/zrepl/transport"
)
type TLSListenerFactory struct {
@ -18,10 +20,10 @@ type TLSListenerFactory struct {
clientCA *x509.CertPool
serverCert tls.Certificate
handshakeTimeout time.Duration
clientCNs map[string]struct{}
clientCNs map[string]struct{}
}
func TLSListenerFactoryFromConfig(c *config.Global, in *config.TLSServe) (transport.AuthenticatedListenerFactory,error) {
func TLSListenerFactoryFromConfig(c *config.Global, in *config.TLSServe) (transport.AuthenticatedListenerFactory, error) {
address := in.Listen
handshakeTimeout := in.HandshakeTimeout
@ -85,5 +87,3 @@ func (l tlsAuthListener) Accept(ctx context.Context) (*transport.AuthConn, error
adaptor := newWireAdaptor(tlsConn, tcpConn)
return transport.NewAuthConn(adaptor, cn), nil
}

View File

@ -5,6 +5,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/zrepl/zrepl/zfs"
)

View File

@ -39,4 +39,4 @@ func (l *L) NewCond() *sync.Cond {
func (l *L) DropWhile(f func()) {
defer l.Unlock().Lock()
f()
}
}
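
The DropWhile hunk deserves a note: defer evaluates the receiver expression immediately, so defer l.Unlock().Lock() unlocks right away and only the chained Lock call runs when DropWhile returns, meaning f executes with the lock dropped. A self-contained sketch assuming a chainable wrapper whose Lock and Unlock return the receiver (only the DropWhile body comes from the hunk; the rest is reconstructed for illustration):

package locksketch

import "sync"

// L wraps a mutex with chainable Lock and Unlock, assuming both return the
// receiver as the DropWhile one-liner above requires.
type L struct {
	mtx sync.Mutex
}

func (l *L) Lock() *L {
	l.mtx.Lock()
	return l
}

func (l *L) Unlock() *L {
	l.mtx.Unlock()
	return l
}

// DropWhile runs f with the lock released and reacquires it afterwards:
// the deferred call's receiver l.Unlock() is evaluated now, while the
// chained Lock() is what actually runs at return time.
func (l *L) DropWhile(f func()) {
	defer l.Unlock().Lock()
	f()
}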

View File

@ -3,11 +3,12 @@ package util
import (
"bytes"
"encoding/binary"
"github.com/stretchr/testify/assert"
"io"
"reflect"
"testing"
"testing/quick"
"github.com/stretchr/testify/assert"
)
func TestUnchunker(t *testing.T) {

View File

@ -2,10 +2,11 @@ package util
import (
"context"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestContextWithOptionalDeadline(t *testing.T) {

View File

@ -101,16 +101,16 @@ func (c *ChainedReader) Read(buf []byte) (n int, err error) {
}
type ByteCounterReader struct {
reader io.ReadCloser
reader io.ReadCloser
// called & accessed synchronously during Read, no external access
cb func(full int64)
cbEvery time.Duration
lastCbAt time.Time
cb func(full int64)
cbEvery time.Duration
lastCbAt time.Time
bytesSinceLastCb int64
// set atomically because it may be read by multiple threads
bytes int64
bytes int64
}
func NewByteCounterReader(reader io.ReadCloser) *ByteCounterReader {

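The ByteCounterReader fields and their comments describe a concurrency contract: the running byte total is maintained atomically because other goroutines may read it, while the rate-limited callback only ever fires from inside Read. A minimal sketch of that contract (countingReader and its members are illustrative, not the zrepl type):

package iosketch

import (
	"io"
	"sync/atomic"
	"time"
)

// countingReader tracks the total number of bytes read: the total is kept
// with atomic operations so other goroutines may poll Count, while the
// throttled callback is invoked synchronously from Read only.
type countingReader struct {
	total    int64 // accessed atomically
	r        io.Reader
	cb       func(total int64)
	cbEvery  time.Duration
	lastCbAt time.Time
}

func (c *countingReader) Read(p []byte) (int, error) {
	n, err := c.r.Read(p)
	total := atomic.AddInt64(&c.total, int64(n))
	if c.cb != nil && time.Since(c.lastCbAt) >= c.cbEvery {
		c.cb(total)
		c.lastCbAt = time.Now()
	}
	return n, err
}

// Count returns the running total and is safe to call from other goroutines.
func (c *countingReader) Count() int64 {
	return atomic.LoadInt64(&c.total)
}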
View File

@ -4,18 +4,19 @@ import (
"bytes"
"context"
"fmt"
"github.com/zrepl/zrepl/util/envconst"
"io"
"os"
"os/exec"
"syscall"
"time"
"github.com/zrepl/zrepl/util/envconst"
)
// An IOCommand exposes a forked process's std(in|out|err) through the io.ReadWriteCloser interface.
type IOCommand struct {
Cmd *exec.Cmd
kill context.CancelFunc
kill context.CancelFunc
Stdin io.WriteCloser
Stdout io.ReadCloser
StderrBuf *bytes.Buffer

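The IOCommand struct gathers everything needed to treat a forked process as a stream: the command handle, a cancel function acting as the kill switch, pipes for stdin and stdout, and a buffer that captures stderr for error reporting. A hedged sketch of wiring such a value with the standard library (startIOCommand and the lower-case fields are illustrative; this is not the zrepl constructor):

package cmdsketch

import (
	"bytes"
	"context"
	"io"
	"os/exec"
)

// ioCommand mirrors the shape of the struct above with illustrative names.
type ioCommand struct {
	cmd       *exec.Cmd
	kill      context.CancelFunc
	stdin     io.WriteCloser
	stdout    io.ReadCloser
	stderrBuf *bytes.Buffer
}

// startIOCommand forks the process with pipes attached and a cancel function
// that doubles as the kill switch via exec.CommandContext.
func startIOCommand(ctx context.Context, name string, args ...string) (*ioCommand, error) {
	ctx, cancel := context.WithCancel(ctx)
	cmd := exec.CommandContext(ctx, name, args...)
	stderr := &bytes.Buffer{}
	cmd.Stderr = stderr
	stdin, err := cmd.StdinPipe()
	if err != nil {
		cancel()
		return nil, err
	}
	stdout, err := cmd.StdoutPipe()
	if err != nil {
		cancel()
		return nil, err
	}
	if err := cmd.Start(); err != nil {
		cancel()
		return nil, err
	}
	return &ioCommand{cmd: cmd, kill: cancel, stdin: stdin, stdout: stdout, stderrBuf: stderr}, nil
}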
View File

@ -1,8 +1,9 @@
package zfs
import (
"github.com/stretchr/testify/assert"
"testing"
"github.com/stretchr/testify/assert"
)
func TestNewDatasetPathTree(t *testing.T) {

View File

@ -13,7 +13,8 @@ type DatasetFilter interface {
func NoFilter() DatasetFilter {
return noFilter{}
}
type noFilter struct {}
type noFilter struct{}
var _ DatasetFilter = noFilter{}

View File

@ -2,8 +2,9 @@ package zfs
import (
"fmt"
"github.com/pkg/errors"
"strconv"
"github.com/pkg/errors"
)
const ReplicationCursorBookmarkName = "zrepl_replication_cursor"

View File

@ -2,9 +2,11 @@ package zfs_test
import (
"context"
"github.com/stretchr/testify/assert"
"github.com/zrepl/zrepl/zfs"
"testing"
"github.com/stretchr/testify/assert"
"github.com/zrepl/zrepl/zfs"
)
type ResumeTokenTest struct {

View File

@ -5,11 +5,12 @@ import (
"context"
"errors"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"io"
"strconv"
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
)
type VersionType string

View File

@ -15,9 +15,11 @@ import (
"time"
"context"
"github.com/prometheus/client_golang/prometheus"
"regexp"
"strconv"
"github.com/prometheus/client_golang/prometheus"
"github.com/zrepl/zrepl/util/envconst"
)
@ -351,7 +353,7 @@ type readErrRecorder struct {
type sendStreamCopierError struct {
isReadErr bool // if false, it's a write error
err error
err error
}
func (e sendStreamCopierError) Error() string {
@ -362,7 +364,7 @@ func (e sendStreamCopierError) Error() string {
}
}
func (e sendStreamCopierError) IsReadError() bool { return e.isReadErr }
func (e sendStreamCopierError) IsReadError() bool { return e.isReadErr }
func (e sendStreamCopierError) IsWriteError() bool { return !e.isReadErr }
func (r *readErrRecorder) Read(p []byte) (n int, err error) {
@ -410,13 +412,12 @@ func pipeWithCapacityHint(capacity int) (r, w *os.File, err error) {
}
type sendStream struct {
cmd *exec.Cmd
cmd *exec.Cmd
kill context.CancelFunc
closeMtx sync.Mutex
closeMtx sync.Mutex
stdoutReader *os.File
opErr error
opErr error
}
func (s *sendStream) Read(p []byte) (n int, err error) {
@ -484,7 +485,7 @@ func (s *sendStream) killAndWait(precedingReadErr error) error {
if closePipeErr == nil {
// avoid double-closes in case anything below doesn't work
// and someone calls Close again
s.stdoutReader = nil
s.stdoutReader = nil
} else {
return closePipeErr
}
@ -493,7 +494,7 @@ func (s *sendStream) killAndWait(precedingReadErr error) error {
// we managed to tear things down, now let's give the user some pretty *ZFSError
if exitErr != nil {
s.opErr = &ZFSError{
Stderr: exitErr.Stderr,
Stderr: exitErr.Stderr,
WaitErr: exitErr,
}
} else {
@ -545,15 +546,14 @@ func ZFSSend(ctx context.Context, fs string, from, to string, token string) (str
stdoutWriter.Close()
stream := &sendStream{
cmd: cmd,
kill: cancel,
cmd: cmd,
kill: cancel,
stdoutReader: stdoutReader,
}
return newSendStreamCopier(stream), err
}
type DrySendType string
const (
@ -563,25 +563,27 @@ const (
func DrySendTypeFromString(s string) (DrySendType, error) {
switch s {
case string(DrySendTypeFull): return DrySendTypeFull, nil
case string(DrySendTypeIncremental): return DrySendTypeIncremental, nil
case string(DrySendTypeFull):
return DrySendTypeFull, nil
case string(DrySendTypeIncremental):
return DrySendTypeIncremental, nil
default:
return "", fmt.Errorf("unknown dry send type %q", s)
}
}
type DrySendInfo struct {
Type DrySendType
Filesystem string // parsed from To field
From, To string // direct copy from ZFS output
SizeEstimate int64 // -1 if size estimate is not possible
Type DrySendType
Filesystem string // parsed from To field
From, To string // direct copy from ZFS output
SizeEstimate int64 // -1 if size estimate is not possible
}
var (
// keep same number of capture groups for unmarshalInfoLine homogeneity
sendDryRunInfoLineRegexFull = regexp.MustCompile(`^(full)\t()([^\t]+@[^\t]+)\t([0-9]+)$`)
// cannot enforce '[#@]' in incremental source, see test cases
// cannot enforce '[#@]' in incremental source, see test cases
sendDryRunInfoLineRegexIncremental = regexp.MustCompile(`^(incremental)\t([^\t]+)\t([^\t]+@[^\t]+)\t([0-9]+)$`)
)
@ -602,7 +604,6 @@ func (s *DrySendInfo) unmarshalZFSOutput(output []byte) (err error) {
return fmt.Errorf("no match for info line (regex1 %s) (regex2 %s)", sendDryRunInfoLineRegexFull, sendDryRunInfoLineRegexIncremental)
}
// unmarshal info line, looks like this:
// full zroot/test/a@1 5389768
// incremental zroot/test/a@1 zroot/test/a@2 5383936
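
The two regexes above parse the tab-separated summary line of a dry-run send, and the deliberately empty capture group in the full pattern keeps both patterns structurally identical for the shared unmarshaling code. A small standalone check against the sample lines from the comments, with tab separators restored:

package main

import (
	"fmt"
	"regexp"
)

// the same two patterns as above, repeated so this sketch stands alone
var (
	fullRegex        = regexp.MustCompile(`^(full)\t()([^\t]+@[^\t]+)\t([0-9]+)$`)
	incrementalRegex = regexp.MustCompile(`^(incremental)\t([^\t]+)\t([^\t]+@[^\t]+)\t([0-9]+)$`)
)

func main() {
	// the sample lines from the comments above, with the tab separators restored
	full := "full\tzroot/test/a@1\t5389768"
	inc := "incremental\tzroot/test/a@1\tzroot/test/a@2\t5383936"

	if m := fullRegex.FindStringSubmatch(full); m != nil {
		// m[2] is the deliberately empty group that keeps the capture-group
		// count identical to the incremental pattern
		fmt.Println("full send to", m[3], "estimated", m[4], "bytes")
	}
	if m := incrementalRegex.FindStringSubmatch(inc); m != nil {
		fmt.Println("incremental from", m[2], "to", m[3], "estimated", m[4], "bytes")
	}
}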
@ -653,19 +654,19 @@ func ZFSSendDry(fs string, from, to string, token string) (_ *DrySendInfo, err e
* Redacted send & recv will bring this functionality, see
* https://github.com/openzfs/openzfs/pull/484
*/
fromAbs, err := absVersion(fs, from)
if err != nil {
return nil, fmt.Errorf("error building abs version for 'from': %s", err)
}
toAbs, err := absVersion(fs, to)
if err != nil {
return nil, fmt.Errorf("error building abs version for 'to': %s", err)
}
return &DrySendInfo{
Type: DrySendTypeIncremental,
Filesystem: fs,
From: fromAbs,
To: toAbs,
fromAbs, err := absVersion(fs, from)
if err != nil {
return nil, fmt.Errorf("error building abs version for 'from': %s", err)
}
toAbs, err := absVersion(fs, to)
if err != nil {
return nil, fmt.Errorf("error building abs version for 'to': %s", err)
}
return &DrySendInfo{
Type: DrySendTypeIncremental,
Filesystem: fs,
From: fromAbs,
To: toAbs,
SizeEstimate: -1}, nil
}
@ -784,7 +785,7 @@ func ZFSRecv(ctx context.Context, fs string, streamCopier StreamCopier, opts Rec
if err != nil {
return err
}
cmd.Stdin = stdin
if err = cmd.Start(); err != nil {
@ -794,7 +795,7 @@ func ZFSRecv(ctx context.Context, fs string, streamCopier StreamCopier, opts Rec
}
stdin.Close()
defer stdinWriter.Close()
pid := cmd.Process.Pid
debug := func(format string, args ...interface{}) {
debug("recv: pid=%v: %s", pid, fmt.Sprintf(format, args...))
@ -823,7 +824,7 @@ func ZFSRecv(ctx context.Context, fs string, streamCopier StreamCopier, opts Rec
copierErr := <-copierErrChan
debug("copierErr: %T %s", copierErr, copierErr)
if copierErr != nil {
cancelCmd()
cancelCmd()
}
waitErr := <-waitErrChan
@ -838,7 +839,7 @@ func ZFSRecv(ctx context.Context, fs string, streamCopier StreamCopier, opts Rec
type ClearResumeTokenError struct {
ZFSOutput []byte
CmdError error
CmdError error
}
func (e ClearResumeTokenError) Error() string {
@ -947,13 +948,27 @@ const (
func (s zfsPropertySource) zfsGetSourceFieldPrefixes() []string {
prefixes := make([]string, 0, 7)
if s&sourceLocal != 0 {prefixes = append(prefixes, "local")}
if s&sourceDefault != 0 {prefixes = append(prefixes, "default")}
if s&sourceInherited != 0 {prefixes = append(prefixes, "inherited")}
if s&sourceNone != 0 {prefixes = append(prefixes, "-")}
if s&sourceTemporary != 0 { prefixes = append(prefixes, "temporary")}
if s&sourceReceived != 0 { prefixes = append(prefixes, "received")}
if s == sourceAny { prefixes = append(prefixes, "") }
if s&sourceLocal != 0 {
prefixes = append(prefixes, "local")
}
if s&sourceDefault != 0 {
prefixes = append(prefixes, "default")
}
if s&sourceInherited != 0 {
prefixes = append(prefixes, "inherited")
}
if s&sourceNone != 0 {
prefixes = append(prefixes, "-")
}
if s&sourceTemporary != 0 {
prefixes = append(prefixes, "temporary")
}
if s&sourceReceived != 0 {
prefixes = append(prefixes, "received")
}
if s == sourceAny {
prefixes = append(prefixes, "")
}
return prefixes
}
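
The expanded if-blocks map property-source bits to the prefixes that appear in the SOURCE column of zfs get output, with the empty prefix standing for "accept any source". A compact restatement of that mapping as a table-driven lookup (the source type, constant names and values here are assumptions for the sketch, not the zrepl definitions):

package main

import "fmt"

// source mirrors the bit-flag idea of the property-source type above;
// the constant names and values are chosen for the sketch only.
type source uint

const (
	srcLocal source = 1 << iota
	srcDefault
	srcInherited
	srcNone
	srcTemporary
	srcReceived
)

const srcAny = srcLocal | srcDefault | srcInherited | srcNone | srcTemporary | srcReceived

// prefixes returns the SOURCE-column prefixes selected by the bits in s,
// adding the empty prefix when every source is acceptable.
func (s source) prefixes() []string {
	table := []struct {
		flag source
		name string
	}{
		{srcLocal, "local"},
		{srcDefault, "default"},
		{srcInherited, "inherited"},
		{srcNone, "-"},
		{srcTemporary, "temporary"},
		{srcReceived, "received"},
	}
	out := make([]string, 0, len(table)+1)
	for _, e := range table {
		if s&e.flag != 0 {
			out = append(out, e.name)
		}
	}
	if s == srcAny {
		out = append(out, "")
	}
	return out
}

func main() {
	fmt.Println((srcLocal | srcInherited).prefixes()) // [local inherited]
	fmt.Println(srcAny.prefixes())                    // every prefix plus ""
}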
@ -992,7 +1007,7 @@ func zfsGet(path string, props []string, allowedSources zfsPropertySource) (*ZFS
return nil, fmt.Errorf("zfs get did not return property,value,source tuples")
}
for _, p := range allowedPrefixes {
if strings.HasPrefix(fields[2],p) {
if strings.HasPrefix(fields[2], p) {
res.m[fields[0]] = fields[1]
break
}
@ -1010,8 +1025,10 @@ func ZFSDestroy(dataset string) (err error) {
filesystem = dataset
} else {
switch dataset[idx] {
case '@': dstype = "snapshot"
case '#': dstype = "bookmark"
case '@':
dstype = "snapshot"
case '#':
dstype = "bookmark"
}
filesystem = dataset[:idx]
}
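
The last hunk reformats the switch that classifies a destroy target by its separator: '@' means snapshot, '#' means bookmark, and no separator means filesystem. A standalone sketch of that classification (classify and the use of strings.IndexAny are assumptions for illustration, not the actual ZFSDestroy code):

package main

import (
	"fmt"
	"strings"
)

// classify splits a destroy argument the way the switch above does: '@' marks
// a snapshot, '#' marks a bookmark, anything else is treated as a filesystem.
func classify(dataset string) (filesystem, dstype string) {
	idx := strings.IndexAny(dataset, "@#")
	if idx == -1 {
		return dataset, "filesystem"
	}
	switch dataset[idx] {
	case '@':
		dstype = "snapshot"
	case '#':
		dstype = "bookmark"
	}
	return dataset[:idx], dstype
}

func main() {
	fmt.Println(classify("zroot/test/a@1"))      // zroot/test/a snapshot
	fmt.Println(classify("zroot/test/a#cursor")) // zroot/test/a bookmark
	fmt.Println(classify("zroot/test/a"))        // zroot/test/a filesystem
}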

View File

@ -1,8 +1,9 @@
package zfs
import (
"github.com/stretchr/testify/assert"
"testing"
"github.com/stretchr/testify/assert"
)
func TestZFSListHandlesProducesZFSErrorOnNonZeroExit(t *testing.T) {
@ -33,8 +34,8 @@ func TestDatasetPathTrimNPrefixComps(t *testing.T) {
func TestZFSPropertySource(t *testing.T) {
tcs := []struct{
in zfsPropertySource
tcs := []struct {
in zfsPropertySource
exp []string
}{
{
@ -43,11 +44,11 @@ func TestZFSPropertySource(t *testing.T) {
exp: []string{"local", "default", "inherited", "-", "temporary", "received", ""},
},
{
in: sourceTemporary,
in: sourceTemporary,
exp: []string{"temporary"},
},
{
in: sourceLocal|sourceInherited,
in: sourceLocal | sourceInherited,
exp: []string{"local", "inherited"},
},
}
@ -137,9 +138,9 @@ size 10518512
incrementalWithSpacesInIntermediateComponent := "\nincremental\tblaffoo\tpool1/otherjob/another ds with spaces/childfs@blaffoo2\t624\nsize\t624\n"
type tc struct {
name string
in string
exp *DrySendInfo
name string
in string
exp *DrySendInfo
expErr bool
}
@ -147,10 +148,10 @@ size 10518512
{
name: "fullSend", in: fullSend,
exp: &DrySendInfo{
Type: DrySendTypeFull,
Filesystem: "zroot/test/a",
From: "",
To: "zroot/test/a@1",
Type: DrySendTypeFull,
Filesystem: "zroot/test/a",
From: "",
To: "zroot/test/a@1",
SizeEstimate: 5389768,
},
},
@ -158,7 +159,7 @@ size 10518512
name: "incSend", in: incSend,
exp: &DrySendInfo{
Type: DrySendTypeIncremental,
Filesystem: "zroot/test/a",
Filesystem: "zroot/test/a",
From: "zroot/test/a@1",
To: "zroot/test/a@2",
SizeEstimate: 5383936,
@ -168,16 +169,16 @@ size 10518512
name: "incSendBookmark", in: incSendBookmark,
exp: &DrySendInfo{
Type: DrySendTypeIncremental,
Filesystem: "zroot/test/a",
Filesystem: "zroot/test/a",
From: "zroot/test/a#1",
To: "zroot/test/a@2",
SizeEstimate: 5383312,
},
},
{
{
name: "incNoToken", in: incNoToken,
exp: &DrySendInfo{
Type: DrySendTypeIncremental,
Type: DrySendTypeIncremental,
Filesystem: "zroot/test/a",
// as can be seen in the string incNoToken,
// we cannot infer whether the incremental source is a snapshot or bookmark
@ -189,10 +190,10 @@ size 10518512
{
name: "fullNoToken", in: fullNoToken,
exp: &DrySendInfo{
Type: DrySendTypeFull,
Filesystem: "zroot/test/a",
From: "",
To: "zroot/test/a@3",
Type: DrySendTypeFull,
Filesystem: "zroot/test/a",
From: "",
To: "zroot/test/a@3",
SizeEstimate: 10518512,
},
},