wip floocode backup

commit c2b04d10c5 (parent b0d17803f0)
Anton Schirg, 2018-08-27 15:22:32 +02:00
19 changed files with 206 additions and 286 deletions

Gopkg.lock (generated)

@@ -17,14 +17,6 @@
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
branch = "master"
digest = "1:ae162f9b5c46f6d5ff4bd53a3d78f72e2eb6676c11c5d33b8b106c36f87ddb31"
name = "github.com/dustin/go-humanize"
packages = ["."]
pruneopts = ""
revision = "bb3d318650d48840a39aa21a027c6630e198e626"
[[projects]]
branch = "master"
digest = "1:5d0a2385edf4ba44f3b7b76bc0436ceb8f62bf55aa5d540a9eb9ec6c58d86809"
@@ -247,7 +239,6 @@
analyzer-name = "dep"
analyzer-version = 1
input-imports = [
"github.com/dustin/go-humanize",
"github.com/go-logfmt/logfmt",
"github.com/go-yaml/yaml",
"github.com/golang/protobuf/proto",


@@ -6,6 +6,12 @@ import (
"strings"
)
type AnyFSVFilter struct{}
func (AnyFSVFilter) Filter(t zfs.VersionType, name string) (accept bool, err error) {
return true, nil
}
type PrefixFilter struct {
prefix string
fstype zfs.VersionType
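Note on the hunk above: AnyFSVFilter is an accept-everything implementation of the same Filter signature that PrefixFilter satisfies, so it can be dropped in wherever a version filter is expected (it is used for the receiver endpoint and the pruner further down in this commit). The following is a minimal, self-contained sketch of that pattern; the types are simplified stand-ins, not the actual zrepl zfs package.

```go
package main

import (
	"fmt"
	"strings"
)

// Simplified stand-ins for zfs.VersionType and the Filter signature used by
// AnyFSVFilter and PrefixFilter in the hunk above.
type VersionType string

type FilesystemVersionFilter interface {
	Filter(t VersionType, name string) (accept bool, err error)
}

// Accept-all filter, analogous to AnyFSVFilter.
type anyFilter struct{}

func (anyFilter) Filter(t VersionType, name string) (bool, error) { return true, nil }

// Prefix filter, analogous to PrefixFilter: accept only versions whose name
// starts with the configured prefix.
type prefixFilter struct{ prefix string }

func (f prefixFilter) Filter(t VersionType, name string) (bool, error) {
	return strings.HasPrefix(name, f.prefix), nil
}

func main() {
	versions := []string{"zrepl_20180827_120000", "manual_before_upgrade"}
	filters := []FilesystemVersionFilter{anyFilter{}, prefixFilter{prefix: "zrepl_"}}
	for _, f := range filters {
		for _, v := range versions {
			accept, _ := f.Filter("snapshot", v)
			fmt.Printf("%T: %s -> %v\n", f, v, accept)
		}
	}
}
```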


@@ -6,6 +6,7 @@ import (
"context"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/cmd/config"
"github.com/zrepl/zrepl/cmd/endpoint"
"github.com/zrepl/zrepl/replication"
"github.com/zrepl/zrepl/zfs"
@@ -22,7 +23,7 @@ type LocalJob struct {
Debug JobDebugSettings
}
func parseLocalJob(c JobParsingContext, name string, i map[string]interface{}) (j *LocalJob, err error) {
func parseLocalJob(c config.Global, in source.LocalJob) (j *LocalJob, err error) {
var asMap struct {
Mapping map[string]string


@@ -11,6 +11,7 @@ import (
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/problame/go-streamrpc"
"github.com/zrepl/zrepl/cmd/config"
"github.com/zrepl/zrepl/cmd/endpoint"
"github.com/zrepl/zrepl/replication"
)
@@ -21,74 +22,44 @@ type PullJob struct {
Interval time.Duration
Mapping *DatasetMapFilter
// constructed from mapping during parsing
pruneFilter *DatasetMapFilter
SnapshotPrefix string
Prune PrunePolicy
Debug JobDebugSettings
pruneFilter *DatasetMapFilter
Prune PrunePolicy
rep *replication.Replication
}
func parsePullJob(c JobParsingContext, name string, i map[string]interface{}) (j *PullJob, err error) {
func parsePullJob(c config.Global, in config.PullJob) (j *PullJob, err error) {
var asMap struct {
Connect map[string]interface{}
Interval string
Mapping map[string]string
InitialReplPolicy string `mapstructure:"initial_repl_policy"`
Prune map[string]interface{}
SnapshotPrefix string `mapstructure:"snapshot_prefix"`
Debug map[string]interface{}
}
j = &PullJob{Name: in.Name}
if err = mapstructure.Decode(i, &asMap); err != nil {
err = errors.Wrap(err, "mapstructure error")
return nil, err
}
j = &PullJob{Name: name}
j.Connect, err = parseConnect(asMap.Connect)
j.Connect, err = parseConnect(in.Replication.Connect)
if err != nil {
err = errors.Wrap(err, "cannot parse 'connect'")
return nil, err
}
if j.Interval, err = parsePostitiveDuration(asMap.Interval); err != nil {
err = errors.Wrap(err, "cannot parse 'interval'")
j.Interval = in.Replication.Interval
j.Mapping = NewDatasetMapFilter(1, false)
if err := j.Mapping.Add("<", in.Replication.RootDataset); err != nil {
return nil, err
}
j.Mapping, err = parseDatasetMapFilter(asMap.Mapping, false)
if err != nil {
err = errors.Wrap(err, "cannot parse 'mapping'")
j.pruneFilter = NewDatasetMapFilter(1, true)
if err := j.pruneFilter.Add(in.Replication.RootDataset, MapFilterResultOk); err != nil {
return nil, err
}
if j.pruneFilter, err = j.Mapping.InvertedFilter(); err != nil {
err = errors.Wrap(err, "cannot automatically invert 'mapping' for prune job")
return nil, err
}
if j.SnapshotPrefix, err = parseSnapshotPrefix(asMap.SnapshotPrefix); err != nil {
return
}
if j.Prune, err = parsePrunePolicy(asMap.Prune, false); err != nil {
err = errors.Wrap(err, "cannot parse prune policy")
return
}
if err = mapstructure.Decode(asMap.Debug, &j.Debug); err != nil {
err = errors.Wrap(err, "cannot parse 'debug'")
return
}
if j.Debug.Conn.ReadDump != "" || j.Debug.Conn.WriteDump != "" {
if in.Debug.Conn.ReadDump != "" || j.Debug.Conn.WriteDump != "" {
logConnecter := logNetConnConnecter{
Connecter: j.Connect,
ReadDump: j.Debug.Conn.ReadDump,
WriteDump: j.Debug.Conn.WriteDump,
ReadDump: in.Debug.Conn.ReadDump,
WriteDump: in.Debug.Conn.WriteDump,
}
j.Connect = logConnecter
}
@@ -96,11 +67,7 @@ func parsePullJob(c JobParsingContext, name string, i map[string]interface{}) (j
return
}
func (j *PullJob) JobName() string {
return j.Name
}
func (j *PullJob) JobType() JobType { return JobTypePull }
func (j *PullJob) JobName() string { return j.Name }
func (j *PullJob) JobStart(ctx context.Context) {
@@ -159,10 +126,7 @@ func (j *PullJob) doRun(ctx context.Context) {
sender := endpoint.NewRemote(client)
receiver, err := endpoint.NewReceiver(
j.Mapping,
NewPrefixFilter(j.SnapshotPrefix),
)
receiver, err := endpoint.NewReceiver(j.Mapping, AnyFSVFilter{})
if err != nil {
log.WithError(err).Error("error creating receiver endpoint")
return
@@ -198,7 +162,6 @@ func (j *PullJob) Pruner(side PrunePolicySide, dryRun bool) (p Pruner, err error
time.Now(),
dryRun,
j.pruneFilter,
j.SnapshotPrefix,
j.Prune,
}
return


@@ -4,7 +4,6 @@ import (
"fmt"
"strings"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/cmd/endpoint"
"github.com/zrepl/zrepl/zfs"
@@ -258,16 +257,14 @@ func (m DatasetMapFilter) parseDatasetFilterResult(result string) (pass bool, er
return false, fmt.Errorf("'%s' is not a valid filter result", result)
}
func parseDatasetMapFilter(mi interface{}, filterMode bool) (f *DatasetMapFilter, err error) {
func parseDatasetMapFilterFilesystems(in map[string]bool) (f *DatasetMapFilter, err error) {
var m map[string]string
if err = mapstructure.Decode(mi, &m); err != nil {
err = fmt.Errorf("maps / filters must be specified as map[string]string: %s", err)
return
}
f = NewDatasetMapFilter(len(m), filterMode)
for pathPattern, mapping := range m {
f = NewDatasetMapFilter(len(in), true)
for pathPattern, accept := range in {
mapping := MapFilterResultOmit
if accept {
mapping = MapFilterResultOk
}
if err = f.Add(pathPattern, mapping); err != nil {
err = fmt.Errorf("invalid mapping entry ['%s':'%s']: %s", pathPattern, mapping, err)
return


@@ -8,10 +8,9 @@ import (
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/problame/go-streamrpc"
"github.com/zrepl/zrepl/cmd/config"
"github.com/zrepl/zrepl/cmd/pruning/retentiongrid"
"os"
"regexp"
"strconv"
"time"
)
var ConfigFileDefaultLocations []string = []string{
@@ -141,116 +140,72 @@ func parseConfig(i interface{}) (c *Config, err error) {
}
func extractStringField(i map[string]interface{}, key string, notempty bool) (field string, err error) {
vi, ok := i[key]
if !ok {
err = errors.Errorf("must have field '%s'", key)
return "", err
}
field, ok = vi.(string)
if !ok {
err = errors.Errorf("'%s' field must have type string", key)
return "", err
}
if notempty && len(field) <= 0 {
err = errors.Errorf("'%s' field must not be empty", key)
return "", err
}
return
}
type JobParsingContext struct {
ConfigParsingContext
}
func parseJob(c JobParsingContext, i map[string]interface{}) (j Job, err error) {
func parseJob(c config.Global, in config.JobEnum) (j Job, err error) {
name, err := extractStringField(i, "name", true)
if err != nil {
return nil, err
switch v := in.Ret.(type) {
case config.PullJob:
return parsePullJob(c, v)
case config.SourceJob:
return parseSourceJob(c, v)
case config.LocalJob:
return parseLocalJob(c, v)
default:
panic(fmt.Sprintf("implementation error: unknown job type %s", v))
}
for _, r := range ReservedJobNames {
if name == r {
err = errors.Errorf("job name '%s' is reserved", name)
return nil, err
}
func parseConnect(in config.ConnectEnum) (c streamrpc.Connecter, err error) {
switch v := in.Ret.(type) {
case config.SSHStdinserverConnect:
return parseSSHStdinserverConnecter(v)
case config.TCPConnect:
return parseTCPConnecter(v)
case config.TLSConnect:
return parseTLSConnecter(v)
default:
panic(fmt.Sprintf("unknown connect type %v", v))
}
}
func parsePruning(in []config.PruningEnum, willSeeBookmarks bool) (p Pruner, err error) {
policies := make([]PrunePolicy, len(in))
for i := range in {
if policies[i], err = parseKeepRule(in[i]); err != nil {
return nil, errors.Wrapf(err, "invalid keep rule #%d:", i)
}
}
jobtypeStr, err := extractStringField(i, "type", true)
if err != nil {
return nil, err
}
jobtype, err := ParseUserJobType(jobtypeStr)
if err != nil {
return nil, err
}
switch jobtype {
case JobTypePull:
return parsePullJob(c, name, i)
case JobTypeSource:
return parseSourceJob(c, name, i)
case JobTypeLocal:
return parseLocalJob(c, name, i)
default:
panic(fmt.Sprintf("implementation error: unknown job type %s", jobtype))
}
}
func parseConnect(i map[string]interface{}) (c streamrpc.Connecter, err error) {
t, err := extractStringField(i, "type", true)
if err != nil {
return nil, err
}
switch t {
case "ssh+stdinserver":
return parseSSHStdinserverConnecter(i)
case "tcp":
return parseTCPConnecter(i)
func parseKeepRule(in config.PruningEnum) (p PrunePolicy, err error) {
switch v := in.Ret.(type) {
case config.PruneGrid:
return retentiongrid.ParseGridPrunePolicy(v, willSeeBookmarks)
//case config.PruneKeepLastN:
//case config.PruneKeepPrefix:
//case config.PruneKeepNotReplicated:
default:
return nil, errors.Errorf("unknown connection type '%s'", t)
}
}
func parsePrunePolicy(v map[string]interface{}, willSeeBookmarks bool) (p PrunePolicy, err error) {
policyName, err := extractStringField(v, "policy", true)
if err != nil {
return
}
switch policyName {
case "grid":
return parseGridPrunePolicy(v, willSeeBookmarks)
case "noprune":
return NoPrunePolicy{}, nil
default:
err = errors.Errorf("unknown policy '%s'", policyName)
return
panic(fmt.Sprintf("unknown keep rule type %v", v))
}
}
func parseAuthenticatedChannelListenerFactory(c JobParsingContext, v map[string]interface{}) (p ListenerFactory, err error) {
func parseAuthenticatedChannelListenerFactory(c config.Global, in config.ServeEnum) (p ListenerFactory, err error) {
t, err := extractStringField(v, "type", true)
if err != nil {
return nil, err
}
switch t {
case "stdinserver":
switch v := in.Ret.(type) {
case config.StdinserverServer:
return parseStdinserverListenerFactory(c, v)
case "tcp":
case config.TCPServe:
return parseTCPListenerFactory(c, v)
case config.TLSServe:
return parseTLSListenerFactory(c, v)
default:
err = errors.Errorf("unknown type '%s'", t)
return
panic(fmt.Sprintf("unknown listener type %v", v))
}
}
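A note on the dispatch style introduced above: the new config package evidently wraps each configuration variant in an enum struct whose Ret field holds the concrete type, and parseJob, parseConnect, parseKeepRule, and parseAuthenticatedChannelListenerFactory type-switch on it. Below is a minimal sketch of that pattern with made-up stand-in types; these are not the actual config package definitions.

```go
package main

import "fmt"

// Made-up stand-ins for the enum-wrapper pattern: the *Enum struct carries
// the concrete config variant in an interface{} field.
type PullJob struct{ Name string }
type SourceJob struct{ Name string }

type JobEnum struct{ Ret interface{} }

// parseJob-style dispatcher: an unknown variant is a programming error in the
// config layer, hence the panic instead of an error return.
func describeJob(in JobEnum) string {
	switch v := in.Ret.(type) {
	case PullJob:
		return "pull job " + v.Name
	case SourceJob:
		return "source job " + v.Name
	default:
		panic(fmt.Sprintf("implementation error: unknown job type %T", v))
	}
}

func main() {
	fmt.Println(describeJob(JobEnum{Ret: PullJob{Name: "backup"}}))
	fmt.Println(describeJob(JobEnum{Ret: SourceJob{Name: "prod"}}))
}
```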


@@ -1,32 +1,25 @@
package cmd
import (
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/problame/go-netssh"
"github.com/zrepl/zrepl/cmd/config"
"github.com/zrepl/zrepl/cmd/helpers"
"net"
"path"
"github.com/zrepl/zrepl/cmd/helpers"
)
type StdinserverListenerFactory struct {
ClientIdentity string `mapstructure:"client_identity"`
ClientIdentity string
sockpath string
}
func parseStdinserverListenerFactory(c JobParsingContext, i map[string]interface{}) (f *StdinserverListenerFactory, err error) {
func parseStdinserverListenerFactory(c config.Global, in config.StdinserverServer) (f *StdinserverListenerFactory, err error) {
f = &StdinserverListenerFactory{}
if err = mapstructure.Decode(i, f); err != nil {
return nil, errors.Wrap(err, "mapstructure error")
}
if !(len(f.ClientIdentity) > 0) {
err = errors.Errorf("must specify 'client_identity'")
return
f = &StdinserverListenerFactory{
ClientIdentity: in.ClientIdentity,
}
f.sockpath = path.Join(c.Global.Serve.Stdinserver.SockDir, f.ClientIdentity)
f.sockpath = path.Join(c.Serve.StdinServer.SockDir, f.ClientIdentity)
return
}


@@ -1,88 +1,26 @@
package cmd
import (
"crypto/tls"
"crypto/x509"
"net"
"time"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/cmd/tlsconf"
"github.com/zrepl/zrepl/cmd/config"
)
type TCPListenerFactory struct {
Address string
tls bool
clientCA *x509.CertPool
serverCert tls.Certificate
clientCommonName string
Address string
}
func parseTCPListenerFactory(c JobParsingContext, i map[string]interface{}) (*TCPListenerFactory, error) {
func parseTCPListenerFactory(c config.Global, in config.TCPServe) (*TCPListenerFactory, error) {
var in struct {
Address string
TLS map[string]interface{}
lf := &TCPListenerFactory{
Address: in.Listen,
}
if err := mapstructure.Decode(i, &in); err != nil {
return nil, errors.Wrap(err, "mapstructure error")
}
lf := &TCPListenerFactory{}
if in.Address == "" {
return nil, errors.New("must specify field 'address'")
}
lf.Address = in.Address
if in.TLS != nil {
err := func(i map[string]interface{}) (err error) {
var in struct {
CA string
Cert string
Key string
ClientCN string `mapstructure:"client_cn"`
}
if err := mapstructure.Decode(i, &in); err != nil {
return errors.Wrap(err, "mapstructure error")
}
if in.CA == "" || in.Cert == "" || in.Key == "" || in.ClientCN == "" {
return errors.New("fields 'ca', 'cert', 'key' and 'client_cn' must be specified")
}
lf.clientCommonName = in.ClientCN
lf.clientCA, err = tlsconf.ParseCAFile(in.CA)
if err != nil {
return errors.Wrap(err, "cannot parse ca file")
}
lf.serverCert, err = tls.LoadX509KeyPair(in.Cert, in.Key)
if err != nil {
return errors.Wrap(err, "cannot parse cer/key pair")
}
lf.tls = true // mark success
return nil
}(in.TLS)
if err != nil {
return nil, errors.Wrap(err, "error parsing TLS config in field 'tls'")
}
}
return lf, nil
}
var TCPListenerHandshakeTimeout = 10 * time.Second // FIXME make configurable
func (f *TCPListenerFactory) Listen() (net.Listener, error) {
l, err := net.Listen("tcp", f.Address)
if !f.tls || err != nil {
return l, err
}
tl := tlsconf.NewClientAuthListener(l, f.clientCA, f.serverCert, f.clientCommonName, TCPListenerHandshakeTimeout)
return tl, nil
return net.Listen("tcp", f.Address)
}

cmd/config_serve_tls.go (new file)

@@ -0,0 +1,78 @@
package cmd
import (
"crypto/tls"
"crypto/x509"
"net"
"time"
"github.com/mitchellh/mapstructure"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/cmd/config"
"github.com/zrepl/zrepl/cmd/tlsconf"
)
type TCPListenerFactory struct {
Address string
tls bool
clientCA *x509.CertPool
serverCert tls.Certificate
clientCommonName string
}
func parseTCPListenerFactory(c config.Global, in config.TCPServe) (*TCPListenerFactory, error) {
lf := &TCPListenerFactory{
Address: in.Listen,
}
if in.TLS != nil {
err := func(i map[string]interface{}) (err error) {
var in struct {
CA string
Cert string
Key string
ClientCN string `mapstructure:"client_cn"`
}
if err := mapstructure.Decode(i, &in); err != nil {
return errors.Wrap(err, "mapstructure error")
}
if in.CA == "" || in.Cert == "" || in.Key == "" || in.ClientCN == "" {
return errors.New("fields 'ca', 'cert', 'key' and 'client_cn' must be specified")
}
lf.clientCommonName = in.ClientCN
lf.clientCA, err = tlsconf.ParseCAFile(in.CA)
if err != nil {
return errors.Wrap(err, "cannot parse ca file")
}
lf.serverCert, err = tls.LoadX509KeyPair(in.Cert, in.Key)
if err != nil {
return errors.Wrap(err, "cannot parse cer/key pair")
}
lf.tls = true // mark success
return nil
}(in.TLS)
if err != nil {
return nil, errors.Wrap(err, "error parsing TLS config in field 'tls'")
}
}
return lf, nil
}
var TCPListenerHandshakeTimeout = 10 * time.Second // FIXME make configurable
func (f *TCPListenerFactory) Listen() (net.Listener, error) {
l, err := net.Listen("tcp", f.Address)
if !f.tls || err != nil {
return l, err
}
tl := tlsconf.NewClientAuthListener(l, f.clientCA, f.serverCert, f.clientCommonName, TCPListenerHandshakeTimeout)
return tl, nil
}
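The new file above wires TLS client authentication through the zrepl tlsconf helpers. As a rough orientation only, here is what such a listener amounts to with the standard library alone; this is an assumption about what tlsconf.ParseCAFile and tlsconf.NewClientAuthListener wrap, not their actual code, the file paths and address are placeholders, and the clientCommonName check from the code above is omitted.

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"errors"
	"fmt"
	"log"
	"net"
	"os"
)

// listenClientAuthTLS builds a TCP listener that requires and verifies client
// certificates against a dedicated CA, roughly what the factory above sets up.
func listenClientAuthTLS(addr, caFile, certFile, keyFile string) (net.Listener, error) {
	caPEM, err := os.ReadFile(caFile)
	if err != nil {
		return nil, fmt.Errorf("cannot read ca file: %w", err)
	}
	clientCA := x509.NewCertPool()
	if !clientCA.AppendCertsFromPEM(caPEM) {
		return nil, errors.New("cannot parse ca file")
	}
	serverCert, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		return nil, fmt.Errorf("cannot parse cert/key pair: %w", err)
	}
	conf := &tls.Config{
		Certificates: []tls.Certificate{serverCert},
		ClientCAs:    clientCA,
		ClientAuth:   tls.RequireAndVerifyClientCert,
	}
	l, err := net.Listen("tcp", addr)
	if err != nil {
		return nil, err
	}
	return tls.NewListener(l, conf), nil
}

func main() {
	// Placeholder paths; this fails at runtime unless the files exist.
	l, err := listenClientAuthTLS(":8888", "ca.pem", "server.pem", "server.key")
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	log.Println("listening on", l.Addr())
}
```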


@@ -7,14 +7,14 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/zrepl/zrepl/cmd/daemon"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/version"
"io"
golog "log"
"net"
"net/http"
"os"
"github.com/zrepl/zrepl/version"
"github.com/zrepl/zrepl/cmd/daemon"
)
var controlCmd = &cobra.Command{


@@ -5,13 +5,13 @@ import (
"fmt"
"github.com/spf13/cobra"
"github.com/zrepl/zrepl/cmd/config"
"github.com/zrepl/zrepl/cmd/daemon"
"github.com/zrepl/zrepl/cmd/daemon/job"
"github.com/zrepl/zrepl/logger"
"os"
"os/signal"
"syscall"
"time"
"github.com/zrepl/zrepl/cmd/daemon"
"github.com/zrepl/zrepl/cmd/daemon/job"
)
// daemonCmd represents the daemon command
@@ -92,6 +92,7 @@ func doDaemon(cmd *cobra.Command, args []string) {
daemonJobs := make([]job.Job, 0, len(conf.Jobs))
for i := range conf.Jobs {
parseJob()
daemonJobs = append(daemonJobs, daemonJobAdaptor{conf.Jobs[i]})
}
daemon.Run(ctx, conf.Global.Control.Sockpath, conf.Global.logging.Outlets, daemonJobs)


@@ -5,18 +5,18 @@ import (
"context"
"encoding/json"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/cmd/daemon/job"
"github.com/zrepl/zrepl/cmd/helpers"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/version"
"io"
"net"
"net/http"
"github.com/zrepl/zrepl/cmd/daemon/job"
"github.com/zrepl/zrepl/version"
"github.com/zrepl/zrepl/cmd/helpers"
)
type controlJob struct {
sockaddr *net.UnixAddr
jobs *jobs
jobs *jobs
}
func newControlJob(sockpath string, jobs *jobs) (j *controlJob, err error) {


@@ -2,19 +2,18 @@ package daemon
import (
"context"
"os"
"os/signal"
"syscall"
"sync"
"fmt"
"github.com/zrepl/zrepl/cmd/daemon/job"
"strings"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/version"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
)
func Run(ctx context.Context, controlSockpath string, outlets *logger.Outlets, confJobs []job.Job) {
ctx, cancel := context.WithCancel(ctx)
@@ -59,10 +58,10 @@ func Run(ctx context.Context, controlSockpath string, outlets *logger.Outlets, c
}
select {
case <-jobs.wait():
log.Info("all jobs finished")
case <-ctx.Done():
log.WithError(ctx.Err()).Info("context finished")
case <-jobs.wait():
log.Info("all jobs finished")
case <-ctx.Done():
log.WithError(ctx.Err()).Info("context finished")
}
log.Info("daemon exiting")
}
@@ -71,15 +70,15 @@ type jobs struct {
wg sync.WaitGroup
// m protects all fields below it
m sync.RWMutex
m sync.RWMutex
wakeups map[string]job.WakeupChan // by JobName
jobs map[string]job.Job
jobs map[string]job.Job
}
func newJobs() *jobs {
return &jobs{
wakeups: make(map[string]job.WakeupChan),
jobs: make(map[string]job.Job),
jobs: make(map[string]job.Job),
}
}
@@ -102,7 +101,7 @@ func (s *jobs) status() map[string]interface{} {
defer s.m.RUnlock()
type res struct {
name string
name string
status interface{}
}
var wg sync.WaitGroup
@@ -125,7 +124,7 @@ func (s *jobs) status() map[string]interface{} {
const (
jobNamePrometheus = "_prometheus"
jobNameControl = "_control"
jobNameControl = "_control"
)
func IsInternalJobName(s string) bool {


@@ -1,8 +1,8 @@
package job
import (
"github.com/zrepl/zrepl/logger"
"context"
"github.com/zrepl/zrepl/logger"
)
type Logger = logger.Logger


@@ -4,10 +4,10 @@ import (
"context"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/zrepl/zrepl/cmd/daemon/job"
"github.com/zrepl/zrepl/zfs"
"net"
"net/http"
"github.com/zrepl/zrepl/cmd/daemon/job"
)
type prometheusJob struct {
@@ -48,7 +48,7 @@ func init() {
prometheus.MustRegister(prom.taskLogEntries)
}
func (j *prometheusJob) Name() string { return jobNamePrometheus }
func (j *prometheusJob) Name() string { return jobNamePrometheus }
func (j *prometheusJob) Status() interface{} { return nil }


@@ -1,10 +1,10 @@
package helpers
import (
"path/filepath"
"os"
"github.com/pkg/errors"
"net"
"os"
"path/filepath"
)
func PreparePrivateSockpath(sockpath string) error {


@@ -8,11 +8,10 @@ import (
)
type Pruner struct {
Now time.Time
DryRun bool
DatasetFilter zfs.DatasetFilter
SnapshotPrefix string
PrunePolicy PrunePolicy
Now time.Time
DryRun bool
DatasetFilter zfs.DatasetFilter
policies []PrunePolicy
}
type PruneResult struct {
@@ -38,14 +37,14 @@ func (p *Pruner) filterFilesystems(ctx context.Context) (filesystems []*zfs.Data
func (p *Pruner) filterVersions(ctx context.Context, fs *zfs.DatasetPath) (fsversions []zfs.FilesystemVersion, stop bool) {
log := getLogger(ctx).WithField("fs", fs.ToString())
filter := NewPrefixFilter(p.SnapshotPrefix)
filter := AnyFSVFilter{}
fsversions, err := zfs.ZFSListFilesystemVersions(fs, filter)
if err != nil {
log.WithError(err).Error("error listing filesytem versions")
return nil, true
}
if len(fsversions) == 0 {
log.WithField("prefix", p.SnapshotPrefix).Info("no filesystem versions matching prefix")
log.Info("no filesystem versions matching prefix")
return nil, true
}
return fsversions, false


@@ -19,4 +19,3 @@
func doVersion(cmd *cobra.Command, args []string) {
fmt.Println(version.NewZreplVersionInformation().String())
}


@@ -1,8 +1,8 @@
package version
import (
"runtime"
"fmt"
"runtime"
)
var (