Mirror of https://github.com/zrepl/zrepl.git (synced 2024-11-22 08:23:50 +01:00)

commit e30ae972f4
parent e082816de5

    gofmt
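The diff below is a pure formatting pass: import blocks are re-sorted and whitespace is normalized, and no statement changes its meaning. A commit like this is typically produced by running Go's standard formatter from the repository root; a minimal sketch of the invocation (not recorded in the commit itself):

	# list files whose formatting differs from gofmt's canonical output
	gofmt -l .

	# rewrite those files in place
	gofmt -w .

	# equivalent, driven through the go tool
	go fmt ./...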
@@ -9,8 +9,8 @@ import (
 "time"

 "github.com/problame/go-streamrpc"
-"github.com/zrepl/zrepl/util"
 "github.com/zrepl/zrepl/logger"
+"github.com/zrepl/zrepl/util"
 )

 type logNetConnConnecter struct {
@@ -29,36 +29,35 @@ func (l logNetConnConnecter) Connect(ctx context.Context) (net.Conn, error) {
 }

 type logListenerFactory struct {
 ListenerFactory
 ReadDump, WriteDump string
 }

 var _ ListenerFactory = logListenerFactory{}

 type logListener struct {
 net.Listener
 ReadDump, WriteDump string
 }

 var _ net.Listener = logListener{}

 func (m logListenerFactory) Listen() (net.Listener, error) {
 l, err := m.ListenerFactory.Listen()
 if err != nil {
 return nil, err
 }
 return logListener{l, m.ReadDump, m.WriteDump}, nil
 }

 func (l logListener) Accept() (net.Conn, error) {
 conn, err := l.Listener.Accept()
 if err != nil {
 return nil, err
 }
 return util.NewNetConnLogger(conn, l.ReadDump, l.WriteDump)
 }

-
 type netsshAddr struct{}

 func (netsshAddr) Network() string { return "netssh" }
@@ -87,7 +86,7 @@ type twoClassLogAdaptor struct {

 var _ streamrpc.Logger = twoClassLogAdaptor{}

-func (a twoClassLogAdaptor) Errorf(fmtStr string, args... interface{}) {
+func (a twoClassLogAdaptor) Errorf(fmtStr string, args ...interface{}) {
 const errorSuffix = ": %s"
 if len(args) == 1 {
 if err, ok := args[0].(error); ok && strings.HasSuffix(fmtStr, errorSuffix) {
@@ -99,7 +98,6 @@ func (a twoClassLogAdaptor) Errorf(fmtStr string, args... interface{}) {
 a.Logger.Error(fmt.Sprintf(fmtStr, args...))
 }

-func (a twoClassLogAdaptor) Infof(fmtStr string, args... interface{}) {
+func (a twoClassLogAdaptor) Infof(fmtStr string, args ...interface{}) {
 a.Logger.Info(fmt.Sprintf(fmtStr, args...))
 }
-
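The four hunks above already show the rewrites that repeat through the rest of the commit: import specs are sorted within each parenthesized block, variadic parameters are spelled args ...interface{}, redundant parentheses around a single result type are dropped, and spacing after commas and around composite-literal colons is normalized. A small self-contained file that is already in gofmt's canonical form (hypothetical names, not taken from zrepl):

// Hypothetical example, not part of the commit: a file already in gofmt's
// canonical form, showing the rewrites that recur in the hunks above and below.
package main

import (
	"fmt"
	"sort"
)

// logf uses the canonical variadic spelling "args ...interface{}" rather than "args... interface{}".
func logf(format string, args ...interface{}) string {
	return fmt.Sprintf(format, args...)
}

// sorted returns a sorted copy; note that a single result type carries no parentheses.
func sorted(words []string) []string {
	out := append([]string(nil), words...)
	sort.Strings(out)
	return out
}

func main() {
	// gofmt normalizes spacing after commas and around composite-literal colons,
	// and keeps the import block above sorted.
	m := map[string]string{"<": ""}
	fmt.Println(logf("%v %q", sorted([]string{"b", "a"}), m["<"]))
}

Running gofmt -d over a file that strays from any of these forms prints hunks of exactly the kind collected in this commit.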
@@ -11,8 +11,8 @@ import (
 "github.com/pkg/errors"
 "github.com/problame/go-netssh"
 "github.com/problame/go-streamrpc"
-"time"
 "github.com/zrepl/zrepl/cmd/tlsconf"
+"time"
 )

 type SSHStdinserverConnecter struct {
@@ -51,12 +51,12 @@ func parseSSHStdinserverConnecter(i map[string]interface{}) (c *SSHStdinserverCo

 }

-type netsshConnToConn struct { *netssh.SSHConn }
+type netsshConnToConn struct{ *netssh.SSHConn }

 var _ net.Conn = netsshConnToConn{}

 func (netsshConnToConn) SetDeadline(dl time.Time) error { return nil }
 func (netsshConnToConn) SetReadDeadline(dl time.Time) error { return nil }
 func (netsshConnToConn) SetWriteDeadline(dl time.Time) error { return nil }

 func (c *SSHStdinserverConnecter) Connect(dialCtx context.Context) (net.Conn, error) {
@@ -5,10 +5,10 @@ import (
 "context"
 "encoding/json"
 "github.com/pkg/errors"
+"github.com/zrepl/zrepl/logger"
 "io"
 "net"
 "net/http"
-"github.com/zrepl/zrepl/logger"
 )

 type ControlJob struct {
@@ -6,10 +6,10 @@ import (
 "context"
 "github.com/mitchellh/mapstructure"
 "github.com/pkg/errors"
+"github.com/zrepl/zrepl/cmd/endpoint"
+"github.com/zrepl/zrepl/replication"
 "github.com/zrepl/zrepl/zfs"
 "sync"
-"github.com/zrepl/zrepl/replication"
-"github.com/zrepl/zrepl/cmd/endpoint"
 )

 type LocalJob struct {
@@ -11,8 +11,8 @@ import (
 "github.com/mitchellh/mapstructure"
 "github.com/pkg/errors"
 "github.com/problame/go-streamrpc"
-"github.com/zrepl/zrepl/replication"
 "github.com/zrepl/zrepl/cmd/endpoint"
+"github.com/zrepl/zrepl/replication"
 )

 type PullJob struct {
@@ -28,7 +28,7 @@ type PullJob struct {
 Debug JobDebugSettings

 task *Task
 rep *replication.Replication
 }

 func parsePullJob(c JobParsingContext, name string, i map[string]interface{}) (j *PullJob, err error) {
@@ -155,7 +155,6 @@ var STREAMRPC_CONFIG = &streamrpc.ConnConfig{ // FIXME oversight and configurabi
 },
 }

-
 func (j *PullJob) doRun(ctx context.Context) {

 j.task.Enter("run")
@@ -184,7 +183,7 @@ func (j *PullJob) doRun(ctx context.Context) {
 }

 ctx = replication.WithLogger(ctx, replicationLogAdaptor{j.task.Log().WithField("subsystem", "replication")})
 ctx = streamrpc.ContextWithLogger(ctx, streamrpcLogAdaptor{j.task.Log().WithField("subsystem", "rpc.protocol")})
 ctx = endpoint.WithLogger(ctx, j.task.Log().WithField("subsystem", "rpc.endpoint"))

 j.rep = replication.NewReplication()
@@ -7,8 +7,8 @@ import (
 "github.com/mitchellh/mapstructure"
 "github.com/pkg/errors"
 "github.com/problame/go-streamrpc"
-"net"
 "github.com/zrepl/zrepl/cmd/endpoint"
+"net"
 )

 type SourceJob struct {
@@ -160,7 +160,7 @@ func (j *SourceJob) serve(ctx context.Context, task *Task) {
 connChan := make(chan connChanMsg)

 // Serve connections until interrupted or error
 outer:
 for {

 go func() {
@@ -1,15 +1,15 @@
 package cmd

 import (
+"crypto/tls"
+"crypto/x509"
 "github.com/mattn/go-isatty"
 "github.com/mitchellh/mapstructure"
 "github.com/pkg/errors"
+"github.com/zrepl/zrepl/cmd/tlsconf"
 "github.com/zrepl/zrepl/logger"
 "os"
 "time"
-"crypto/tls"
-"crypto/x509"
-"github.com/zrepl/zrepl/cmd/tlsconf"
 )

 type LoggingConfig struct {
@@ -6,8 +6,8 @@ import (

 "github.com/mitchellh/mapstructure"
 "github.com/pkg/errors"
-"github.com/zrepl/zrepl/zfs"
 "github.com/zrepl/zrepl/cmd/endpoint"
+"github.com/zrepl/zrepl/zfs"
 )

 type DatasetMapFilter struct {
@@ -210,8 +210,8 @@ func (m DatasetMapFilter) Invert() (endpoint.FSMap, error) {
 }

 inv.entries[0] = datasetMapFilterEntry{
 path: mp,
 mapping: e.path.ToString(),
 subtreeMatch: e.subtreeMatch,
 }
@@ -7,12 +7,12 @@ import (
 yaml "github.com/go-yaml/yaml"
 "github.com/mitchellh/mapstructure"
 "github.com/pkg/errors"
+"github.com/problame/go-streamrpc"
+"github.com/zrepl/zrepl/replication"
 "os"
 "regexp"
 "strconv"
 "time"
-"github.com/zrepl/zrepl/replication"
-"github.com/problame/go-streamrpc"
 )

 var ConfigFileDefaultLocations []string = []string{
@@ -22,8 +22,8 @@ type TCPListenerFactory struct {
 func parseTCPListenerFactory(c JobParsingContext, i map[string]interface{}) (*TCPListenerFactory, error) {

 var in struct {
 Address string
 TLS map[string]interface{}
 }
 if err := mapstructure.Decode(i, &in); err != nil {
 return nil, errors.Wrap(err, "mapstructure error")
@@ -56,7 +56,7 @@ func parseTCPListenerFactory(c JobParsingContext, i map[string]interface{}) (*TC

 lf.clientCA, err = tlsconf.ParseCAFile(in.CA)
 if err != nil {
-return errors.Wrap(err,"cannot parse ca file")
+return errors.Wrap(err, "cannot parse ca file")
 }

 lf.serverCert, err = tls.LoadX509KeyPair(in.Cert, in.Key)
@@ -136,7 +136,7 @@ func TestDatasetMapFilter(t *testing.T) {
 expectMapping(map1, "q/r", "root4/1/2/r")

 map2 := map[string]string{ // identity mapping
-"<":"",
+"<": "",
 }
 expectMapping(map2, "foo/bar", "foo/bar")
@@ -2,20 +2,20 @@
 package endpoint

 import (
-"github.com/zrepl/zrepl/replication/pdu"
-"github.com/problame/go-streamrpc"
-"github.com/zrepl/zrepl/zfs"
-"io"
-"github.com/pkg/errors"
-"github.com/golang/protobuf/proto"
 "bytes"
 "context"
+"github.com/golang/protobuf/proto"
+"github.com/pkg/errors"
+"github.com/problame/go-streamrpc"
 "github.com/zrepl/zrepl/replication"
+"github.com/zrepl/zrepl/replication/pdu"
+"github.com/zrepl/zrepl/zfs"
+"io"
 )

 // Sender implements replication.ReplicationEndpoint for a sending side
 type Sender struct {
 FSFilter zfs.DatasetFilter
 FilesystemVersionFilter zfs.FilesystemVersionFilter
 }

@@ -87,16 +87,16 @@ type FSFilter interface {
 // FIXME: can we get away without error types here?
 type FSMap interface {
 FSFilter
-Map(path *zfs.DatasetPath) (*zfs.DatasetPath,error)
-Invert() (FSMap,error)
-AsFilter() (FSFilter)
+Map(path *zfs.DatasetPath) (*zfs.DatasetPath, error)
+Invert() (FSMap, error)
+AsFilter() FSFilter
 }

 // Receiver implements replication.ReplicationEndpoint for a receiving side
 type Receiver struct {
 fsmapInv FSMap
 fsmap FSMap
 fsvf zfs.FilesystemVersionFilter
 }

 func NewReceiver(fsmap FSMap, fsvf zfs.FilesystemVersionFilter) (*Receiver, error) {
@@ -219,12 +219,11 @@ func (e *Receiver) Receive(ctx context.Context, req *pdu.ReceiveReq, sendStream
 // RPC STUBS
 // =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=

-
 const (
 RPCListFilesystems = "ListFilesystems"
 RPCListFilesystemVersions = "ListFilesystemVersions"
 RPCReceive = "Receive"
 RPCSend = "Send"
 )

 // Remote implements an endpoint stub that uses streamrpc as a transport.
@@ -295,13 +294,13 @@ func (s Remote) Send(ctx context.Context, r *pdu.SendReq) (*pdu.SendRes, io.Read
 var res pdu.SendRes
 if err := proto.Unmarshal(rb.Bytes(), &res); err != nil {
 rs.Close()
 return nil, nil, err
 }
 // FIXME make sure the consumer will read the reader until the end...
 return &res, rs, nil
 }

-func (s Remote) Receive(ctx context.Context, r *pdu.ReceiveReq, sendStream io.ReadCloser) (error) {
+func (s Remote) Receive(ctx context.Context, r *pdu.ReceiveReq, sendStream io.ReadCloser) error {
 defer sendStream.Close()
 b, err := proto.Marshal(r)
 if err != nil {
@@ -414,6 +413,6 @@ func (a *Handler) Handle(ctx context.Context, endpoint string, reqStructured *by
 return bytes.NewBuffer(b), nil, err

 }
 Err:
 return nil, nil, errors.New("no handler for given endpoint")
 }
@@ -43,7 +43,7 @@ func NewClientAuthListener(
 }

 tlsConf := tls.Config{
 Certificates: []tls.Certificate{serverCert},
 ClientCAs: ca,
 ClientAuth: tls.RequireAndVerifyClientCert,
 PreferServerCipherSuites: true,
@@ -114,7 +114,7 @@ func ClientAuthClient(serverName string, rootCA *x509.CertPool, clientCert tls.C
 tlsConfig := &tls.Config{
 Certificates: []tls.Certificate{clientCert},
 RootCAs: rootCA,
 ServerName: serverName,
 }
 tlsConfig.BuildNameToCertificate()
 return tlsConfig, nil
@@ -1,7 +1,6 @@
 package logger

-
-type nullLogger struct {}
+type nullLogger struct{}

 var _ Logger = nullLogger{}

@@ -9,14 +8,13 @@ func NewNullLogger() Logger {
 return nullLogger{}
 }

 func (n nullLogger) WithOutlet(outlet Outlet, level Level) Logger { return n }
 func (n nullLogger) ReplaceField(field string, val interface{}) Logger { return n }
 func (n nullLogger) WithField(field string, val interface{}) Logger { return n }
 func (n nullLogger) WithFields(fields Fields) Logger { return n }
 func (n nullLogger) WithError(err error) Logger { return n }
 func (nullLogger) Debug(msg string) {}
 func (nullLogger) Info(msg string) {}
 func (nullLogger) Warn(msg string) {}
 func (nullLogger) Error(msg string) {}
 func (nullLogger) Printf(format string, args ...interface{}) {}
-
@@ -1,8 +1,8 @@
 package replication

 import (
-"github.com/zrepl/zrepl/logger"
 "context"
+"github.com/zrepl/zrepl/logger"
 "github.com/zrepl/zrepl/replication/fsrep"
 )

@@ -27,4 +27,3 @@ func getLogger(ctx context.Context) Logger {
 }
 return l
 }
-
@@ -12,8 +12,8 @@ import (
 "sync"
 "time"

-"github.com/zrepl/zrepl/replication/pdu"
 "github.com/zrepl/zrepl/logger"
+"github.com/zrepl/zrepl/replication/pdu"
 )

 type contextKey int
@@ -51,18 +51,17 @@ type StepReport struct {
 }

 type Report struct {
 Filesystem string
 Status string
 Problem string
-Completed,Pending []*StepReport
+Completed, Pending []*StepReport
 }

-
 //go:generate stringer -type=State
 type State uint

 const (
 Ready State = 1 << iota
 RetryWait
 PermanentError
 Completed
@@ -123,7 +122,7 @@ func (b *ReplicationBuilder) Done() (r *Replication) {
 } else {
 b.r.state = Completed
 }
 r = b.r
 b.r = nil
 return r
 }
@@ -136,12 +135,11 @@ func NewReplicationWithPermanentError(fs string, err error) *Replication {
 }
 }

-
 //go:generate stringer -type=StepState
 type StepState uint

 const (
 StepReady StepState = 1 << iota
 StepRetry
 StepPermanentError
 StepCompleted
@@ -387,4 +385,3 @@ func (step *ReplicationStep) Report() *StepReport {
 }
 return &rep
 }
-
@@ -1,8 +1,8 @@
 package queue

 import (
-"time"
 "sort"
+"time"

 . "github.com/zrepl/zrepl/replication/fsrep"
 )
@@ -27,12 +27,12 @@ func NewReplicationQueue() *ReplicationQueue {
 func (q ReplicationQueue) Len() int { return len(q) }
 func (q ReplicationQueue) Swap(i, j int) { q[i], q[j] = q[j], q[i] }

-type lessmapEntry struct{
+type lessmapEntry struct {
 prio int
-less func(a,b *replicationQueueItem) bool
+less func(a, b *replicationQueueItem) bool
 }

-var lessmap = map[State]lessmapEntry {
+var lessmap = map[State]lessmapEntry{
 Ready: {
 prio: 0,
 less: func(a, b *replicationQueueItem) bool {
@@ -94,7 +94,7 @@ func (q *ReplicationQueue) GetNext() (done []*Replication, next *ReplicationQueu

 func (q *ReplicationQueue) Add(fsr *Replication) {
 *q = append(*q, &replicationQueueItem{
 fsr: fsr,
 state: fsr.State(),
 })
 }
@@ -10,17 +10,17 @@ import (
 "sync"
 "time"

-"github.com/zrepl/zrepl/replication/pdu"
 "github.com/zrepl/zrepl/replication/fsrep"
-. "github.com/zrepl/zrepl/replication/internal/queue"
 . "github.com/zrepl/zrepl/replication/internal/diff"
+. "github.com/zrepl/zrepl/replication/internal/queue"
+"github.com/zrepl/zrepl/replication/pdu"
 )

 //go:generate stringer -type=State
 type State uint

 const (
 Planning State = 1 << iota
 PlanningError
 Working
 WorkingWait
@@ -77,7 +77,6 @@ type Report struct {
 Active *fsrep.Report
 }

-
 func NewReplication() *Replication {
 r := Replication{
 state: Planning,
@@ -101,7 +100,6 @@ type Receiver interface {
 fsrep.Receiver
 }

-
 type FilteredError struct{ fs string }

 func NewFilteredError(fs string) *FilteredError {
@@ -110,7 +108,6 @@ func NewFilteredError(fs string) *FilteredError {

 func (f FilteredError) Error() string { return "endpoint does not allow access to filesystem " + f.fs }

-
 type updater func(func(*Replication)) (newState State)
 type state func(ctx context.Context, sender Sender, receiver Receiver, u updater) state

@@ -381,4 +378,3 @@ func (r *Replication) Report() *Report {

 return &rep
 }
-
@@ -33,11 +33,11 @@ func FilesystemVersionFromZFS(fsv zfs.FilesystemVersion) *FilesystemVersion {
 panic("unknown fsv.Type: " + fsv.Type)
 }
 return &FilesystemVersion{
 Type: t,
 Name: fsv.Name,
 Guid: fsv.Guid,
 CreateTXG: fsv.CreateTXG,
 Creation: fsv.Creation.Format(time.RFC3339),
 }
 }

@@ -64,10 +64,10 @@ func (v *FilesystemVersion) ZFSFilesystemVersion() *zfs.FilesystemVersion {
 }
 }
 return &zfs.FilesystemVersion{
 Type: v.Type.ZFSVersionType(),
 Name: v.Name,
 Guid: v.Guid,
 CreateTXG: v.CreateTXG,
 Creation: ct,
 }
 }
@@ -1,15 +1,15 @@
 package pdu

 import (
-"testing"
 "github.com/stretchr/testify/assert"
+"testing"
 )

 func TestFilesystemVersion_RelName(t *testing.T) {

 type TestCase struct {
 In FilesystemVersion
 Out string
 Panic bool
 }

@@ -56,7 +56,7 @@ func TestFilesystemVersion_ZFSFilesystemVersion(t *testing.T) {
 emptyZFS := empty.ZFSFilesystemVersion()
 assert.Zero(t, emptyZFS.Creation)

-dateInvalid := &FilesystemVersion{Creation:"foobar"}
+dateInvalid := &FilesystemVersion{Creation: "foobar"}
 assert.Panics(t, func() {
 dateInvalid.ZFSFilesystemVersion()
 })
@@ -9,4 +9,3 @@
 )

 const DEFAULT_INITIAL_REPL_POLICY = InitialReplPolicyMostRecent
-
@@ -2,18 +2,18 @@ package util

 import (
 "context"
-"time"
 "sync"
+"time"
 )

 type contextWithOptionalDeadline struct {
 context.Context

 m sync.Mutex
 deadline time.Time

 done chan struct{}
 err error
 }

 func (c *contextWithOptionalDeadline) Deadline() (deadline time.Time, ok bool) {
@@ -28,7 +28,7 @@ func (c *contextWithOptionalDeadline) Err() error {
 return c.err
 }

-func (c *contextWithOptionalDeadline) Done() (<-chan struct{}) {
+func (c *contextWithOptionalDeadline) Done() <-chan struct{} {
 return c.done
 }

@@ -37,8 +37,8 @@ func ContextWithOptionalDeadline(pctx context.Context) (ctx context.Context, enf
 // mctx can only be cancelled by cancelMctx, not by a potential cancel of pctx
 rctx := &contextWithOptionalDeadline{
 Context: pctx,
 done: make(chan struct{}),
 err: nil,
 }
 enforceDeadline = func(deadline time.Time) {
@@ -1,11 +1,11 @@
 package util

 import (
-"testing"
 "context"
-"time"
-"github.com/stretchr/testify/require"
 "github.com/stretchr/testify/assert"
+"github.com/stretchr/testify/require"
+"testing"
+"time"
 )

 func TestContextWithOptionalDeadline(t *testing.T) {
@@ -18,14 +18,14 @@ func TestContextWithOptionalDeadline(t *testing.T) {
 var cancellationError error
 go func() {
 select {
-case <- cctx.Done():
+case <-cctx.Done():
 receivedCancellation = time.Now()
 cancellationError = cctx.Err()
-case <- time.After(600*time.Millisecond):
+case <-time.After(600 * time.Millisecond):
 t.Fatalf("should have been cancelled by deadline")
 }
 }()
-time.Sleep(100*time.Millisecond)
+time.Sleep(100 * time.Millisecond)
 if !receivedCancellation.IsZero() {
 t.Fatalf("no enforcement means no cancellation")
 }
@@ -33,11 +33,11 @@
 dl, ok := cctx.Deadline()
 require.False(t, ok)
 require.Zero(t, dl)
-enforceDeadline(begin.Add(200*time.Millisecond))
+enforceDeadline(begin.Add(200 * time.Millisecond))
 // second call must be ignored, i.e. we expect the deadline to be at begin+200ms, not begin+400ms
-enforceDeadline(begin.Add(400*time.Millisecond))
+enforceDeadline(begin.Add(400 * time.Millisecond))

-time.Sleep(300*time.Millisecond) // 100ms margin for scheduler
+time.Sleep(300 * time.Millisecond) // 100ms margin for scheduler
 if receivedCancellation.Sub(begin) > 250*time.Millisecond {
 t.Fatalf("cancellation is beyond acceptable scheduler latency")
 }
@@ -47,7 +47,7 @@
 func TestContextWithOptionalDeadlineNegativeDeadline(t *testing.T) {
 ctx := context.Background()
 cctx, enforceDeadline := ContextWithOptionalDeadline(ctx)
-enforceDeadline(time.Now().Add(-10*time.Second))
+enforceDeadline(time.Now().Add(-10 * time.Second))
 select {
 case <-cctx.Done():
 default:
@@ -62,10 +62,10 @@ func TestContextWithOptionalDeadlineParentCancellation(t *testing.T) {

 // 0 ms
 start := time.Now()
-enforceDeadline(start.Add(400*time.Millisecond))
-time.Sleep(100*time.Millisecond)
+enforceDeadline(start.Add(400 * time.Millisecond))
+time.Sleep(100 * time.Millisecond)
 cancel() // cancel @ ~100ms
-time.Sleep(100*time.Millisecond) // give 100ms time to propagate cancel
+time.Sleep(100 * time.Millisecond) // give 100ms time to propagate cancel
 // @ ~200ms
 select {
 case <-cctx.Done():
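The spacing in the hunks above is not inconsistent but follows gofmt's rules: the formatter weighs nesting and operator precedence, so a simple product such as 100 * time.Millisecond passed to a call gains spaces, while the same kind of product stays tight when it sits under a lower-precedence comparison (250*time.Millisecond above) or when an operand is itself a call (len(properties)+1 in a later hunk). A minimal sketch of the three cases, assuming nothing beyond the standard library:

package main

// Illustration only (not zrepl code): all three assignments below are already
// in gofmt's preferred form, mirroring the spacing seen in the surrounding hunks.

import (
	"fmt"
	"time"
)

func main() {
	// A simple product at statement level keeps spaces around the operator.
	d := 300 * time.Millisecond

	// Under a lower-precedence comparison, the higher-precedence product is set tight.
	late := d > 250*time.Millisecond

	// An operand that is itself a call also leads gofmt to set the operator tight.
	buf := make([]byte, len("zrepl")+1)

	fmt.Println(d, late, len(buf))
}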
@@ -14,7 +14,7 @@ type NetConnLogger struct {

 func NewNetConnLogger(conn net.Conn, readlog, writelog string) (l *NetConnLogger, err error) {
 l = &NetConnLogger{
 Conn: conn,
 }
 flags := os.O_CREATE | os.O_WRONLY
 if readlog != "" {
@@ -5,9 +5,9 @@ import (
 "crypto/sha512"
 "encoding/hex"
 "fmt"
+"io"
 "os/exec"
 "sort"
-"io"
 )

 type fsbyCreateTXG []FilesystemVersion
@@ -255,7 +255,7 @@ func ZFSIsPlaceholderFilesystem(p *DatasetPath) (isPlaceholder bool, err error)
 } else if err != nil {
 return false, err
 }
 isPlaceholder, _ = IsPlaceholder(p, props.Get(ZREPL_PLACEHOLDER_PROPERTY_NAME))
 return
 }
@@ -39,7 +39,7 @@ func ZFSListMappingProperties(filter DatasetFilter, properties []string) (datase
 panic("properties must not contain 'name'")
 }
 }
-newProps := make([]string, len(properties) + 1)
+newProps := make([]string, len(properties)+1)
 newProps[0] = "name"
 copy(newProps[1:], properties)
 properties = newProps
@@ -69,7 +69,7 @@
 }
 if pass {
 datasets = append(datasets, ZFSListMappingPropertiesResult{
 Path: path,
 Fields: r.Fields[1:],
 })
 }
@@ -78,5 +78,3 @@

 return
 }
-
-
@@ -6,10 +6,10 @@ import (
 "errors"
 "fmt"
 "github.com/prometheus/client_golang/prometheus"
+"io"
 "strconv"
 "strings"
 "time"
-"io"
 )

 type VersionType string