run go1.19 gofmt and make adjustments as needed

(Go 1.19 expanded doc comment syntax)
This commit is contained in:
Christian Schwarz 2022-10-24 22:09:02 +02:00
parent 6c87bdb9fb
commit a6aa610165
16 changed files with 165 additions and 177 deletions
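For context, these are the doc comment conventions that gofmt formats toward since Go 1.19 and that the hunks below adopt: a line starting with "# " becomes a heading, preformatted/code blocks are indented with a single tab, and list items get a uniform "- " marker. A minimal illustration (not taken from this repository; the example package and names are made up):

// Package example illustrates the doc comment conventions gofmt
// enforces since Go 1.19.
//
// # Usage
//
// Preformatted blocks are indented with a single tab so that go doc
// and pkg.go.dev render them verbatim:
//
//	cfg := example.New()
//	if err := cfg.Run(); err != nil {
//		// handle error
//	}
//
// List items get a uniform marker and aligned continuation lines:
//   - tasks represent concurrent activity
//   - spans represent nested activity within a task
package example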

View File

@@ -44,6 +44,7 @@ func TestSampleConfigsAreParsedWithoutErrors(t *testing.T) {
}
// template must be a text/template template with a single '{{ . }}' as placeholder for val
//
//nolint:deadcode,unused
func testValidConfigTemplate(t *testing.T, tmpl string, val string) *Config {
tmp, err := template.New("master").Parse(tmpl)

View File

@@ -5,7 +5,7 @@
//
// This package also provides all supported hook type implementations and abstractions around them.
//
// Use For Other Kinds Of ExpectStepReports
// # Use For Other Kinds Of ExpectStepReports
//
// This package REQUIRES REFACTORING before it can be used for activities other than snapshots, e.g. pre- and post-replication:
//
@@ -15,7 +15,7 @@
// The hook implementations should move out of this package.
// However, there is a lot of tight coupling that isn't worth untangling ATM.
//
// How This Package Is Used By Package Snapper
// # How This Package Is Used By Package Snapper
//
// Deserialize a config.List using ListFromConfig().
// Then it MUST filter the list to only contain hooks for a particular filesystem using
@@ -30,5 +30,4 @@
// Command hooks make it available in the environment variable ZREPL_DRYRUN.
//
// Plan.Report() can be called while Plan.Run() is executing to give an overview of plan execution progress (future use in "zrepl status").
//
package hooks

View File

@@ -17,19 +17,22 @@ import (
"github.com/zrepl/zrepl/zfs"
)
// Hook to implement the following recommendation from the MySQL docs
// https://dev.mysql.com/doc/mysql-backup-excerpt/5.7/en/backup-methods.html
//
// Making Backups Using a File System Snapshot:
// Making Backups Using a File System Snapshot:
//
// If you are using a Veritas file system, you can make a backup like this:
// If you are using a Veritas file system, you can make a backup like this:
//
// From a client program, execute FLUSH TABLES WITH READ LOCK.
// From another shell, execute mount vxfs snapshot.
// From the first client, execute UNLOCK TABLES.
// Copy files from the snapshot.
// Unmount the snapshot.
// From a client program, execute FLUSH TABLES WITH READ LOCK.
// From another shell, execute mount vxfs snapshot.
// From the first client, execute UNLOCK TABLES.
// Copy files from the snapshot.
// Unmount the snapshot.
//
// Similar snapshot capabilities may be available in other file systems, such as LVM or ZFS.
// Similar snapshot capabilities may be available in other file systems, such as LVM or ZFS.
//
type MySQLLockTables struct {
errIsFatal bool
connector sqldriver.Connector
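The quoted sequence is what this hook automates from within zrepl; below is a rough, hedged sketch of it in plain database/sql terms. The takeSnapshot callback and the connection setup are placeholders, not this package's actual API.

// Sketch only; assumes the usual "context" and "database/sql" imports.
// lockedSnapshot shows the quoted sequence: hold FLUSH TABLES WITH READ LOCK
// on one connection, take the filesystem snapshot, then unlock on the same
// connection. takeSnapshot stands in for the actual snapshot step.
func lockedSnapshot(ctx context.Context, db *sql.DB, takeSnapshot func() error) error {
	conn, err := db.Conn(ctx) // the read lock is scoped to this connection
	if err != nil {
		return err
	}
	defer conn.Close()

	if _, err := conn.ExecContext(ctx, "FLUSH TABLES WITH READ LOCK"); err != nil {
		return err
	}
	snapErr := takeSnapshot() // e.g. create the ZFS snapshot here

	if _, err := conn.ExecContext(ctx, "UNLOCK TABLES"); err != nil {
		return err
	}
	return snapErr
}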

View File

@@ -138,7 +138,7 @@ outer:
// TODO:
// This is a work-around for the current package daemon/pruner
// and package pruning.Snapshot limitation: they require the
// `Replicated` getter method be present, but obviously,
// `Replicated` getter method be present, but obviously,
// a local job like SnapJob can't deliver on that.
// But the pruner.Pruner gives up on an FS if no replication
// cursor is present, which is why this pruner returns the

View File

@@ -1,6 +1,6 @@
// package trace provides activity tracing via ctx through Tasks and Spans
//
// Basic Concepts
// # Basic Concepts
//
// Tracing can be used to identify where a piece of code spends its time.
//
@@ -10,51 +10,50 @@
// to tech-savvy users (albeit not developers).
//
// This package provides the concept of Tasks and Spans to express what activity is happening within an application:
//
// - Neither task nor span is really tangible but instead contained within the context.Context tree
// - Tasks represent concurrent activity (i.e. goroutines).
// - Spans represent a semantic stack trace within a task.
//
// - Neither task nor span is really tangible but instead contained within the context.Context tree
// - Tasks represent concurrent activity (i.e. goroutines).
// - Spans represent a semantic stack trace within a task.
// As a consequence, whenever a context is propagated across a goroutine boundary, you need to create a child task:
//
// go func(ctx context.Context) {
// ctx, endTask = WithTask(ctx, "what-happens-inside-the-child-task")
// defer endTask()
// // ...
// }(ctx)
// go func(ctx context.Context) {
// ctx, endTask = WithTask(ctx, "what-happens-inside-the-child-task")
// defer endTask()
// // ...
// }(ctx)
//
// Within the task, you can open up a hierarchy of spans.
// In contrast to tasks, which can have multiple concurrently running child tasks,
// spans must nest and not cross the goroutine boundary.
//
// ctx, endSpan = WithSpan(ctx, "copy-dir")
// defer endSpan()
// for _, f := range dir.Files() {
// func() {
// ctx, endSpan := WithSpan(ctx, fmt.Sprintf("copy-file %q", f))
// defer endSpan()
// b, _ := ioutil.ReadFile(f)
// _ = ioutil.WriteFile(f + ".copy", b, 0600)
// }()
// }
// ctx, endSpan = WithSpan(ctx, "copy-dir")
// defer endSpan()
// for _, f := range dir.Files() {
// func() {
// ctx, endSpan := WithSpan(ctx, fmt.Sprintf("copy-file %q", f))
// defer endSpan()
// b, _ := ioutil.ReadFile(f)
// _ = ioutil.WriteFile(f + ".copy", b, 0600)
// }()
// }
//
// In combination:
// ctx, endTask = WithTask(ctx, "copy-dirs")
// defer endTask()
// for i := range dirs {
// go func(dir string) {
// ctx, endTask := WithTask(ctx, "copy-dir")
// defer endTask()
// for _, f := range filesIn(dir) {
// func() {
// ctx, endSpan := WithSpan(ctx, fmt.Sprintf("copy-file %q", f))
// defer endSpan()
// b, _ := ioutil.ReadFile(f)
// _ = ioutil.WriteFile(f + ".copy", b, 0600)
// }()
// }
// }()
// }
//
// ctx, endTask = WithTask(ctx, "copy-dirs")
// defer endTask()
// for i := range dirs {
// go func(dir string) {
// ctx, endTask := WithTask(ctx, "copy-dir")
// defer endTask()
// for _, f := range filesIn(dir) {
// func() {
// ctx, endSpan := WithSpan(ctx, fmt.Sprintf("copy-file %q", f))
// defer endSpan()
// b, _ := ioutil.ReadFile(f)
// _ = ioutil.WriteFile(f + ".copy", b, 0600)
// }()
// }
// }()
// }
//
// Note that a span ends at the time you call endSpan - not before and not after that.
// If you violate the stack-like nesting of spans by forgetting an endSpan() invocation,
@@ -65,8 +64,7 @@
//
// Recovering from endSpan() or endTask() panics will corrupt the trace stack and lead to corrupt tracefile output.
//
//
// Best Practices For Naming Tasks And Spans
// # Best Practices For Naming Tasks And Spans
//
// Tasks should always have string constants as names, and those names must not contain the `#` character. Why?
// First, the visualization by chrome://tracing draws a horizontal bar for each task in the trace.
@@ -74,8 +72,7 @@
// Note that the `#NUM` suffix will be reused if a task has ended, in order to avoid an
// infinite number of horizontal bars in the visualization.
//
//
// Chrome-compatible Tracefile Support
// # Chrome-compatible Tracefile Support
//
// The activity trace generated by usage of WithTask and WithSpan can be rendered to a JSON output file
// that can be loaded into chrome://tracing .

View File

@@ -11,8 +11,6 @@ import (
// use like this:
//
// defer WithSpanFromStackUpdateCtx(&existingCtx)()
//
//
func WithSpanFromStackUpdateCtx(ctx *context.Context) DoneFunc {
childSpanCtx, end := WithSpan(*ctx, getMyCallerOrPanic())
*ctx = childSpanCtx

View File

@@ -439,7 +439,7 @@ type FSMap interface { // FIXME unused
}
// NOTE: when adding members to this struct, remember
// to add them to `ReceiverConfig.copyIn()`
// to add them to `ReceiverConfig.copyIn()`
type ReceiverConfig struct {
JobID JobID

View File

@@ -23,7 +23,7 @@ func replicationCursorBookmarkNameImpl(fs string, guid uint64, jobid string) (st
var ErrV1ReplicationCursor = fmt.Errorf("bookmark name is a v1-replication cursor")
//err != nil always means that the bookmark is not a valid replication bookmark
// err != nil always means that the bookmark is not a valid replication bookmark
//
// Returns ErrV1ReplicationCursor as error if the bookmark is a v1 replication cursor
func ParseReplicationCursorBookmarkName(fullname string) (uint64, JobID, error) {
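Based on the signature and the error contract above, a caller would look roughly like this (sketch only; fullname is a bookmark name obtained elsewhere, e.g. by listing the filesystem's bookmarks):

// fullname is the full bookmark name to classify.
guid, jobID, err := ParseReplicationCursorBookmarkName(fullname)
switch {
case err == ErrV1ReplicationCursor:
	// valid, but a legacy v1 cursor; callers handle this case separately
case err != nil:
	// not a replication cursor bookmark at all
default:
	_ = guid  // guid encoded in the bookmark name
	_ = jobID // job that owns the cursor
}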

View File

@@ -6,12 +6,12 @@
// In commit 082335df5d85e1b0b9faa35ff182c71886142d3e and earlier, heartbeatconn would fail
// this benchmark with a writev I/O timeout (here is the ss(8) output at the time of failure)
//
// ESTAB 33369 0 127.0.0.1:12345 127.0.0.1:57282 users:(("heartbeatconn_i",pid=25953,fd=5))
// cubic wscale:7,7 rto:203 rtt:2.992/5.849 ato:162 mss:32768 pmtu:65535 rcvmss:32741 advmss:65483 cwnd:10 bytes_sent:48 bytes_acked:48 bytes_received:195401 segs_out:44 segs_in:57 data_segs_out:6 data_segs_in:34 send 876.1Mbps lastsnd:125 lastrcv:9390 lastack:125 pacing_rate 1752.0Mbps delivery_rate 6393.8Mbps delivered:7 app_limited busy:42ms rcv_rtt:1 rcv_space:65483 rcv_ssthresh:65483 minrtt:0.029
// --
// ESTAB 0 3956805 127.0.0.1:57282 127.0.0.1:12345 users:(("heartbeatconn_i",pid=26100,fd=3))
// cubic wscale:7,7 rto:211 backoff:5 rtt:10.38/16.937 ato:40 mss:32768 pmtu:65535 rcvmss:536 advmss:65483 cwnd:10 bytes_sent:195401 bytes_acked:195402 bytes_received:48 segs_out:57 segs_in:45 data_segs_out:34 data_segs_in:6 send 252.5Mbps lastsnd:9390 lastrcv:125 lastack:125 pacing_rate 505.1Mbps delivery_rate 1971.0Mbps delivered:35 busy:30127ms rwnd_limited:30086ms(99.9%) rcv_space:65495 rcv_ssthresh:65495 notsent:3956805 minrtt:0.007
// panic: writev tcp 127.0.0.1:57282->127.0.0.1:12345: i/o timeout
// ESTAB 33369 0 127.0.0.1:12345 127.0.0.1:57282 users:(("heartbeatconn_i",pid=25953,fd=5))
// cubic wscale:7,7 rto:203 rtt:2.992/5.849 ato:162 mss:32768 pmtu:65535 rcvmss:32741 advmss:65483 cwnd:10 bytes_sent:48 bytes_acked:48 bytes_received:195401 segs_out:44 segs_in:57 data_segs_out:6 data_segs_in:34 send 876.1Mbps lastsnd:125 lastrcv:9390 lastack:125 pacing_rate 1752.0Mbps delivery_rate 6393.8Mbps delivered:7 app_limited busy:42ms rcv_rtt:1 rcv_space:65483 rcv_ssthresh:65483 minrtt:0.029
// --
// ESTAB 0 3956805 127.0.0.1:57282 127.0.0.1:12345 users:(("heartbeatconn_i",pid=26100,fd=3))
// cubic wscale:7,7 rto:211 backoff:5 rtt:10.38/16.937 ato:40 mss:32768 pmtu:65535 rcvmss:536 advmss:65483 cwnd:10 bytes_sent:195401 bytes_acked:195402 bytes_received:48 segs_out:57 segs_in:45 data_segs_out:34 data_segs_in:6 send 252.5Mbps lastsnd:9390 lastrcv:125 lastack:125 pacing_rate 505.1Mbps delivery_rate 1971.0Mbps delivered:35 busy:30127ms rwnd_limited:30086ms(99.9%) rcv_space:65495 rcv_ssthresh:65495 notsent:3956805 minrtt:0.007
// panic: writev tcp 127.0.0.1:57282->127.0.0.1:12345: i/o timeout
//
// The assumed reason for those writev timeouts is the following:
// - Sporadic server stalls (server data handling, usually I/O) cause TCP exponential backoff on the client for client->server
@@ -22,27 +22,23 @@
// The fix in the commit that carries this message resets the deadline whenever
// a heartbeat is received from the server.
//
//
// How to run this integration test:
//
// Terminal 1:
// $ ZREPL_RPC_DATACONN_HEARTBEATCONN_DEBUG=1 go run heartbeatconn_integration_variablereceiverate.go -mode server -addr 127.0.0.1:12345
// rpc/dataconn/heartbeatconn: send heartbeat
// rpc/dataconn/heartbeatconn: send heartbeat
// ...
//
// Terminal 1:
// $ ZREPL_RPC_DATACONN_HEARTBEATCONN_DEBUG=1 go run heartbeatconn_integration_variablereceiverate.go -mode server -addr 127.0.0.1:12345
// rpc/dataconn/heartbeatconn: send heartbeat
// rpc/dataconn/heartbeatconn: send heartbeat
// ...
//
// Terminal 2:
// $ ZREPL_RPC_DATACONN_HEARTBEATCONN_DEBUG=1 go run heartbeatconn_integration_variablereceiverate.go -mode client -addr 127.0.0.1:12345
// rpc/dataconn/heartbeatconn: received heartbeat, resetting write timeout
// rpc/dataconn/heartbeatconn: renew frameconn write timeout returned errT=<nil> err=%!s(<nil>)
// rpc/dataconn/heartbeatconn: send heartbeat
// rpc/dataconn/heartbeatconn: received heartbeat, resetting write timeout
// rpc/dataconn/heartbeatconn: renew frameconn write timeout returned errT=<nil> err=%!s(<nil>)
// rpc/dataconn/heartbeatconn: received heartbeat, resetting write timeout
// ...
//
// You should observe
// Terminal 2:
// $ ZREPL_RPC_DATACONN_HEARTBEATCONN_DEBUG=1 go run heartbeatconn_integration_variablereceiverate.go -mode client -addr 127.0.0.1:12345
// rpc/dataconn/heartbeatconn: received heartbeat, resetting write timeout
// rpc/dataconn/heartbeatconn: renew frameconn write timeout returned errT=<nil> err=%!s(<nil>)
// rpc/dataconn/heartbeatconn: send heartbeat
// rpc/dataconn/heartbeatconn: received heartbeat, resetting write timeout
// rpc/dataconn/heartbeatconn: renew frameconn write timeout returned errT=<nil> err=%!s(<nil>)
// rpc/dataconn/heartbeatconn: received heartbeat, resetting write timeout
// ...
package main
import (

View File

@@ -2,15 +2,13 @@
//
// With stdin / stdout on client and server, simulating zfs send|recv piping
//
// ./microbenchmark -appmode server | pv -r > /dev/null
// ./microbenchmark -appmode client -direction recv < /dev/zero
//
// ./microbenchmark -appmode server | pv -r > /dev/null
// ./microbenchmark -appmode client -direction recv < /dev/zero
//
// Without the overhead of pipes (just protocol performance; mostly useful with perf because there is no bandwidth measurement)
//
// ./microbenchmark -appmode client -direction recv -devnoopWriter -devnoopReader
// ./microbenchmark -appmode server -devnoopReader -devnoopWriter
//
// ./microbenchmark -appmode client -direction recv -devnoopWriter -devnoopReader
// ./microbenchmark -appmode server -devnoopReader -devnoopWriter
package main
import (

View File

@@ -3,7 +3,7 @@
// The zrepl documentation refers to the client as the
// `active side` and to the server as the `passive side`.
//
// Design Considerations
// # Design Considerations
//
// zrepl has non-standard requirements for remote procedure calls (RPC):
// whereas the coordination of replication (the planning phase) mostly
@@ -35,7 +35,7 @@
//
// Hence, this package attempts to combine the best of both worlds:
//
// GRPC for Coordination and Dataconn for Bulk Data Transfer
// # GRPC for Coordination and Dataconn for Bulk Data Transfer
//
// This package's Client uses its transport.Connecter to maintain
// separate control and data connections to the Server.
@@ -47,68 +47,66 @@
// The following ASCII diagram gives an overview of how the individual
// building blocks are glued together:
//
// +------------+
// | rpc.Client |
// +------------+
// | |
// +--------+ +------------+
// | |
// +---------v-----------+ +--------v------+
// |pdu.ReplicationClient| |dataconn.Client|
// +---------------------+ +--------v------+
// | label: label: |
// | zrepl_control zrepl_data |
// +--------+ +------------+
// | |
// +--v---------v---+
// | transportmux |
// +-------+--------+
// | uses
// +-------v--------+
// |versionhandshake|
// +-------+--------+
// | uses
// +------v------+
// | transport |
// +------+------+
// |
// NETWORK
// |
// +------+------+
// | transport |
// +------^------+
// | uses
// +-------+--------+
// |versionhandshake|
// +-------^--------+
// | uses
// +-------+--------+
// | transportmux |
// +--^--------^----+
// | |
// +--------+ --------------+ ---
// | | |
// | label: label: | |
// | zrepl_control zrepl_data | |
// +-----+----+ +-----------+---+ |
// |netadaptor| |dataconn.Server| | rpc.Server
// | + | +------+--------+ |
// |grpcclient| | |
// |identity | | |
// +-----+----+ | |
// | | |
// +---------v-----------+ | |
// |pdu.ReplicationServer| | |
// +---------+-----------+ | |
// | | ---
// +----------+ +------------+
// | |
// +---v--v-----+
// | Handler |
// +------------+
// (usually endpoint.{Sender,Receiver})
//
//
// +------------+
// | rpc.Client |
// +------------+
// | |
// +--------+ +------------+
// | |
// +---------v-----------+ +--------v------+
// |pdu.ReplicationClient| |dataconn.Client|
// +---------------------+ +--------v------+
// | label: label: |
// | zrepl_control zrepl_data |
// +--------+ +------------+
// | |
// +--v---------v---+
// | transportmux |
// +-------+--------+
// | uses
// +-------v--------+
// |versionhandshake|
// +-------+--------+
// | uses
// +------v------+
// | transport |
// +------+------+
// |
// NETWORK
// |
// +------+------+
// | transport |
// +------^------+
// | uses
// +-------+--------+
// |versionhandshake|
// +-------^--------+
// | uses
// +-------+--------+
// | transportmux |
// +--^--------^----+
// | |
// +--------+ --------------+ ---
// | | |
// | label: label: | |
// | zrepl_control zrepl_data | |
// +-----+----+ +-----------+---+ |
// |netadaptor| |dataconn.Server| | rpc.Server
// | + | +------+--------+ |
// |grpcclient| | |
// |identity | | |
// +-----+----+ | |
// | | |
// +---------v-----------+ | |
// |pdu.ReplicationServer| | |
// +---------+-----------+ | |
// | | ---
// +----------+ +------------+
// | |
// +---v--v-----+
// | Handler |
// +------------+
// (usually endpoint.{Sender,Receiver})
package rpc
// edit trick for the ASCII art above:

View File

@@ -9,7 +9,7 @@ import (
type Logger = logger.Logger
/// All fields must be non-nil
// All fields must be non-nil
type Loggers struct {
General Logger
Control Logger

View File

@@ -3,13 +3,12 @@
//
// Intended Usage
//
// defer s.lock().unlock()
// // drop lock while waiting for wait group
// func() {
// defer a.l.Unlock().Lock()
// fssesDone.Wait()
// }()
//
// defer s.lock().unlock()
// // drop lock while waiting for wait group
// func() {
// defer a.l.Unlock().Lock()
// fssesDone.Wait()
// }()
package chainlock
import "sync"

View File

@@ -7,26 +7,25 @@
//
// Example:
//
// type Config struct {
// // This field must be set to a non-nil value,
// // forcing the caller to make their mind up
// // about this field.
// CriticalSetting *nodefault.Bool
// }
// type Config struct {
// // This field must be set to a non-nil value,
// // forcing the caller to make their mind up
// // about this field.
// CriticalSetting *nodefault.Bool
// }
//
// A function that takes such a Config should _not_ check for nil-ness
// and should instead unconditionally dereference:
//
// func f(c Config) {
// if c.CriticalSetting.B { }
// }
// func f(c Config) {
// if c.CriticalSetting.B { }
// }
//
// If the caller of f forgot to specify the .CriticalSetting
// field, the Go runtime will issue a nil-pointer deref panic
// and it'll be clear that the caller did not read the docs of Config.
//
// f(Config{}) // crashes
//
// f(Config{ CriticalSetting: &nodefault.Bool{B: false}}) // doesn't crash
// f(Config{}) // crashes
//
// f(Config{ CriticalSetting: &nodefault.Bool{B: false}}) // doesn't crash
package nodefault

View File

@@ -53,7 +53,6 @@ var componentValidChar = regexp.MustCompile(`^[0-9a-zA-Z-_\.: ]+$`)
// characters:
//
// [-_.: ]
//
func ComponentNamecheck(datasetPathComponent string) error {
if len(datasetPathComponent) == 0 {
return fmt.Errorf("path component must not be empty")
@@ -91,8 +90,8 @@ func (e *PathValidationError) Error() string {
// combines
//
// lib/libzfs/libzfs_dataset.c: zfs_validate_name
// module/zcommon/zfs_namecheck.c: entity_namecheck
// lib/libzfs/libzfs_dataset.c: zfs_validate_name
// module/zcommon/zfs_namecheck.c: entity_namecheck
//
// The '%' character is not allowed because it's reserved for zfs-internal use
func EntityNamecheck(path string, t EntityType) (err *PathValidationError) {
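To illustrate the character set enforced by componentValidChar above (letters, digits, and [-_.: ]), here are a few hypothetical inputs and the expected outcome of ComponentNamecheck:

_ = ComponentNamecheck("backups_2022-10.daily") // accepted: letters, digits, and [-_.: ] only
_ = ComponentNamecheck("backups/daily")         // rejected: '/' separates dataset components, it may not appear inside one
_ = ComponentNamecheck("data%recv")             // rejected: '%' is not in the allowed set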

View File

@@ -1022,8 +1022,10 @@ func (s *DrySendInfo) unmarshalZFSOutput(output []byte) (err error) {
}
// unmarshal info line, looks like this:
// full zroot/test/a@1 5389768
// incremental zroot/test/a@1 zroot/test/a@2 5383936
//
// full zroot/test/a@1 5389768
// incremental zroot/test/a@1 zroot/test/a@2 5383936
//
// => see test cases
func (s *DrySendInfo) unmarshalInfoLine(l string) (regexMatched bool, err error) {
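For orientation, the info line is whitespace-separated; a tiny illustration of how the two documented shapes split into fields (illustration only, not the actual parser, which matches a regex per the regexMatched return value):

// Illustration only; assumes import "strings".
full := strings.Fields("full zroot/test/a@1 5389768")
// full[0] = send type, full[1] = snapshot, full[2] = estimated size in bytes
incr := strings.Fields("incremental zroot/test/a@1 zroot/test/a@2 5383936")
// incr[0] = send type, incr[1] = from version, incr[2] = to snapshot, incr[3] = estimated size
_, _ = full, incr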
@@ -1855,7 +1857,6 @@ var ErrBookmarkCloningNotSupported = fmt.Errorf("bookmark cloning feature is not
// unless a bookmark with the name `bookmark` exists and has the same identity (zfs.FilesystemVersionEqualIdentity)
//
// v must be validated by the caller
//
func ZFSBookmark(ctx context.Context, fs string, v FilesystemVersion, bookmark string) (bm FilesystemVersion, err error) {
bm = FilesystemVersion{