Merge pull request #159 from zrepl/problame/cleanup-project

cleanup project: formatting & basic lints
Christian Schwarz 2019-03-27 16:25:10 +01:00 committed by GitHub
commit 56e63ff551
124 changed files with 1548 additions and 810 deletions


@@ -62,6 +62,7 @@ jobs:
 - run: make
 - run: make vet
 - run: make test
+- run: make lint
 - run: make release
 - store_artifacts:

.golangci.yml (new file, 10 lines)

@ -0,0 +1,10 @@
linters:
enable:
- goimports
issues:
exclude-rules:
- path: _test\.go
linters:
- errcheck
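
The new .golangci.yml enables the goimports linter on top of golangci-lint's default set and silences errcheck for files matching _test.go. A hypothetical snippet (not from this PR) of the kind of test code that exclusion tolerates: deliberately unchecked error returns in setup and teardown that errcheck would otherwise report.

package config

import (
	"io/ioutil"
	"os"
	"testing"
)

func TestParseTempConfig(t *testing.T) {
	f, err := ioutil.TempFile("", "zrepl-config-*.yml")
	if err != nil {
		t.Fatal(err)
	}
	// The deferred calls below drop their error returns; errcheck flags such
	// unchecked errors, but the exclude rule above keeps test files quiet.
	defer os.Remove(f.Name())
	defer f.Close()

	if _, err := f.WriteString("jobs: []\n"); err != nil {
		t.Fatal(err)
	}
}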


@@ -29,6 +29,7 @@ matrix:
 - make
 - make vet
 - make test
+- make lint
 - make artifacts/zrepl-freebsd-amd64
 - make artifacts/zrepl-linux-amd64
 - make artifacts/zrepl-darwin-amd64

Gopkg.lock (generated, 556 lines changed)

@ -1,6 +1,22 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
digest = "1:e4b30804a381d7603b8a344009987c1ba351c26043501b23b8c7ce21f0b67474"
name = "github.com/BurntSushi/toml"
packages = ["."]
pruneopts = ""
revision = "3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005"
version = "v0.3.1"
[[projects]]
branch = "master"
digest = "1:bf241ce6eec44d9ebca17e22af43f13c25f55267b308ce8b29b9820c0d6d1d25"
name = "github.com/OpenPeeDeeP/depguard"
packages = ["."]
pruneopts = ""
revision = "1f388ab2d81096755d25043aa729e2fb889f3dae"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:8cf2cf1ab10480b5e0df950dac1517aaabde05d055d9d955652997ae4b9ecbbf" digest = "1:8cf2cf1ab10480b5e0df950dac1517aaabde05d055d9d955652997ae4b9ecbbf"
@ -33,6 +49,14 @@
revision = "5b77d2a35fb0ede96d138fc9a99f5c9b6aef11b4" revision = "5b77d2a35fb0ede96d138fc9a99f5c9b6aef11b4"
version = "v1.7.0" version = "v1.7.0"
[[projects]]
digest = "1:eb53021a8aa3f599d29c7102e65026242bdedce998a54837dc67f14b6a97c5fd"
name = "github.com/fsnotify/fsnotify"
packages = ["."]
pruneopts = ""
revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
version = "v1.4.7"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:5d0a2385edf4ba44f3b7b76bc0436ceb8f62bf55aa5d540a9eb9ec6c58d86809" digest = "1:5d0a2385edf4ba44f3b7b76bc0436ceb8f62bf55aa5d540a9eb9ec6c58d86809"
@ -61,6 +85,28 @@
revision = "de7e78efa4a71b3f36c7154989c529dbdf9ae623" revision = "de7e78efa4a71b3f36c7154989c529dbdf9ae623"
version = "v1.1.0" version = "v1.1.0"
[[projects]]
digest = "1:9d3f086381a257229b34fcd5690c3e5cadcb5f365cb35757536f3c51ccbb9049"
name = "github.com/go-critic/go-critic"
packages = [
"checkers",
"checkers/internal/lintutil",
]
pruneopts = ""
revision = "d7b3038bc7a1c35a1d02fdd7cf4094f0f1a12001"
version = "v0.3.4"
[[projects]]
digest = "1:50907242db0cb4c5d982ae213b995e9176b917edb269b645097af3289d9a15da"
name = "github.com/go-lintpack/lintpack"
packages = [
".",
"astwalk",
]
pruneopts = ""
revision = "80adc0715ac409128d0b7212719896ad8d3444b7"
version = "v0.5.2"
[[projects]] [[projects]]
digest = "1:6a4a01d58b227c4b6b11111b9f172ec5c17682b82724e58e6daf3f19f4faccd8" digest = "1:6a4a01d58b227c4b6b11111b9f172ec5c17682b82724e58e6daf3f19f4faccd8"
name = "github.com/go-logfmt/logfmt" name = "github.com/go-logfmt/logfmt"
@ -69,6 +115,95 @@
revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
version = "v0.3.0" version = "v0.3.0"
[[projects]]
digest = "1:1119997895278e1b27810308d8f802590e713bf0263b13be304ca9e086bd22a7"
name = "github.com/go-toolsmith/astcast"
packages = ["."]
pruneopts = ""
revision = "a6cb19f07b66b859a53f3f2be6e4c3bba892db7e"
version = "v1.0.0"
[[projects]]
digest = "1:bcff57ad40d16a950986eb45dae40ed142d51c702e41fad2177518071bcc3d40"
name = "github.com/go-toolsmith/astcopy"
packages = ["."]
pruneopts = ""
revision = "245af3020944a15e09072c8ad3883c1451d1fdef"
version = "v1.0.0"
[[projects]]
digest = "1:f6629a0ef3b819e34d4634c7540edf0cb392054ade814c4a9587ab6a23685def"
name = "github.com/go-toolsmith/astequal"
packages = ["."]
pruneopts = ""
revision = "dcb477bfacd6e00a13c6d63bfc73db28dd343160"
version = "v1.0.0"
[[projects]]
digest = "1:05a2d85ca9e1164efa7cfb988d44bff221382658e567d958a78734bb9cccb758"
name = "github.com/go-toolsmith/astfmt"
packages = ["."]
pruneopts = ""
revision = "0d74c731079884bda287cf8df9ce7b92e688af8c"
version = "v1.0.0"
[[projects]]
digest = "1:a6f3d4784ec69928a0a54521dd2536e6bacc73edf988edefc2820230d7be7703"
name = "github.com/go-toolsmith/astp"
packages = ["."]
pruneopts = ""
revision = "6373270dee65bfb0479f2acd16d4c8e9d5db13f8"
version = "v1.0.0"
[[projects]]
digest = "1:9ba1aaf89cddc1cfe5d9a4a83d16ff8778369eaa358278a3fa2ef97847ebdb35"
name = "github.com/go-toolsmith/strparse"
packages = ["."]
pruneopts = ""
revision = "830b6daa1241714c12a9b9a4a56849fe2f93aedc"
version = "v1.0.0"
[[projects]]
digest = "1:b827014e6963ac236f3698ce5ca1c85ad3ccfbf722186eab207836e1d7b9d615"
name = "github.com/go-toolsmith/typep"
packages = ["."]
pruneopts = ""
revision = "cab1745ffd84a567b524317c7f90e96755b18fcf"
version = "v1.0.0"
[[projects]]
digest = "1:9ab1b1c637d7c8f49e39d8538a650d7eb2137b076790cff69d160823b505964c"
name = "github.com/gobwas/glob"
packages = [
".",
"compiler",
"match",
"syntax",
"syntax/ast",
"syntax/lexer",
"util/runes",
"util/strings",
]
pruneopts = ""
revision = "5ccd90ef52e1e632236f7326478d4faa74f99438"
version = "v0.2.3"
[[projects]]
digest = "1:fd53b471edb4c28c7d297f617f4da0d33402755f58d6301e7ca1197ef0a90937"
name = "github.com/gogo/protobuf"
packages = ["proto"]
pruneopts = ""
revision = "ba06b47c162d49f2af050fb4c75bcbc86a159d5c"
version = "v1.2.1"
[[projects]]
digest = "1:530233672f656641b365f8efb38ed9fba80e420baff2ce87633813ab3755ed6d"
name = "github.com/golang/mock"
packages = ["gomock"]
pruneopts = ""
revision = "51421b967af1f557f93a59e0057aaf15ca02e29c"
version = "v1.2.0"
[[projects]] [[projects]]
digest = "1:3dd078fda7500c341bc26cfbc6c6a34614f295a2457149fc1045cab767cbcf18" digest = "1:3dd078fda7500c341bc26cfbc6c6a34614f295a2457149fc1045cab767cbcf18"
name = "github.com/golang/protobuf" name = "github.com/golang/protobuf"
@ -89,6 +224,213 @@
revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5"
version = "v1.2.0" version = "v1.2.0"
[[projects]]
branch = "master"
digest = "1:f6a3ed95affdd867195dec281bf1d328dddb37590463eba1f7b39b44a9829e3d"
name = "github.com/golangci/check"
packages = [
"cmd/structcheck",
"cmd/varcheck",
]
pruneopts = ""
revision = "cfe4005ccda277a820149d44d6ededc400cc99a2"
[[projects]]
branch = "master"
digest = "1:262000a2de14b1d0a802acb611e7ee69208b7e3e08f7d0e62226bb324f12e375"
name = "github.com/golangci/dupl"
packages = [
".",
"job",
"printer",
"suffixtree",
"syntax",
"syntax/golang",
]
pruneopts = ""
revision = "3e9179ac440a0386ac7cc9a085fc44397c6b9bbc"
[[projects]]
branch = "master"
digest = "1:2298a8780ede449cb58108de23925ac2a14cca8ac151cfae45ea5992054d6cf2"
name = "github.com/golangci/errcheck"
packages = [
"golangci",
"internal/errcheck",
]
pruneopts = ""
revision = "ef45e06d44b6e018d817c16c762d448990adc5e0"
[[projects]]
branch = "master"
digest = "1:9b38ad496c9dabd1c820609c481f59c6c9597926c6125810af3d7a71bf2d649c"
name = "github.com/golangci/go-misc"
packages = ["deadcode"]
pruneopts = ""
revision = "927a3d87b613e9f6f0fb7ef8bb8de8b83c30a5a2"
[[projects]]
branch = "master"
digest = "1:e4bbd53b867030ca8b2e0f6d7338cec2373baf14109858312daa51a144f4a091"
name = "github.com/golangci/go-tools"
packages = [
"arg",
"callgraph",
"callgraph/static",
"config",
"deprecated",
"functions",
"internal/sharedcheck",
"lint",
"lint/lintdsl",
"lint/lintutil",
"lint/lintutil/format",
"simple",
"ssa",
"ssa/ssautil",
"ssautil",
"staticcheck",
"staticcheck/vrp",
"stylecheck",
"unused",
"version",
]
pruneopts = ""
revision = "35a9f45a5db090b0227d692d823151104cd695fa"
[[projects]]
branch = "master"
digest = "1:bc3387ddcbdacf135af0a16b9e9ec6ac7bf5a1f822f679d8d29c2d97cfcce205"
name = "github.com/golangci/goconst"
packages = ["."]
pruneopts = ""
revision = "041c5f2b40f3dd334a4a6ee6a3f84ca3fc70680a"
[[projects]]
branch = "master"
digest = "1:c5c9e52a4aaca585c1ce9c79f5ea31d74d03da39dfccda0b140f93d6a1be17b7"
name = "github.com/golangci/gocyclo"
packages = ["pkg/gocyclo"]
pruneopts = ""
revision = "0a533e8fa43d6605069e94f455bf9d79d4b8ea8c"
[[projects]]
branch = "master"
digest = "1:edccfa947bd237dcd1bceef56d1670c22930831ca196ff04f0e2c4a8483bf97b"
name = "github.com/golangci/gofmt"
packages = [
"gofmt",
"goimports",
]
pruneopts = ""
revision = "0b8337e80d98f7eec18e4504a4557b34423fd039"
[[projects]]
digest = "1:0071a728673f03bd75b65863a37d4b1c5bb06ffc4a4416f1a8b6b90f36b2c5e3"
name = "github.com/golangci/golangci-lint"
packages = [
"cmd/golangci-lint",
"pkg/commands",
"pkg/config",
"pkg/exitcodes",
"pkg/fsutils",
"pkg/golinters",
"pkg/goutil",
"pkg/lint",
"pkg/lint/astcache",
"pkg/lint/linter",
"pkg/lint/lintersdb",
"pkg/logutils",
"pkg/packages",
"pkg/printers",
"pkg/report",
"pkg/result",
"pkg/result/processors",
"pkg/timeutils",
]
pruneopts = ""
revision = "901cf25e20f86b7e9dc6f73eaba5afbd0cbdc257"
version = "v1.15.0"
[[projects]]
branch = "master"
digest = "1:30c45dd735f55c7dbd0ea6040e3ccc35f867532b8e1919016c0565510392417a"
name = "github.com/golangci/gosec"
packages = [
".",
"rules",
]
pruneopts = ""
revision = "8afd9cbb6cfb34a3b4d4d5711bafdc6640ae892f"
[[projects]]
branch = "master"
digest = "1:081d9ed8ba13ebbd4bd3e1f17cd703f77268416074588c38ce985d654b1fc0e1"
name = "github.com/golangci/govet"
packages = [
".",
"lib/cfg",
"lib/whitelist",
]
pruneopts = ""
revision = "44ddbe260190d79165f4150b828650780405d801"
[[projects]]
branch = "master"
digest = "1:7da7fde58cf7cf5e19f6a1c77eb153945b28cf03bab227e0d831897b7070b546"
name = "github.com/golangci/ineffassign"
packages = ["."]
pruneopts = ""
revision = "2ee8f2867dde308c46d401d6d30f6c644094b167"
[[projects]]
branch = "master"
digest = "1:8e50794fcb5f229576cd7eda5627a6c2f20341079f0c571077a7ab807c518da9"
name = "github.com/golangci/lint-1"
packages = ["."]
pruneopts = ""
revision = "d2cdd8c0821928c61cb0903441f8b35457a98a61"
[[projects]]
branch = "master"
digest = "1:8665edfb3c5371fbac9820d127fa0d9aed813cc2349a27a7d16064dd89fed146"
name = "github.com/golangci/maligned"
packages = ["."]
pruneopts = ""
revision = "b1d89398deca2fd3f8578e5a9551e819bd01ca5f"
[[projects]]
digest = "1:dbf28ceee27335219701dd4c6639c767eee31e2abb61485cdb1044587a04c077"
name = "github.com/golangci/misspell"
packages = ["."]
pruneopts = ""
revision = "b90dc15cfd220ecf8bbc9043ecb928cef381f011"
version = "v0.3.4"
[[projects]]
branch = "master"
digest = "1:045c2735b360cbebf398a0e9312aeafebf08fd38f0d51cb2aa0f9420364c3cd1"
name = "github.com/golangci/prealloc"
packages = ["."]
pruneopts = ""
revision = "215b22d4de21190b80ce05e7d8466677c1aa3223"
[[projects]]
branch = "master"
digest = "1:c23cf3c7078c3ba927492557a40c1ee1755734d4fff0e7fbe6d2f092604dae6d"
name = "github.com/golangci/revgrep"
packages = ["."]
pruneopts = ""
revision = "276a5c0a103935ee65af49afc254a65335bf1fcf"
[[projects]]
branch = "master"
digest = "1:c553e7c7483f2d6db1e84a27a18df144ed4041792d7556916369f86ccf5409fe"
name = "github.com/golangci/unconvert"
packages = ["."]
pruneopts = ""
revision = "28b1c447d1f4a810737ee6ab40ea6c1d0ceae4ad"
[[projects]] [[projects]]
digest = "1:ad92aa49f34cbc3546063c7eb2cabb55ee2278b72842eda80e2a20a8a06a8d73" digest = "1:ad92aa49f34cbc3546063c7eb2cabb55ee2278b72842eda80e2a20a8a06a8d73"
name = "github.com/google/uuid" name = "github.com/google/uuid"
@ -97,6 +439,25 @@
revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4" revision = "0cd6bf5da1e1c83f8b45653022c74f71af0538a4"
version = "v1.1.1" version = "v1.1.1"
[[projects]]
digest = "1:d14365c51dd1d34d5c79833ec91413bfbb166be978724f15701e17080dc06dec"
name = "github.com/hashicorp/hcl"
packages = [
".",
"hcl/ast",
"hcl/parser",
"hcl/printer",
"hcl/scanner",
"hcl/strconv",
"hcl/token",
"json/parser",
"json/scanner",
"json/token",
]
pruneopts = ""
revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
version = "v1.0.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:cb09475f771b9167fb9333629f5d6a7161572602ea040f1094602b0dc8709878" digest = "1:cb09475f771b9167fb9333629f5d6a7161572602ea040f1094602b0dc8709878"
@ -105,6 +466,25 @@
pruneopts = "" pruneopts = ""
revision = "db4671f3a9b8df855e993f7c94ec5ef1ffb0a23b" revision = "db4671f3a9b8df855e993f7c94ec5ef1ffb0a23b"
[[projects]]
digest = "1:765270f95ea68ad2150f6143eb8b9c0c17b038a7e2255b46580674471af00e27"
name = "github.com/kisielk/gotool"
packages = [
".",
"internal/load",
]
pruneopts = ""
revision = "80517062f582ea3340cd4baf70e86d539ae7d84d"
version = "v1.0.0"
[[projects]]
digest = "1:0f51cee70b0d254dbc93c22666ea2abf211af81c1701a96d04e2284b408621db"
name = "github.com/konsorten/go-windows-terminal-sequences"
packages = ["."]
pruneopts = ""
revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e"
version = "v1.0.2"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:1ed9eeebdf24aadfbca57eb50e6455bd1d2474525e0f0d4454de8c8e9bc7ee9a" digest = "1:1ed9eeebdf24aadfbca57eb50e6455bd1d2474525e0f0d4454de8c8e9bc7ee9a"
@ -137,6 +517,14 @@
revision = "345fbb3dbcdb252d9985ee899a84963c0fa24c82" revision = "345fbb3dbcdb252d9985ee899a84963c0fa24c82"
version = "v1.0" version = "v1.0"
[[projects]]
digest = "1:961dc3b1d11f969370533390fdf203813162980c858e1dabe827b60940c909a5"
name = "github.com/magiconair/properties"
packages = ["."]
pruneopts = ""
revision = "c2353362d570a7bfa228149c62842019201cfb71"
version = "v1.8.0"
[[projects]] [[projects]]
digest = "1:9ea83adf8e96d6304f394d40436f2eb44c1dc3250d223b74088cc253a6cd0a1c" digest = "1:9ea83adf8e96d6304f394d40436f2eb44c1dc3250d223b74088cc253a6cd0a1c"
name = "github.com/mattn/go-colorable" name = "github.com/mattn/go-colorable"
@ -169,6 +557,22 @@
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
version = "v1.0.0" version = "v1.0.0"
[[projects]]
digest = "1:6dbb0eb72090871f2e58d1e37973fe3cb8c0f45f49459398d3fc740cb30e13bd"
name = "github.com/mitchellh/go-homedir"
packages = ["."]
pruneopts = ""
revision = "af06845cf3004701891bf4fdb884bfe4920b3727"
version = "v1.1.0"
[[projects]]
digest = "1:bcc46a0fbd9e933087bef394871256b5c60269575bb661935874729c65bbbf60"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
pruneopts = ""
revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
version = "v1.1.2"
[[projects]] [[projects]]
digest = "1:4ff67dde814694496d7aa31be44b900f9717a10c8bc9136b13f49c8ef97f439a" digest = "1:4ff67dde814694496d7aa31be44b900f9717a10c8bc9136b13f49c8ef97f439a"
name = "github.com/montanaflynn/stats" name = "github.com/montanaflynn/stats"
@ -177,6 +581,24 @@
revision = "63fbb2597b7a13043b453a4b819945badb8f8926" revision = "63fbb2597b7a13043b453a4b819945badb8f8926"
version = "v0.5.0" version = "v0.5.0"
[[projects]]
digest = "1:9da71b9d17d6231f1486dc62d81af3f9d34535703ba9e7a60a902433c3091e3b"
name = "github.com/nbutton23/zxcvbn-go"
packages = [
".",
"adjacency",
"data",
"entropy",
"frequency",
"match",
"matching",
"scoring",
"utils/math",
]
pruneopts = ""
revision = "eafdab6b0663b4b528c35975c8b0e78be6e25261"
version = "v0.1"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:f60ff065b58bd53e641112b38bbda9d2684deb828393c7ffb89c69a1ee301d17" digest = "1:f60ff065b58bd53e641112b38bbda9d2684deb828393c7ffb89c69a1ee301d17"
@ -185,6 +607,14 @@
pruneopts = "" pruneopts = ""
revision = "0fd16699aae1833640fca52a937944c6f3b1d58c" revision = "0fd16699aae1833640fca52a937944c6f3b1d58c"
[[projects]]
digest = "1:894aef961c056b6d85d12bac890bf60c44e99b46292888bfa66caf529f804457"
name = "github.com/pelletier/go-toml"
packages = ["."]
pruneopts = ""
revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
version = "v1.2.0"
[[projects]] [[projects]]
digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca" digest = "1:7365acd48986e205ccb8652cc746f09c8b7876030d53710ea6ef7d0bd0dcd7ca"
name = "github.com/pkg/errors" name = "github.com/pkg/errors"
@ -269,6 +699,33 @@
revision = "1744e2970ca51c86172c8190fadad617561ed6e7" revision = "1744e2970ca51c86172c8190fadad617561ed6e7"
version = "v1.0.0" version = "v1.0.0"
[[projects]]
digest = "1:b73fe282e350b3ef2c71d8ff08e929e0b9670b1bb5b7fde1d3c1b4cd6e6dc8b1"
name = "github.com/sirupsen/logrus"
packages = ["."]
pruneopts = ""
revision = "dae0fa8d5b0c810a8ab733fbd5510c7cae84eca4"
version = "v1.4.0"
[[projects]]
digest = "1:956f655c87b7255c6b1ae6c203ebb0af98cf2a13ef2507e34c9bf1c0332ac0f5"
name = "github.com/spf13/afero"
packages = [
".",
"mem",
]
pruneopts = ""
revision = "588a75ec4f32903aa5e39a2619ba6a4631e28424"
version = "v1.2.2"
[[projects]]
digest = "1:ae3493c780092be9d576a1f746ab967293ec165e8473425631f06658b6212afc"
name = "github.com/spf13/cast"
packages = ["."]
pruneopts = ""
revision = "8c9545af88b134710ab1cd196795e7f2388358d7"
version = "v1.3.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:146327ce93be37e68bd3ff8541090d96da8cb3adc9e35d57570e9170a29f6bf6" digest = "1:146327ce93be37e68bd3ff8541090d96da8cb3adc9e35d57570e9170a29f6bf6"
@ -277,6 +734,14 @@
pruneopts = "" pruneopts = ""
revision = "b78744579491c1ceeaaa3b40205e56b0591b93a3" revision = "b78744579491c1ceeaaa3b40205e56b0591b93a3"
[[projects]]
digest = "1:cc15ae4fbdb02ce31f3392361a70ac041f4f02e0485de8ffac92bd8033e3d26e"
name = "github.com/spf13/jwalterweatherman"
packages = ["."]
pruneopts = ""
revision = "94f6ae3ed3bceceafa716478c5fbf8d29ca601a1"
version = "v1.1.0"
[[projects]] [[projects]]
digest = "1:261bc565833ef4f02121450d74eb88d5ae4bd74bfe5d0e862cddb8550ec35000" digest = "1:261bc565833ef4f02121450d74eb88d5ae4bd74bfe5d0e862cddb8550ec35000"
name = "github.com/spf13/pflag" name = "github.com/spf13/pflag"
@ -285,6 +750,14 @@
revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66"
version = "v1.0.0" version = "v1.0.0"
[[projects]]
digest = "1:90fe60ab6f827e308b0c8cc1e11dce8ff1e96a927c8b171271a3cb04dd517606"
name = "github.com/spf13/viper"
packages = ["."]
pruneopts = ""
revision = "9e56dacc08fbbf8c9ee2dbc717553c758ce42bc9"
version = "v1.3.2"
[[projects]] [[projects]]
digest = "1:3926a4ec9a4ff1a072458451aa2d9b98acd059a45b38f7335d31e06c3d6a0159" digest = "1:3926a4ec9a4ff1a072458451aa2d9b98acd059a45b38f7335d31e06c3d6a0159"
name = "github.com/stretchr/testify" name = "github.com/stretchr/testify"
@ -331,6 +804,14 @@
pruneopts = "" pruneopts = ""
revision = "08227ad854131f7dfcdfb12579fb73dd8a38a03a" revision = "08227ad854131f7dfcdfb12579fb73dd8a38a03a"
[[projects]]
branch = "master"
digest = "1:36ef1d8645934b1744cc7d8726e00d3dd9d8d84c18617bf7367a3a6d532f3370"
name = "golang.org/x/crypto"
packages = ["ssh/terminal"]
pruneopts = ""
revision = "a5d413f7728c81fb97d96a2b722368945f651e78"
[[projects]] [[projects]]
branch = "master" branch = "master"
digest = "1:ea539c13b066dac72a940b62f37600a20ab8e88057397c78f3197c1a48475425" digest = "1:ea539c13b066dac72a940b62f37600a20ab8e88057397c78f3197c1a48475425"
@ -351,7 +832,10 @@
branch = "master" branch = "master"
digest = "1:f358024b019f87eecaadcb098113a40852c94fe58ea670ef3c3e2d2c7bd93db1" digest = "1:f358024b019f87eecaadcb098113a40852c94fe58ea670ef3c3e2d2c7bd93db1"
name = "golang.org/x/sys" name = "golang.org/x/sys"
packages = ["unix"] packages = [
"unix",
"windows",
]
pruneopts = "" pruneopts = ""
revision = "4ed8d59d0b35e1e29334a206d1b3f38b1e5dfb31" revision = "4ed8d59d0b35e1e29334a206d1b3f38b1e5dfb31"
@ -375,6 +859,7 @@
"unicode/cldr", "unicode/cldr",
"unicode/norm", "unicode/norm",
"unicode/rangetable", "unicode/rangetable",
"width",
] ]
pruneopts = "" pruneopts = ""
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
@ -384,7 +869,24 @@
branch = "master" branch = "master"
digest = "1:4cd780b2ee42c8eac9c02bfb6e6b52dcbaef770774458c8938f5cbfb73a7b6d3" digest = "1:4cd780b2ee42c8eac9c02bfb6e6b52dcbaef770774458c8938f5cbfb73a7b6d3"
name = "golang.org/x/tools" name = "golang.org/x/tools"
packages = ["cmd/stringer"] packages = [
"cmd/goimports",
"cmd/stringer",
"go/ast/astutil",
"go/buildutil",
"go/gcexportdata",
"go/internal/cgo",
"go/internal/gcimporter",
"go/loader",
"go/packages",
"go/ssa",
"go/ssa/ssautil",
"go/types/typeutil",
"imports",
"internal/fastwalk",
"internal/gopathwalk",
"internal/semver",
]
pruneopts = "" pruneopts = ""
revision = "d0ca3933b724e6be513276cc2edb34e10d667438" revision = "d0ca3933b724e6be513276cc2edb34e10d667438"
@ -436,6 +938,54 @@
revision = "df014850f6dee74ba2fc94874043a9f3f75fbfd8" revision = "df014850f6dee74ba2fc94874043a9f3f75fbfd8"
version = "v1.17.0" version = "v1.17.0"
[[projects]]
digest = "1:cedccf16b71e86db87a24f8d4c70b0a855872eb967cb906a66b95de56aefbd0d"
name = "gopkg.in/yaml.v2"
packages = ["."]
pruneopts = ""
revision = "51d6538a90f86fe93ac480b35f37b2be17fef232"
version = "v2.2.2"
[[projects]]
branch = "master"
digest = "1:2a6012038cdeb9851f1a71497544820e17ff2772b3cf799d24a76208cb9843b8"
name = "mvdan.cc/interfacer"
packages = ["check"]
pruneopts = ""
revision = "c20040233aedb03da82d460eca6130fcd91c629a"
[[projects]]
branch = "master"
digest = "1:68e12be99c0d3355e04eecba6bc302876268a134a0eecd75258d8fefe44a94ed"
name = "mvdan.cc/lint"
packages = ["."]
pruneopts = ""
revision = "adc824a0674b99099789b6188a058d485eaf61c0"
[[projects]]
branch = "master"
digest = "1:4af0788cd865cab3c8276462de56bad858e178199415241c2420e13e95d8594c"
name = "mvdan.cc/unparam"
packages = ["check"]
pruneopts = ""
revision = "1b9ccfa71afe53433971717161c9666adfc4d8c5"
[[projects]]
digest = "1:0778809e0f18d0c0c05105a5c1e583d2253c5fd66fbd2b79b00e5f6439402491"
name = "sourcegraph.com/sourcegraph/go-diff"
packages = ["diff"]
pruneopts = ""
revision = "c613306ac97fb4807862c088149199f0dab8685a"
version = "v0.5.0"
[[projects]]
digest = "1:ffc8cfc88692d5daab7abac1d989e9f7fc09727e42a945702f8f2d6d67f0fd6c"
name = "sourcegraph.com/sqs/pbtypes"
packages = ["."]
pruneopts = ""
revision = "688c2c2cb411327a50aae0f89119af9f38b0fc03"
version = "v1.0.0"
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"
analyzer-version = 1 analyzer-version = 1
@ -446,6 +996,7 @@
"github.com/go-logfmt/logfmt", "github.com/go-logfmt/logfmt",
"github.com/golang/protobuf/proto", "github.com/golang/protobuf/proto",
"github.com/golang/protobuf/protoc-gen-go", "github.com/golang/protobuf/protoc-gen-go",
"github.com/golangci/golangci-lint/cmd/golangci-lint",
"github.com/google/uuid", "github.com/google/uuid",
"github.com/jinzhu/copier", "github.com/jinzhu/copier",
"github.com/kr/pretty", "github.com/kr/pretty",
@ -465,6 +1016,7 @@
"github.com/zrepl/yaml-config", "github.com/zrepl/yaml-config",
"golang.org/x/net/context", "golang.org/x/net/context",
"golang.org/x/sys/unix", "golang.org/x/sys/unix",
"golang.org/x/tools/cmd/goimports",
"golang.org/x/tools/cmd/stringer", "golang.org/x/tools/cmd/stringer",
"google.golang.org/grpc", "google.golang.org/grpc",
"google.golang.org/grpc/codes", "google.golang.org/grpc/codes",


@@ -5,6 +5,8 @@ ignored = [
 required = [
 "golang.org/x/tools/cmd/stringer",
 "github.com/alvaroloes/enumer",
+"github.com/golangci/golangci-lint/cmd/golangci-lint",
+"golang.org/x/tools/cmd/goimports",
 ]
 [[constraint]]


@@ -1,4 +1,4 @@
-.PHONY: generate build test vet cover release docs docs-clean clean vendordeps
+.PHONY: generate build test vet cover release docs docs-clean clean vendordeps format lint
 .DEFAULT_GOAL := build
 ARTIFACTDIR := artifacts
@@ -30,6 +30,12 @@ generate: #not part of the build, must do that manually
 protoc -I=replication/logic/pdu --go_out=plugins=grpc:replication/logic/pdu replication/logic/pdu/pdu.proto
 go generate -x ./...
+format:
+goimports -srcdir . -local 'github.com/zrepl/zrepl' -w $(shell find . -type f -name '*.go' -not -path "./vendor/*" -not -name '*.pb.go' -not -name '*_enumer.go')
+lint:
+golangci-lint run ./...
 build:
 @echo "INFO: In case of missing dependencies, run 'make vendordeps'"
 $(GO_BUILD) -o "$(ARTIFACTDIR)/zrepl"
@@ -68,7 +74,7 @@ docs-clean:
 .PHONY: $(RELEASE_BINS)
 # TODO: two wildcards possible
-$(RELEASE_BINS): $(ARTIFACTDIR)/zrepl-%-amd64: generate $(ARTIFACTDIR) vet test
+$(RELEASE_BINS): $(ARTIFACTDIR)/zrepl-%-amd64: generate $(ARTIFACTDIR) vet test lint
 @echo "INFO: In case of missing dependencies, run 'make vendordeps'"
 GOOS=$* GOARCH=amd64 $(GO_BUILD) -o "$(ARTIFACTDIR)/zrepl-$*-amd64"


@@ -11,7 +11,8 @@
 package main
 import (
-_ "fmt"
+"fmt"
 _ "github.com/alvaroloes/enumer"
 _ "github.com/golang/protobuf/protoc-gen-go"
 _ "golang.org/x/tools/cmd/stringer"


@ -2,10 +2,12 @@ package cli
import ( import (
"fmt" "fmt"
"os"
"github.com/spf13/cobra" "github.com/spf13/cobra"
"github.com/spf13/pflag" "github.com/spf13/pflag"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"os"
) )
var rootArgs struct { var rootArgs struct {
@ -23,7 +25,10 @@ var bashcompCmd = &cobra.Command{
Run: func(cmd *cobra.Command, args []string) { Run: func(cmd *cobra.Command, args []string) {
if len(args) != 1 { if len(args) != 1 {
fmt.Fprintf(os.Stderr, "specify exactly one positional agument\n") fmt.Fprintf(os.Stderr, "specify exactly one positional agument\n")
cmd.Usage() err := cmd.Usage()
if err != nil {
panic(err)
}
os.Exit(1) os.Exit(1)
} }
if err := rootCmd.GenBashCompletionFile(args[0]); err != nil { if err := rootCmd.GenBashCompletionFile(args[0]); err != nil {
@ -40,15 +45,15 @@ func init() {
} }
type Subcommand struct { type Subcommand struct {
Use string Use string
Short string Short string
Example string Example string
NoRequireConfig bool NoRequireConfig bool
Run func(subcommand *Subcommand, args []string) error Run func(subcommand *Subcommand, args []string) error
SetupFlags func(f *pflag.FlagSet) SetupFlags func(f *pflag.FlagSet)
SetupSubcommands func() []*Subcommand SetupSubcommands func() []*Subcommand
config *config.Config config *config.Config
configErr error configErr error
} }
@ -93,8 +98,8 @@ func AddSubcommand(s *Subcommand) {
func addSubcommandToCobraCmd(c *cobra.Command, s *Subcommand) { func addSubcommandToCobraCmd(c *cobra.Command, s *Subcommand) {
cmd := cobra.Command{ cmd := cobra.Command{
Use: s.Use, Use: s.Use,
Short: s.Short, Short: s.Short,
Example: s.Example, Example: s.Example,
} }
if s.SetupSubcommands == nil { if s.SetupSubcommands == nil {
@ -110,7 +115,6 @@ func addSubcommandToCobraCmd(c *cobra.Command, s *Subcommand) {
c.AddCommand(&cmd) c.AddCommand(&cmd)
} }
func Run() { func Run() {
if err := rootCmd.Execute(); err != nil { if err := rootCmd.Execute(); err != nil {
os.Exit(1) os.Exit(1)


@ -3,39 +3,49 @@ package client
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"os"
"github.com/kr/pretty" "github.com/kr/pretty"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/spf13/pflag" "github.com/spf13/pflag"
"github.com/zrepl/yaml-config" "github.com/zrepl/yaml-config"
"github.com/zrepl/zrepl/cli" "github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/job" "github.com/zrepl/zrepl/daemon/job"
"github.com/zrepl/zrepl/daemon/logging" "github.com/zrepl/zrepl/daemon/logging"
"github.com/zrepl/zrepl/logger" "github.com/zrepl/zrepl/logger"
"os"
) )
var configcheckArgs struct { var configcheckArgs struct {
format string format string
what string what string
} }
var ConfigcheckCmd = &cli.Subcommand{ var ConfigcheckCmd = &cli.Subcommand{
Use: "configcheck", Use: "configcheck",
Short: "check if config can be parsed without errors", Short: "check if config can be parsed without errors",
SetupFlags: func(f *pflag.FlagSet) { SetupFlags: func(f *pflag.FlagSet) {
f.StringVar(&configcheckArgs.format, "format", "", "dump parsed config object [pretty|yaml|json]") f.StringVar(&configcheckArgs.format, "format", "", "dump parsed config object [pretty|yaml|json]")
f.StringVar(&configcheckArgs.what, "what", "all", "what to print [all|config|jobs|logging]") f.StringVar(&configcheckArgs.what, "what", "all", "what to print [all|config|jobs|logging]")
}, },
Run: func(subcommand *cli.Subcommand, args []string) error { Run: func(subcommand *cli.Subcommand, args []string) error {
formatMap := map[string]func(interface{}) { formatMap := map[string]func(interface{}){
"": func(i interface{}) {}, "": func(i interface{}) {},
"pretty": func(i interface{}) { pretty.Println(i) }, "pretty": func(i interface{}) {
if _, err := pretty.Println(i); err != nil {
panic(err)
}
},
"json": func(i interface{}) { "json": func(i interface{}) {
json.NewEncoder(os.Stdout).Encode(subcommand.Config()) if err := json.NewEncoder(os.Stdout).Encode(subcommand.Config()); err != nil {
panic(err)
}
}, },
"yaml": func(i interface{}) { "yaml": func(i interface{}) {
yaml.NewEncoder(os.Stdout).Encode(subcommand.Config()) if err := yaml.NewEncoder(os.Stdout).Encode(subcommand.Config()); err != nil {
panic(err)
}
}, },
} }
@ -71,12 +81,11 @@ var ConfigcheckCmd = &cli.Subcommand{
} }
} }
whatMap := map[string]func(){
whatMap := map[string]func() {
"all": func() { "all": func() {
o := struct { o := struct {
config *config.Config config *config.Config
jobs []job.Job jobs []job.Job
logging *logger.Outlets logging *logger.Outlets
}{ }{
subcommand.Config(), subcommand.Config(),
@ -109,4 +118,3 @@ var ConfigcheckCmd = &cli.Subcommand{
} }
}, },
} }


@@ -4,10 +4,11 @@ import (
 "bytes"
 "context"
 "encoding/json"
-"github.com/pkg/errors"
 "io"
 "net"
 "net/http"
+
+"github.com/pkg/errors"
 )
 func controlHttpClient(sockpath string) (client http.Client, err error) {
@@ -35,7 +36,7 @@ func jsonRequestResponse(c http.Client, endpoint string, req interface{}, res in
 if resp.StatusCode != http.StatusOK {
 var msg bytes.Buffer
-io.CopyN(&msg, resp.Body, 4096)
+_, _ = io.CopyN(&msg, resp.Body, 4096) // ignore error, just display what we got
 return errors.Errorf("%s", msg.String())
 }
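
Several hunks in this PR exist only to satisfy errcheck: an error return is either propagated to the caller or explicitly discarded with a blank assignment plus a comment, as in the io.CopyN change above. A small self-contained sketch of both patterns; the function names are illustrative and not taken from the zrepl codebase.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// copyBody shows the "explicitly discard" pattern: the error is dropped on
// purpose, and the blank assignments make that visible to errcheck and readers.
func copyBody(r io.Reader) string {
	var msg bytes.Buffer
	_, _ = io.CopyN(&msg, r, 4096) // ignore error, just display what we got
	return msg.String()
}

// copyBodyStrict shows the "propagate" pattern used elsewhere in this PR
// (for example in the status command): the caller decides how to handle failure.
func copyBodyStrict(r io.Reader) (string, error) {
	var msg bytes.Buffer
	if _, err := io.CopyN(&msg, r, 4096); err != nil && err != io.EOF {
		return "", err
	}
	return msg.String(), nil
}

func main() {
	fmt.Println(copyBody(strings.NewReader("hello")))

	s, err := copyBodyStrict(strings.NewReader("world"))
	if err != nil {
		panic(err)
	}
	fmt.Println(s)
}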


@ -6,6 +6,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/spf13/pflag" "github.com/spf13/pflag"
"github.com/zrepl/zrepl/zfs" "github.com/zrepl/zrepl/zfs"
"github.com/zrepl/zrepl/cli" "github.com/zrepl/zrepl/cli"
@ -22,11 +23,6 @@ var (
} }
) )
type migration struct {
name string
method func(config *config.Config, args []string) error
}
var migrations = []*cli.Subcommand{ var migrations = []*cli.Subcommand{
&cli.Subcommand{ &cli.Subcommand{
Use: "0.0.X:0.1:placeholder", Use: "0.0.X:0.1:placeholder",


@ -2,11 +2,12 @@ package client
import ( import (
"errors" "errors"
"log"
"os"
"github.com/zrepl/zrepl/cli" "github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon" "github.com/zrepl/zrepl/daemon"
"log"
"os"
) )
var pprofArgs struct { var pprofArgs struct {


@ -2,6 +2,7 @@ package client
import ( import (
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/zrepl/zrepl/cli" "github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon" "github.com/zrepl/zrepl/daemon"
@ -28,10 +29,10 @@ func runSignalCmd(config *config.Config, args []string) error {
err = jsonRequestResponse(httpc, daemon.ControlJobEndpointSignal, err = jsonRequestResponse(httpc, daemon.ControlJobEndpointSignal,
struct { struct {
Name string Name string
Op string Op string
}{ }{
Name: args[1], Name: args[1],
Op: args[0], Op: args[0],
}, },
struct{}{}, struct{}{},
) )


@ -2,15 +2,6 @@ package client
import ( import (
"fmt" "fmt"
"github.com/gdamore/tcell/termbox"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/zrepl/yaml-config"
"github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/daemon"
"github.com/zrepl/zrepl/daemon/job"
"github.com/zrepl/zrepl/daemon/pruner"
"github.com/zrepl/zrepl/replication/report"
"io" "io"
"math" "math"
"net/http" "net/http"
@ -19,18 +10,29 @@ import (
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/gdamore/tcell/termbox"
"github.com/pkg/errors"
"github.com/spf13/pflag"
"github.com/zrepl/yaml-config"
"github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/daemon"
"github.com/zrepl/zrepl/daemon/job"
"github.com/zrepl/zrepl/daemon/pruner"
"github.com/zrepl/zrepl/replication/report"
) )
type byteProgressMeasurement struct { type byteProgressMeasurement struct {
time time.Time time time.Time
val int64 val int64
} }
type bytesProgressHistory struct { type bytesProgressHistory struct {
last *byteProgressMeasurement // pointer as poor man's optional last *byteProgressMeasurement // pointer as poor man's optional
changeCount int changeCount int
lastChange time.Time lastChange time.Time
bpsAvg float64 bpsAvg float64
} }
func (p *bytesProgressHistory) Update(currentVal int64) (bytesPerSecondAvg int64, changeCount int) { func (p *bytesProgressHistory) Update(currentVal int64) (bytesPerSecondAvg int64, changeCount int) {
@ -38,7 +40,7 @@ func (p *bytesProgressHistory) Update(currentVal int64) (bytesPerSecondAvg int64
if p.last == nil { if p.last == nil {
p.last = &byteProgressMeasurement{ p.last = &byteProgressMeasurement{
time: time.Now(), time: time.Now(),
val: currentVal, val: currentVal,
} }
return 0, 0 return 0, 0
} }
@ -48,18 +50,17 @@ func (p *bytesProgressHistory) Update(currentVal int64) (bytesPerSecondAvg int64
p.lastChange = time.Now() p.lastChange = time.Now()
} }
if time.Now().Sub(p.lastChange) > 3 * time.Second { if time.Since(p.lastChange) > 3*time.Second {
p.last = nil p.last = nil
return 0, 0 return 0, 0
} }
deltaV := currentVal - p.last.val
deltaV := currentVal - p.last.val; deltaT := time.Since(p.last.time)
deltaT := time.Now().Sub(p.last.time)
rate := float64(deltaV) / deltaT.Seconds() rate := float64(deltaV) / deltaT.Seconds()
factor := 0.3 factor := 0.3
p.bpsAvg = (1-factor) * p.bpsAvg + factor * rate p.bpsAvg = (1-factor)*p.bpsAvg + factor*rate
p.last.time = time.Now() p.last.time = time.Now()
p.last.val = currentVal p.last.val = currentVal
@ -80,15 +81,10 @@ type tui struct {
func newTui() tui { func newTui() tui {
return tui{ return tui{
replicationProgress: make(map[string]*bytesProgressHistory, 0), replicationProgress: make(map[string]*bytesProgressHistory),
} }
} }
func (t *tui) moveCursor(x, y int) {
t.x += x
t.y += y
}
const INDENT_MULTIPLIER = 4 const INDENT_MULTIPLIER = 4
func (t *tui) moveLine(dl int, col int) { func (t *tui) moveLine(dl int, col int) {
@ -119,7 +115,7 @@ func wrap(s string, width int) string {
rem = len(s) rem = len(s)
} }
if idx := strings.IndexAny(s, "\n\r"); idx != -1 && idx < rem { if idx := strings.IndexAny(s, "\n\r"); idx != -1 && idx < rem {
rem = idx+1 rem = idx + 1
} }
untilNewline := strings.TrimRight(s[:rem], "\n\r") untilNewline := strings.TrimRight(s[:rem], "\n\r")
s = s[rem:] s = s[rem:]
@ -135,12 +131,12 @@ func wrap(s string, width int) string {
func (t *tui) printfDrawIndentedAndWrappedIfMultiline(format string, a ...interface{}) { func (t *tui) printfDrawIndentedAndWrappedIfMultiline(format string, a ...interface{}) {
whole := fmt.Sprintf(format, a...) whole := fmt.Sprintf(format, a...)
width, _ := termbox.Size() width, _ := termbox.Size()
if !strings.ContainsAny(whole, "\n\r") && t.x + len(whole) <= width { if !strings.ContainsAny(whole, "\n\r") && t.x+len(whole) <= width {
t.printf(format, a...) t.printf(format, a...)
} else { } else {
t.addIndent(1) t.addIndent(1)
t.newline() t.newline()
t.write(wrap(whole, width - INDENT_MULTIPLIER*t.indent)) t.write(wrap(whole, width-INDENT_MULTIPLIER*t.indent))
t.addIndent(-1) t.addIndent(-1)
} }
} }
@ -159,7 +155,6 @@ func (t *tui) addIndent(indent int) {
t.moveLine(0, 0) t.moveLine(0, 0)
} }
var statusFlags struct { var statusFlags struct {
Raw bool Raw bool
} }
@ -180,14 +175,17 @@ func runStatus(s *cli.Subcommand, args []string) error {
} }
if statusFlags.Raw { if statusFlags.Raw {
resp, err := httpc.Get("http://unix"+daemon.ControlJobEndpointStatus) resp, err := httpc.Get("http://unix" + daemon.ControlJobEndpointStatus)
if err != nil { if err != nil {
return err return err
} }
defer resp.Body.Close() defer resp.Body.Close()
if resp.StatusCode != http.StatusOK { if resp.StatusCode != http.StatusOK {
fmt.Fprintf(os.Stderr, "Received error response:\n") fmt.Fprintf(os.Stderr, "Received error response:\n")
io.CopyN(os.Stderr, resp.Body, 4096) _, err := io.CopyN(os.Stderr, resp.Body, 4096)
if err != nil {
return err
}
return errors.Errorf("exit") return errors.Errorf("exit")
} }
if _, err := io.Copy(os.Stdout, resp.Body); err != nil { if _, err := io.Copy(os.Stdout, resp.Body); err != nil {
@ -226,7 +224,7 @@ func runStatus(s *cli.Subcommand, args []string) error {
ticker := time.NewTicker(500 * time.Millisecond) ticker := time.NewTicker(500 * time.Millisecond)
defer ticker.Stop() defer ticker.Stop()
go func() { go func() {
for _ = range ticker.C { for range ticker.C {
update() update()
} }
}() }()
@ -277,7 +275,7 @@ func (t *tui) draw() {
//Iterate over map in alphabetical order //Iterate over map in alphabetical order
keys := make([]string, len(t.report)) keys := make([]string, len(t.report))
i := 0 i := 0
for k, _ := range t.report { for k := range t.report {
keys[i] = k keys[i] = k
i++ i++
} }
@ -363,7 +361,7 @@ func (t *tui) renderReplicationReport(rep *report.Report, history *bytesProgress
t.newline() t.newline()
} }
if !rep.WaitReconnectSince.IsZero() { if !rep.WaitReconnectSince.IsZero() {
delta := rep.WaitReconnectUntil.Sub(time.Now()).Round(time.Second) delta := time.Until(rep.WaitReconnectUntil).Round(time.Second)
if rep.WaitReconnectUntil.IsZero() || delta > 0 { if rep.WaitReconnectUntil.IsZero() || delta > 0 {
var until string var until string
if rep.WaitReconnectUntil.IsZero() { if rep.WaitReconnectUntil.IsZero() {
@ -390,7 +388,7 @@ func (t *tui) renderReplicationReport(rep *report.Report, history *bytesProgress
t.newline() t.newline()
t.addIndent(1) t.addIndent(1)
for i, a := range rep.Attempts[:len(rep.Attempts)-1] { for i, a := range rep.Attempts[:len(rep.Attempts)-1] {
t.printfDrawIndentedAndWrappedIfMultiline("#%d: %s (failed at %s) (ran %s)", i + 1, a.State, a.FinishAt, a.FinishAt.Sub(a.StartAt)) t.printfDrawIndentedAndWrappedIfMultiline("#%d: %s (failed at %s) (ran %s)", i+1, a.State, a.FinishAt, a.FinishAt.Sub(a.StartAt))
t.newline() t.newline()
} }
t.addIndent(-1) t.addIndent(-1)
@ -462,7 +460,7 @@ func (t *tui) renderPrunerReport(r *pruner.Report) {
*pruner.FSReport *pruner.FSReport
completed bool completed bool
} }
all := make([]commonFS, 0, len(r.Pending) + len(r.Completed)) all := make([]commonFS, 0, len(r.Pending)+len(r.Completed))
for i := range r.Pending { for i := range r.Pending {
all = append(all, commonFS{&r.Pending[i], false}) all = append(all, commonFS{&r.Pending[i], false})
} }
@ -471,7 +469,8 @@ func (t *tui) renderPrunerReport(r *pruner.Report) {
} }
switch state { switch state {
case pruner.Plan: fallthrough case pruner.Plan:
fallthrough
case pruner.PlanErr: case pruner.PlanErr:
return return
} }
@ -499,7 +498,7 @@ func (t *tui) renderPrunerReport(r *pruner.Report) {
t.write("[") t.write("[")
t.write(times("=", progress)) t.write(times("=", progress))
t.write(">") t.write(">")
t.write(times("-", 80 - progress)) t.write(times("-", 80-progress))
t.write("]") t.write("]")
t.printf(" %d/%d snapshots", completedDestroyCount, totalDestroyCount) t.printf(" %d/%d snapshots", completedDestroyCount, totalDestroyCount)
t.newline() t.newline()
@ -519,9 +518,9 @@ func (t *tui) renderPrunerReport(r *pruner.Report) {
if fs.LastError != "" { if fs.LastError != "" {
if strings.ContainsAny(fs.LastError, "\r\n") { if strings.ContainsAny(fs.LastError, "\r\n") {
t.printf("ERROR:") t.printf("ERROR:")
t.printfDrawIndentedAndWrappedIfMultiline("%s\n", fs.LastError) t.printfDrawIndentedAndWrappedIfMultiline("%s\n", fs.LastError)
} else { } else {
t.printfDrawIndentedAndWrappedIfMultiline("ERROR: %s\n", fs.LastError) t.printfDrawIndentedAndWrappedIfMultiline("ERROR: %s\n", fs.LastError)
} }
t.newline() t.newline()
continue continue
@ -531,7 +530,7 @@ func (t *tui) renderPrunerReport(r *pruner.Report) {
len(fs.DestroyList), len(fs.SnapshotList)) len(fs.DestroyList), len(fs.SnapshotList))
if fs.completed { if fs.completed {
t.printf( "Completed %s\n", pruneRuleActionStr) t.printf("Completed %s\n", pruneRuleActionStr)
continue continue
} }
@ -560,14 +559,6 @@ func rightPad(str string, length int, pad string) string {
return str + times(pad, length-len(str)) return str + times(pad, length-len(str))
} }
func leftPad(str string, length int, pad string) string {
if len(str) > length {
return str[len(str)-length:]
}
return times(pad, length-len(str)) + str
}
var arrowPositions = `>\|/` var arrowPositions = `>\|/`
// changeCount = 0 indicates stall / no progresss // changeCount = 0 indicates stall / no progresss
@ -584,7 +575,7 @@ func (t *tui) drawBar(length int, bytes, totalBytes int64, changeCount int) {
t.write("[") t.write("[")
t.write(times("=", completedLength)) t.write(times("=", completedLength))
t.write( string(arrowPositions[changeCount%len(arrowPositions)])) t.write(string(arrowPositions[changeCount%len(arrowPositions)]))
t.write(times("-", length-completedLength)) t.write(times("-", length-completedLength))
t.write("]") t.write("]")
} }
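
Among the smaller fixes in this file, manual time arithmetic is replaced with the standard-library helpers that the enabled linters prefer: time.Now().Sub(x) becomes time.Since(x), and x.Sub(time.Now()) becomes time.Until(x). A tiny runnable sketch of the equivalence:

package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now().Add(-3 * time.Second)
	deadline := time.Now().Add(10 * time.Second)

	// Old spelling and the lint-preferred equivalent; both yield the same
	// Duration, modulo the nanoseconds elapsed between the two time.Now() calls.
	elapsedOld := time.Now().Sub(start)
	elapsedNew := time.Since(start)

	remainingOld := deadline.Sub(time.Now())
	remainingNew := time.Until(deadline)

	fmt.Println(elapsedOld.Round(time.Second), elapsedNew.Round(time.Second))
	fmt.Println(remainingOld.Round(time.Second), remainingNew.Round(time.Second))
}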


@ -1,13 +1,15 @@
package client package client
import ( import (
"github.com/zrepl/zrepl/cli"
"os" "os"
"github.com/problame/go-netssh"
"github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/config"
"context" "context"
"errors" "errors"
"github.com/problame/go-netssh"
"github.com/zrepl/zrepl/config"
"log" "log"
"path" "path"
) )


@ -2,15 +2,17 @@ package client
import ( import (
"fmt" "fmt"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/spf13/pflag" "github.com/spf13/pflag"
"github.com/zrepl/zrepl/cli" "github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/filters" "github.com/zrepl/zrepl/daemon/filters"
"github.com/zrepl/zrepl/zfs" "github.com/zrepl/zrepl/zfs"
) )
var TestCmd = &cli.Subcommand { var TestCmd = &cli.Subcommand{
Use: "test", Use: "test",
SetupSubcommands: func() []*cli.Subcommand { SetupSubcommands: func() []*cli.Subcommand {
return []*cli.Subcommand{testFilter, testPlaceholder} return []*cli.Subcommand{testFilter, testPlaceholder}
@ -18,13 +20,13 @@ var TestCmd = &cli.Subcommand {
} }
var testFilterArgs struct { var testFilterArgs struct {
job string job string
all bool all bool
input string input string
} }
var testFilter = &cli.Subcommand{ var testFilter = &cli.Subcommand{
Use: "filesystems --job JOB [--all | --input INPUT]", Use: "filesystems --job JOB [--all | --input INPUT]",
Short: "test filesystems filter specified in push or source job", Short: "test filesystems filter specified in push or source job",
SetupFlags: func(f *pflag.FlagSet) { SetupFlags: func(f *pflag.FlagSet) {
f.StringVar(&testFilterArgs.job, "job", "", "the name of the push or source job") f.StringVar(&testFilterArgs.job, "job", "", "the name of the push or source job")
@ -51,8 +53,10 @@ func runTestFilterCmd(subcommand *cli.Subcommand, args []string) error {
return err return err
} }
switch j := job.Ret.(type) { switch j := job.Ret.(type) {
case *config.SourceJob: confFilter = j.Filesystems case *config.SourceJob:
case *config.PushJob: confFilter = j.Filesystems confFilter = j.Filesystems
case *config.PushJob:
confFilter = j.Filesystems
default: default:
return fmt.Errorf("job type %T does not have filesystems filter", j) return fmt.Errorf("job type %T does not have filesystems filter", j)
} }
@ -109,10 +113,8 @@ func runTestFilterCmd(subcommand *cli.Subcommand, args []string) error {
} }
var testPlaceholderArgs struct { var testPlaceholderArgs struct {
action string ds string
ds string all bool
plv string
all bool
} }
var testPlaceholder = &cli.Subcommand{ var testPlaceholder = &cli.Subcommand{


@ -2,23 +2,25 @@ package client
import ( import (
"fmt" "fmt"
"os"
"github.com/spf13/pflag" "github.com/spf13/pflag"
"github.com/zrepl/zrepl/cli" "github.com/zrepl/zrepl/cli"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon" "github.com/zrepl/zrepl/daemon"
"github.com/zrepl/zrepl/version" "github.com/zrepl/zrepl/version"
"os"
) )
var versionArgs struct { var versionArgs struct {
Show string Show string
Config *config.Config Config *config.Config
ConfigErr error ConfigErr error
} }
var VersionCmd = &cli.Subcommand{ var VersionCmd = &cli.Subcommand{
Use: "version", Use: "version",
Short: "print version of zrepl binary and running daemon", Short: "print version of zrepl binary and running daemon",
NoRequireConfig: true, NoRequireConfig: true,
SetupFlags: func(f *pflag.FlagSet) { SetupFlags: func(f *pflag.FlagSet) {
f.StringVar(&versionArgs.Show, "show", "", "version info to show (client|daemon)") f.StringVar(&versionArgs.Show, "show", "", "version info to show (client|daemon)")


@ -2,8 +2,6 @@ package config
import ( import (
"fmt" "fmt"
"github.com/pkg/errors"
"github.com/zrepl/yaml-config"
"io/ioutil" "io/ioutil"
"log/syslog" "log/syslog"
"os" "os"
@ -11,6 +9,9 @@ import (
"regexp" "regexp"
"strconv" "strconv"
"time" "time"
"github.com/pkg/errors"
"github.com/zrepl/yaml-config"
) )
type Config struct { type Config struct {
@ -34,11 +35,16 @@ type JobEnum struct {
func (j JobEnum) Name() string { func (j JobEnum) Name() string {
var name string var name string
switch v := j.Ret.(type) { switch v := j.Ret.(type) {
case *SnapJob: name = v.Name case *SnapJob:
case *PushJob: name = v.Name name = v.Name
case *SinkJob: name = v.Name case *PushJob:
case *PullJob: name = v.Name name = v.Name
case *SourceJob: name = v.Name case *SinkJob:
name = v.Name
case *PullJob:
name = v.Name
case *SourceJob:
name = v.Name
default: default:
panic(fmt.Sprintf("unknown job type %T", v)) panic(fmt.Sprintf("unknown job type %T", v))
} }
@ -46,38 +52,38 @@ func (j JobEnum) Name() string {
} }
type ActiveJob struct { type ActiveJob struct {
Type string `yaml:"type"` Type string `yaml:"type"`
Name string `yaml:"name"` Name string `yaml:"name"`
Connect ConnectEnum `yaml:"connect"` Connect ConnectEnum `yaml:"connect"`
Pruning PruningSenderReceiver `yaml:"pruning"` Pruning PruningSenderReceiver `yaml:"pruning"`
Debug JobDebugSettings `yaml:"debug,optional"` Debug JobDebugSettings `yaml:"debug,optional"`
} }
type PassiveJob struct { type PassiveJob struct {
Type string `yaml:"type"` Type string `yaml:"type"`
Name string `yaml:"name"` Name string `yaml:"name"`
Serve ServeEnum `yaml:"serve"` Serve ServeEnum `yaml:"serve"`
Debug JobDebugSettings `yaml:"debug,optional"` Debug JobDebugSettings `yaml:"debug,optional"`
} }
type SnapJob struct { type SnapJob struct {
Type string `yaml:"type"` Type string `yaml:"type"`
Name string `yaml:"name"` Name string `yaml:"name"`
Pruning PruningLocal `yaml:"pruning"` Pruning PruningLocal `yaml:"pruning"`
Debug JobDebugSettings `yaml:"debug,optional"` Debug JobDebugSettings `yaml:"debug,optional"`
Snapshotting SnapshottingEnum `yaml:"snapshotting"` Snapshotting SnapshottingEnum `yaml:"snapshotting"`
Filesystems FilesystemsFilter `yaml:"filesystems"` Filesystems FilesystemsFilter `yaml:"filesystems"`
} }
type PushJob struct { type PushJob struct {
ActiveJob `yaml:",inline"` ActiveJob `yaml:",inline"`
Snapshotting SnapshottingEnum `yaml:"snapshotting"` Snapshotting SnapshottingEnum `yaml:"snapshotting"`
Filesystems FilesystemsFilter `yaml:"filesystems"` Filesystems FilesystemsFilter `yaml:"filesystems"`
} }
type PullJob struct { type PullJob struct {
ActiveJob `yaml:",inline"` ActiveJob `yaml:",inline"`
RootFS string `yaml:"root_fs"` RootFS string `yaml:"root_fs"`
Interval PositiveDurationOrManual `yaml:"interval"` Interval PositiveDurationOrManual `yaml:"interval"`
} }
@ -118,9 +124,9 @@ type SinkJob struct {
} }
type SourceJob struct { type SourceJob struct {
PassiveJob `yaml:",inline"` PassiveJob `yaml:",inline"`
Snapshotting SnapshottingEnum `yaml:"snapshotting"` Snapshotting SnapshottingEnum `yaml:"snapshotting"`
Filesystems FilesystemsFilter `yaml:"filesystems"` Filesystems FilesystemsFilter `yaml:"filesystems"`
} }
type FilesystemsFilter map[string]bool type FilesystemsFilter map[string]bool
@ -130,8 +136,8 @@ type SnapshottingEnum struct {
} }
type SnapshottingPeriodic struct { type SnapshottingPeriodic struct {
Type string `yaml:"type"` Type string `yaml:"type"`
Prefix string `yaml:"prefix"` Prefix string `yaml:"prefix"`
Interval time.Duration `yaml:"interval,positive"` Interval time.Duration `yaml:"interval,positive"`
} }
@ -191,7 +197,7 @@ type ConnectEnum struct {
} }
type ConnectCommon struct { type ConnectCommon struct {
Type string `yaml:"type"` Type string `yaml:"type"`
} }
type TCPConnect struct { type TCPConnect struct {
@ -223,8 +229,8 @@ type SSHStdinserverConnect struct {
} }
type LocalConnect struct { type LocalConnect struct {
ConnectCommon `yaml:",inline"` ConnectCommon `yaml:",inline"`
ListenerName string `yaml:"listener_name"` ListenerName string `yaml:"listener_name"`
ClientIdentity string `yaml:"client_identity"` ClientIdentity string `yaml:"client_identity"`
} }
@ -233,7 +239,7 @@ type ServeEnum struct {
} }
type ServeCommon struct { type ServeCommon struct {
Type string `yaml:"type"` Type string `yaml:"type"`
} }
type TCPServe struct { type TCPServe struct {
@ -253,12 +259,12 @@ type TLSServe struct {
} }
type StdinserverServer struct { type StdinserverServer struct {
ServeCommon `yaml:",inline"` ServeCommon `yaml:",inline"`
ClientIdentities []string `yaml:"client_identities"` ClientIdentities []string `yaml:"client_identities"`
} }
type LocalServe struct { type LocalServe struct {
ServeCommon `yaml:",inline"` ServeCommon `yaml:",inline"`
ListenerName string `yaml:"listener_name"` ListenerName string `yaml:"listener_name"`
} }
@ -267,8 +273,8 @@ type PruningEnum struct {
} }
type PruneKeepNotReplicated struct { type PruneKeepNotReplicated struct {
Type string `yaml:"type"` Type string `yaml:"type"`
KeepSnapshotAtCursor bool `yaml:"keep_snapshot_at_cursor,optional,default=true"` KeepSnapshotAtCursor bool `yaml:"keep_snapshot_at_cursor,optional,default=true"`
} }
type PruneKeepLastN struct { type PruneKeepLastN struct {
@ -277,8 +283,8 @@ type PruneKeepLastN struct {
} }
type PruneKeepRegex struct { // FIXME rename to KeepRegex type PruneKeepRegex struct { // FIXME rename to KeepRegex
Type string `yaml:"type"` Type string `yaml:"type"`
Regex string `yaml:"regex"` Regex string `yaml:"regex"`
Negate bool `yaml:"negate,optional,default=false"` Negate bool `yaml:"negate,optional,default=false"`
} }
@ -301,7 +307,7 @@ type StdoutLoggingOutlet struct {
type SyslogLoggingOutlet struct { type SyslogLoggingOutlet struct {
LoggingOutletCommon `yaml:",inline"` LoggingOutletCommon `yaml:",inline"`
Facility *SyslogFacility `yaml:"facility,optional,fromdefaults"` Facility *SyslogFacility `yaml:"facility,optional,fromdefaults"`
RetryInterval time.Duration `yaml:"retry_interval,positive,default=10s"` RetryInterval time.Duration `yaml:"retry_interval,positive,default=10s"`
} }
type TCPLoggingOutlet struct { type TCPLoggingOutlet struct {
@ -392,7 +398,7 @@ func (t *ConnectEnum) UnmarshalYAML(u func(interface{}, bool) error) (err error)
"tcp": &TCPConnect{}, "tcp": &TCPConnect{},
"tls": &TLSConnect{}, "tls": &TLSConnect{},
"ssh+stdinserver": &SSHStdinserverConnect{}, "ssh+stdinserver": &SSHStdinserverConnect{},
"local": &LocalConnect{}, "local": &LocalConnect{},
}) })
return return
} }
@ -402,7 +408,7 @@ func (t *ServeEnum) UnmarshalYAML(u func(interface{}, bool) error) (err error) {
"tcp": &TCPServe{}, "tcp": &TCPServe{},
"tls": &TLSServe{}, "tls": &TLSServe{},
"stdinserver": &StdinserverServer{}, "stdinserver": &StdinserverServer{},
"local" : &LocalServe{}, "local": &LocalServe{},
}) })
return return
} }
@ -420,7 +426,7 @@ func (t *PruningEnum) UnmarshalYAML(u func(interface{}, bool) error) (err error)
func (t *SnapshottingEnum) UnmarshalYAML(u func(interface{}, bool) error) (err error) { func (t *SnapshottingEnum) UnmarshalYAML(u func(interface{}, bool) error) (err error) {
t.Ret, err = enumUnmarshal(u, map[string]interface{}{ t.Ret, err = enumUnmarshal(u, map[string]interface{}{
"periodic": &SnapshottingPeriodic{}, "periodic": &SnapshottingPeriodic{},
"manual": &SnapshottingManual{}, "manual": &SnapshottingManual{},
}) })
return return
} }
@ -448,31 +454,51 @@ func (t *SyslogFacility) UnmarshalYAML(u func(interface{}, bool) error) (err err
} }
var level syslog.Priority var level syslog.Priority
switch s { switch s {
case "kern": level = syslog.LOG_KERN case "kern":
case "user": level = syslog.LOG_USER level = syslog.LOG_KERN
case "mail": level = syslog.LOG_MAIL case "user":
case "daemon": level = syslog.LOG_DAEMON level = syslog.LOG_USER
case "auth": level = syslog.LOG_AUTH case "mail":
case "syslog": level = syslog.LOG_SYSLOG level = syslog.LOG_MAIL
case "lpr": level = syslog.LOG_LPR case "daemon":
case "news": level = syslog.LOG_NEWS level = syslog.LOG_DAEMON
case "uucp": level = syslog.LOG_UUCP case "auth":
case "cron": level = syslog.LOG_CRON level = syslog.LOG_AUTH
case "authpriv": level = syslog.LOG_AUTHPRIV case "syslog":
case "ftp": level = syslog.LOG_FTP level = syslog.LOG_SYSLOG
case "local0": level = syslog.LOG_LOCAL0 case "lpr":
case "local1": level = syslog.LOG_LOCAL1 level = syslog.LOG_LPR
case "local2": level = syslog.LOG_LOCAL2 case "news":
case "local3": level = syslog.LOG_LOCAL3 level = syslog.LOG_NEWS
case "local4": level = syslog.LOG_LOCAL4 case "uucp":
case "local5": level = syslog.LOG_LOCAL5 level = syslog.LOG_UUCP
case "local6": level = syslog.LOG_LOCAL6 case "cron":
case "local7": level = syslog.LOG_LOCAL7 level = syslog.LOG_CRON
case "authpriv":
level = syslog.LOG_AUTHPRIV
case "ftp":
level = syslog.LOG_FTP
case "local0":
level = syslog.LOG_LOCAL0
case "local1":
level = syslog.LOG_LOCAL1
case "local2":
level = syslog.LOG_LOCAL2
case "local3":
level = syslog.LOG_LOCAL3
case "local4":
level = syslog.LOG_LOCAL4
case "local5":
level = syslog.LOG_LOCAL5
case "local6":
level = syslog.LOG_LOCAL6
case "local7":
level = syslog.LOG_LOCAL7
default: default:
return fmt.Errorf("invalid syslog level: %q", s) return fmt.Errorf("invalid syslog level: %q", s)
} }
*t = SyslogFacility(level) *t = SyslogFacility(level)
return nil return nil
} }
var ConfigFileDefaultLocations = []string{ var ConfigFileDefaultLocations = []string{


@ -2,11 +2,12 @@ package config
import ( import (
"fmt" "fmt"
"log/syslog"
"testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/zrepl/yaml-config" "github.com/zrepl/yaml-config"
"log/syslog"
"testing"
) )
func testValidGlobalSection(t *testing.T, s string) *Config { func testValidGlobalSection(t *testing.T, s string) *Config {
@ -24,7 +25,7 @@ jobs:
` `
_, err := ParseConfigBytes([]byte(jobdef)) _, err := ParseConfigBytes([]byte(jobdef))
require.NoError(t, err) require.NoError(t, err)
return testValidConfig(t, s + jobdef) return testValidConfig(t, s+jobdef)
} }
func TestOutletTypes(t *testing.T) { func TestOutletTypes(t *testing.T) {
@ -71,7 +72,7 @@ global:
- type: prometheus - type: prometheus
listen: ':9091' listen: ':9091'
`) `)
assert.Equal(t, ":9091", conf.Global.Monitoring[0].Ret.(*PrometheusMonitoring).Listen) assert.Equal(t, ":9091", conf.Global.Monitoring[0].Ret.(*PrometheusMonitoring).Listen)
} }
func TestSyslogLoggingOutletFacility(t *testing.T) { func TestSyslogLoggingOutletFacility(t *testing.T) {


@ -2,6 +2,7 @@ package config
import ( import (
"testing" "testing"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@ -36,4 +37,4 @@ jobs:
- type: last_n - type: last_n
count: 1 count: 1
`) `)
} }


@ -2,9 +2,10 @@ package config
import ( import (
"fmt" "fmt"
"github.com/stretchr/testify/assert"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert"
) )
func TestSnapshotting(t *testing.T) { func TestSnapshotting(t *testing.T) {
@ -37,7 +38,7 @@ jobs:
interval: 10m interval: 10m
` `
fillSnapshotting := func(s string) string {return fmt.Sprintf(tmpl, s)} fillSnapshotting := func(s string) string { return fmt.Sprintf(tmpl, s) }
var c *Config var c *Config
t.Run("manual", func(t *testing.T) { t.Run("manual", func(t *testing.T) {
@ -51,7 +52,7 @@ jobs:
snp := c.Jobs[0].Ret.(*PushJob).Snapshotting.Ret.(*SnapshottingPeriodic) snp := c.Jobs[0].Ret.(*PushJob).Snapshotting.Ret.(*SnapshottingPeriodic)
assert.Equal(t, "periodic", snp.Type) assert.Equal(t, "periodic", snp.Type)
assert.Equal(t, 10*time.Minute, snp.Interval) assert.Equal(t, 10*time.Minute, snp.Interval)
assert.Equal(t, "zrepl_" , snp.Prefix) assert.Equal(t, "zrepl_", snp.Prefix)
}) })
} }

View File

@ -39,6 +39,7 @@ func TestSampleConfigsAreParsedWithoutErrors(t *testing.T) {
} }
// template must be a template/text template with a single '{{ . }}' as placehodler for val // template must be a template/text template with a single '{{ . }}' as placehodler for val
//nolint[:deadcode,unused]
func testValidConfigTemplate(t *testing.T, tmpl string, val string) *Config { func testValidConfigTemplate(t *testing.T, tmpl string, val string) *Config {
tmp, err := template.New("master").Parse(tmpl) tmp, err := template.New("master").Parse(tmpl)
if err != nil { if err != nil {
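
The //nolint annotation added above testValidConfigTemplate keeps the deadcode/unused linters from flagging a helper that is only referenced from other test code. A small sketch of the idea, using the plain golangci-lint directive form and a hypothetical helper (the commit itself writes the directive with brackets):

package main

import "fmt"

// templateHelper is a hypothetical helper that nothing in this file calls; the
// directive below silences the deadcode and unused linters for this declaration.
//nolint:deadcode,unused
func templateHelper(v string) string {
    return fmt.Sprintf("<%s>", v)
}

func main() {
    fmt.Println("templateHelper is intentionally unreferenced here")
}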

View File

@ -11,9 +11,9 @@ import (
type RetentionIntervalList []RetentionInterval type RetentionIntervalList []RetentionInterval
type PruneGrid struct { type PruneGrid struct {
Type string `yaml:"type"` Type string `yaml:"type"`
Grid RetentionIntervalList `yaml:"grid"` Grid RetentionIntervalList `yaml:"grid"`
Regex string `yaml:"regex"` Regex string `yaml:"regex"`
} }
type RetentionInterval struct { type RetentionInterval struct {
@ -31,10 +31,6 @@ func (i *RetentionInterval) KeepCount() int {
const RetentionGridKeepCountAll int = -1 const RetentionGridKeepCountAll int = -1
type RetentionGrid struct {
intervals []RetentionInterval
}
func (t *RetentionIntervalList) UnmarshalYAML(u func(interface{}, bool) error) (err error) { func (t *RetentionIntervalList) UnmarshalYAML(u func(interface{}, bool) error) (err error) {
var in string var in string
if err := u(&in, true); err != nil { if err := u(&in, true); err != nil {

View File

@ -12,6 +12,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/zrepl/zrepl/daemon/job" "github.com/zrepl/zrepl/daemon/job"
"github.com/zrepl/zrepl/daemon/nethelpers" "github.com/zrepl/zrepl/daemon/nethelpers"
"github.com/zrepl/zrepl/logger" "github.com/zrepl/zrepl/logger"
@ -43,24 +44,24 @@ func (j *controlJob) Status() *job.Status { return &job.Status{Type: job.TypeInt
func (j *controlJob) OwnedDatasetSubtreeRoot() (p *zfs.DatasetPath, ok bool) { return nil, false } func (j *controlJob) OwnedDatasetSubtreeRoot() (p *zfs.DatasetPath, ok bool) { return nil, false }
var promControl struct { var promControl struct {
requestBegin *prometheus.CounterVec requestBegin *prometheus.CounterVec
requestFinished *prometheus.HistogramVec requestFinished *prometheus.HistogramVec
} }
func (j *controlJob) RegisterMetrics(registerer prometheus.Registerer) { func (j *controlJob) RegisterMetrics(registerer prometheus.Registerer) {
promControl.requestBegin = prometheus.NewCounterVec(prometheus.CounterOpts{ promControl.requestBegin = prometheus.NewCounterVec(prometheus.CounterOpts{
Namespace: "zrepl", Namespace: "zrepl",
Subsystem: "control", Subsystem: "control",
Name: "request_begin", Name: "request_begin",
Help: "number of request we started to handle", Help: "number of request we started to handle",
}, []string{"endpoint"}) }, []string{"endpoint"})
promControl.requestFinished = prometheus.NewHistogramVec(prometheus.HistogramOpts{ promControl.requestFinished = prometheus.NewHistogramVec(prometheus.HistogramOpts{
Namespace: "zrepl", Namespace: "zrepl",
Subsystem: "control", Subsystem: "control",
Name: "request_finished", Name: "request_finished",
Help: "time it took a request to finih", Help: "time it took a request to finih",
Buckets: []float64{1e-6, 10e-6, 100e-6, 500e-6, 1e-3,10e-3, 100e-3, 200e-3,400e-3,800e-3, 1, 10, 20}, Buckets: []float64{1e-6, 10e-6, 100e-6, 500e-6, 1e-3, 10e-3, 100e-3, 200e-3, 400e-3, 800e-3, 1, 10, 20},
}, []string{"endpoint"}) }, []string{"endpoint"})
registerer.MustRegister(promControl.requestBegin) registerer.MustRegister(promControl.requestBegin)
registerer.MustRegister(promControl.requestFinished) registerer.MustRegister(promControl.requestFinished)
@ -88,7 +89,7 @@ func (j *controlJob) Run(ctx context.Context) {
mux := http.NewServeMux() mux := http.NewServeMux()
mux.Handle(ControlJobEndpointPProf, mux.Handle(ControlJobEndpointPProf,
requestLogger{log: log, handler: jsonRequestResponder{func(decoder jsonDecoder) (interface{}, error) { requestLogger{log: log, handler: jsonRequestResponder{log, func(decoder jsonDecoder) (interface{}, error) {
var msg PprofServerControlMsg var msg PprofServerControlMsg
err := decoder(&msg) err := decoder(&msg)
if err != nil { if err != nil {
@ -99,22 +100,22 @@ func (j *controlJob) Run(ctx context.Context) {
}}}) }}})
mux.Handle(ControlJobEndpointVersion, mux.Handle(ControlJobEndpointVersion,
requestLogger{log: log, handler: jsonResponder{func() (interface{}, error) { requestLogger{log: log, handler: jsonResponder{log, func() (interface{}, error) {
return version.NewZreplVersionInformation(), nil return version.NewZreplVersionInformation(), nil
}}}) }}})
mux.Handle(ControlJobEndpointStatus, mux.Handle(ControlJobEndpointStatus,
// don't log requests to status endpoint, too spammy // don't log requests to status endpoint, too spammy
jsonResponder{func() (interface{}, error) { jsonResponder{log, func() (interface{}, error) {
s := j.jobs.status() s := j.jobs.status()
return s, nil return s, nil
}}) }})
mux.Handle(ControlJobEndpointSignal, mux.Handle(ControlJobEndpointSignal,
requestLogger{log: log, handler: jsonRequestResponder{func(decoder jsonDecoder) (interface{}, error) { requestLogger{log: log, handler: jsonRequestResponder{log, func(decoder jsonDecoder) (interface{}, error) {
type reqT struct { type reqT struct {
Name string Name string
Op string Op string
} }
var req reqT var req reqT
if decoder(&req) != nil { if decoder(&req) != nil {
@ -136,8 +137,8 @@ func (j *controlJob) Run(ctx context.Context) {
server := http.Server{ server := http.Server{
Handler: mux, Handler: mux,
// control socket is local, 1s timeout should be more than sufficient, even on a loaded system // control socket is local, 1s timeout should be more than sufficient, even on a loaded system
WriteTimeout: 1*time.Second, WriteTimeout: 1 * time.Second,
ReadTimeout: 1*time.Second, ReadTimeout: 1 * time.Second,
} }
outer: outer:
@ -152,7 +153,10 @@ outer:
select { select {
case <-ctx.Done(): case <-ctx.Done():
log.WithError(ctx.Err()).Info("context done") log.WithError(ctx.Err()).Info("context done")
server.Shutdown(context.Background()) err := server.Shutdown(context.Background())
if err != nil {
log.WithError(err).Error("cannot shutdown server")
}
break outer break outer
case err = <-served: case err = <-served:
if err != nil { if err != nil {
@ -166,33 +170,50 @@ outer:
} }
type jsonResponder struct { type jsonResponder struct {
log Logger
producer func() (interface{}, error) producer func() (interface{}, error)
} }
func (j jsonResponder) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (j jsonResponder) ServeHTTP(w http.ResponseWriter, r *http.Request) {
logIoErr := func(err error) {
if err != nil {
j.log.WithError(err).Error("control handler io error")
}
}
res, err := j.producer() res, err := j.producer()
if err != nil { if err != nil {
j.log.WithError(err).Error("control handler error")
w.WriteHeader(http.StatusInternalServerError) w.WriteHeader(http.StatusInternalServerError)
io.WriteString(w, err.Error()) _, err = io.WriteString(w, err.Error())
logIoErr(err)
return return
} }
var buf bytes.Buffer var buf bytes.Buffer
err = json.NewEncoder(&buf).Encode(res) err = json.NewEncoder(&buf).Encode(res)
if err != nil { if err != nil {
j.log.WithError(err).Error("control handler json marshal error")
w.WriteHeader(http.StatusInternalServerError) w.WriteHeader(http.StatusInternalServerError)
io.WriteString(w, err.Error()) _, err = io.WriteString(w, err.Error())
} else { } else {
io.Copy(w, &buf) _, err = io.Copy(w, &buf)
} }
logIoErr(err)
} }
type jsonDecoder = func(interface{}) error type jsonDecoder = func(interface{}) error
type jsonRequestResponder struct { type jsonRequestResponder struct {
log Logger
producer func(decoder jsonDecoder) (interface{}, error) producer func(decoder jsonDecoder) (interface{}, error)
} }
func (j jsonRequestResponder) ServeHTTP(w http.ResponseWriter, r *http.Request) { func (j jsonRequestResponder) ServeHTTP(w http.ResponseWriter, r *http.Request) {
logIoErr := func(err error) {
if err != nil {
j.log.WithError(err).Error("control handler io error")
}
}
var decodeError error var decodeError error
decoder := func(i interface{}) error { decoder := func(i interface{}) error {
err := json.NewDecoder(r.Body).Decode(&i) err := json.NewDecoder(r.Body).Decode(&i)
@ -204,22 +225,28 @@ func (j jsonRequestResponder) ServeHTTP(w http.ResponseWriter, r *http.Request)
//If we had a decode error ignore output of producer and return error //If we had a decode error ignore output of producer and return error
if decodeError != nil { if decodeError != nil {
w.WriteHeader(http.StatusBadRequest) w.WriteHeader(http.StatusBadRequest)
io.WriteString(w, decodeError.Error()) _, err := io.WriteString(w, decodeError.Error())
logIoErr(err)
return return
} }
if producerErr != nil { if producerErr != nil {
j.log.WithError(producerErr).Error("control handler error")
w.WriteHeader(http.StatusInternalServerError) w.WriteHeader(http.StatusInternalServerError)
io.WriteString(w, producerErr.Error()) _, err := io.WriteString(w, producerErr.Error())
logIoErr(err)
return return
} }
var buf bytes.Buffer var buf bytes.Buffer
encodeErr := json.NewEncoder(&buf).Encode(res) encodeErr := json.NewEncoder(&buf).Encode(res)
if encodeErr != nil { if encodeErr != nil {
j.log.WithError(producerErr).Error("control handler json marhsal error")
w.WriteHeader(http.StatusInternalServerError) w.WriteHeader(http.StatusInternalServerError)
io.WriteString(w, encodeErr.Error()) _, err := io.WriteString(w, encodeErr.Error())
logIoErr(err)
} else { } else {
io.Copy(w, &buf) _, err := io.Copy(w, &buf)
logIoErr(err)
} }
} }
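
The control-socket handlers now carry a Logger and report both producer errors and the previously discarded return values of io.WriteString/io.Copy, which is what the errcheck linter flags. A self-contained sketch of the same shape, with hypothetical names and the standard log package instead of zrepl's logger:

package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "log"
    "net/http"
    "net/http/httptest"
    "os"
)

// jsonHandler mirrors the structure of the jsonResponder above: produce a value,
// encode it into a buffer first, and log write errors instead of ignoring them.
type jsonHandler struct {
    logger  *log.Logger
    produce func() (interface{}, error)
}

func (h jsonHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    logIOErr := func(err error) {
        if err != nil {
            h.logger.Printf("handler io error: %v", err)
        }
    }
    res, err := h.produce()
    if err != nil {
        h.logger.Printf("handler error: %v", err)
        w.WriteHeader(http.StatusInternalServerError)
        _, werr := io.WriteString(w, err.Error())
        logIOErr(werr)
        return
    }
    var buf bytes.Buffer
    if err := json.NewEncoder(&buf).Encode(res); err != nil {
        h.logger.Printf("json marshal error: %v", err)
        w.WriteHeader(http.StatusInternalServerError)
        _, werr := io.WriteString(w, err.Error())
        logIOErr(werr)
        return
    }
    _, err = io.Copy(w, &buf)
    logIOErr(err)
}

func main() {
    h := jsonHandler{
        logger:  log.New(os.Stderr, "control: ", log.LstdFlags),
        produce: func() (interface{}, error) { return map[string]int{"ok": 1}, nil },
    }
    srv := httptest.NewServer(h)
    defer srv.Close()
    resp, err := http.Get(srv.URL)
    if err != nil {
        log.Fatal(err)
    }
    defer resp.Body.Close()
    body, err := io.ReadAll(resp.Body)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("%s", body)
}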

View File

@ -3,8 +3,16 @@ package daemon
import ( import (
"context" "context"
"fmt" "fmt"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/job" "github.com/zrepl/zrepl/daemon/job"
"github.com/zrepl/zrepl/daemon/job/reset" "github.com/zrepl/zrepl/daemon/job/reset"
@ -12,12 +20,6 @@ import (
"github.com/zrepl/zrepl/daemon/logging" "github.com/zrepl/zrepl/daemon/logging"
"github.com/zrepl/zrepl/logger" "github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/version" "github.com/zrepl/zrepl/version"
"os"
"os/signal"
"strings"
"sync"
"syscall"
"time"
) )
func Run(conf *config.Config) error { func Run(conf *config.Config) error {
@ -74,12 +76,11 @@ func Run(conf *config.Config) error {
return errors.Errorf("unknown monitoring job #%d (type %T)", i, v) return errors.Errorf("unknown monitoring job #%d (type %T)", i, v)
} }
if err != nil { if err != nil {
return errors.Wrapf(err,"cannot build monitorin gjob #%d", i) return errors.Wrapf(err, "cannot build monitorin gjob #%d", i)
} }
jobs.start(ctx, job, true) jobs.start(ctx, job, true)
} }
log.Info("starting daemon") log.Info("starting daemon")
// start regular jobs // start regular jobs
@ -103,7 +104,7 @@ type jobs struct {
// m protects all fields below it // m protects all fields below it
m sync.RWMutex m sync.RWMutex
wakeups map[string]wakeup.Func // by Job.Name wakeups map[string]wakeup.Func // by Job.Name
resets map[string]reset.Func // by Job.Name resets map[string]reset.Func // by Job.Name
jobs map[string]job.Job jobs map[string]job.Job
} }
@ -116,9 +117,7 @@ func newJobs() *jobs {
} }
const ( const (
logJobField string = "job" logJobField string = "job"
logTaskField string = "task"
logSubsysField string = "subsystem"
) )
func (s *jobs) wait() <-chan struct{} { func (s *jobs) wait() <-chan struct{} {

View File

@ -2,10 +2,12 @@ package filters
import ( import (
"fmt" "fmt"
"strings"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/zrepl/zrepl/endpoint" "github.com/zrepl/zrepl/endpoint"
"github.com/zrepl/zrepl/zfs" "github.com/zrepl/zrepl/zfs"
"strings"
) )
type DatasetMapFilter struct { type DatasetMapFilter struct {

View File

@ -1,8 +1,9 @@
package filters package filters
import ( import (
"github.com/zrepl/zrepl/zfs"
"strings" "strings"
"github.com/zrepl/zrepl/zfs"
) )
type AnyFSVFilter struct{} type AnyFSVFilter struct{}
@ -17,7 +18,6 @@ func (AnyFSVFilter) Filter(t zfs.VersionType, name string) (accept bool, err err
return true, nil return true, nil
} }
type PrefixFilter struct { type PrefixFilter struct {
prefix string prefix string
fstype zfs.VersionType fstype zfs.VersionType

View File

@ -68,8 +68,7 @@ type activeSideTasks struct {
func (a *ActiveSide) updateTasks(u func(*activeSideTasks)) activeSideTasks { func (a *ActiveSide) updateTasks(u func(*activeSideTasks)) activeSideTasks {
a.tasksMtx.Lock() a.tasksMtx.Lock()
defer a.tasksMtx.Unlock() defer a.tasksMtx.Unlock()
var copy activeSideTasks copy := a.tasks
copy = a.tasks
if u == nil { if u == nil {
return copy return copy
} }
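
The two-line "var copy activeSideTasks; copy = a.tasks" collapses into a short variable declaration; either way, assigning a struct value copies it, so the caller receives a snapshot that can be read without holding the mutex. A tiny illustration with a hypothetical struct:

package main

import "fmt"

type taskState struct {
    active int
    errors int
}

func main() {
    cur := taskState{active: 3}

    // Assignment copies the struct value; mutating the copy leaves cur untouched.
    snapshot := cur
    snapshot.errors++

    fmt.Println(cur, snapshot) // {3 0} {3 1}
}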

View File

@ -6,6 +6,7 @@ import (
"strings" "strings"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
) )
@ -24,13 +25,13 @@ func JobsFromConfig(c *config.Config) ([]Job, error) {
// receiving-side root filesystems must not overlap // receiving-side root filesystems must not overlap
{ {
rfss := make([]string, len(js)) rfss := make([]string, 0, len(js))
for i, j := range js { for _, j := range js {
jrfs, ok := j.OwnedDatasetSubtreeRoot() jrfs, ok := j.OwnedDatasetSubtreeRoot()
if !ok { if !ok {
continue continue
} }
rfss[i] = jrfs.ToString() rfss = append(rfss, jrfs.ToString())
} }
if err := validateReceivingSidesDoNotOverlap(rfss); err != nil { if err := validateReceivingSidesDoNotOverlap(rfss); err != nil {
return nil, err return nil, err
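
The receiving-side overlap check used to write into a full-length slice, which left empty strings for jobs without an owned dataset subtree; it now appends into a zero-length slice with preallocated capacity, so only real roots are validated. The difference in miniature:

package main

import "fmt"

func main() {
    roots := []string{"pool/sink/a", "", "pool/sink/b"} // "" stands for a job with no owned subtree

    // Before: out := make([]string, len(roots)) kept the empty placeholders.
    // After:  length 0, capacity len(roots), then append only the real values.
    out := make([]string, 0, len(roots))
    for _, r := range roots {
        if r == "" {
            continue
        }
        out = append(out, r)
    }
    fmt.Println(out) // [pool/sink/a pool/sink/b]
}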

View File

@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/zrepl/zrepl/logger" "github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/zfs" "github.com/zrepl/zrepl/zfs"
) )
@ -29,7 +30,6 @@ func WithLogger(ctx context.Context, l Logger) context.Context {
return context.WithValue(ctx, contextKeyLog, l) return context.WithValue(ctx, contextKeyLog, l)
} }
type Job interface { type Job interface {
Name() string Name() string
Run(ctx context.Context) Run(ctx context.Context)
@ -44,15 +44,15 @@ type Type string
const ( const (
TypeInternal Type = "internal" TypeInternal Type = "internal"
TypeSnap Type = "snap" TypeSnap Type = "snap"
TypePush Type = "push" TypePush Type = "push"
TypeSink Type = "sink" TypeSink Type = "sink"
TypePull Type = "pull" TypePull Type = "pull"
TypeSource Type = "source" TypeSource Type = "source"
) )
type Status struct { type Status struct {
Type Type Type Type
JobSpecific interface{} JobSpecific interface{}
} }
@ -65,8 +65,8 @@ func (s *Status) MarshalJSON() ([]byte, error) {
if err != nil { if err != nil {
return nil, err return nil, err
} }
m := map[string]json.RawMessage { m := map[string]json.RawMessage{
"type": typeJson, "type": typeJson,
string(s.Type): jobJSON, string(s.Type): jobJSON,
} }
return json.Marshal(m) return json.Marshal(m)
@ -94,16 +94,21 @@ func (s *Status) UnmarshalJSON(in []byte) (err error) {
var st SnapJobStatus var st SnapJobStatus
err = json.Unmarshal(jobJSON, &st) err = json.Unmarshal(jobJSON, &st)
s.JobSpecific = &st s.JobSpecific = &st
case TypePull: fallthrough
case TypePull:
fallthrough
case TypePush: case TypePush:
var st ActiveSideStatus var st ActiveSideStatus
err = json.Unmarshal(jobJSON, &st) err = json.Unmarshal(jobJSON, &st)
s.JobSpecific = &st s.JobSpecific = &st
case TypeSource: fallthrough
case TypeSource:
fallthrough
case TypeSink: case TypeSink:
var st PassiveStatus var st PassiveStatus
err = json.Unmarshal(jobJSON, &st) err = json.Unmarshal(jobJSON, &st)
s.JobSpecific = &st s.JobSpecific = &st
case TypeInternal: case TypeInternal:
// internal jobs do not report specifics // internal jobs do not report specifics
default: default:
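
Splitting "case TypePull: fallthrough" onto two lines is purely a gofmt change. For reference, listing several values in one case clause behaves the same way; the abbreviated types below are hypothetical stand-ins for the job types in this file:

package main

import "fmt"

type Type string

const (
    TypePush Type = "push"
    TypePull Type = "pull"
    TypeSink Type = "sink"
)

func statusKind(t Type) string {
    switch t {
    // Equivalent to `case TypePull: fallthrough` followed by `case TypePush:`.
    case TypePull, TypePush:
        return "active side status"
    case TypeSink:
        return "passive side status"
    default:
        return "unknown"
    }
}

func main() {
    fmt.Println(statusKind(TypePull))
}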

View File

@ -7,6 +7,7 @@ import (
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/filters" "github.com/zrepl/zrepl/daemon/filters"
"github.com/zrepl/zrepl/daemon/job/wakeup" "github.com/zrepl/zrepl/daemon/job/wakeup"

View File

@ -4,11 +4,13 @@ import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"time"
"github.com/fatih/color" "github.com/fatih/color"
"github.com/go-logfmt/logfmt" "github.com/go-logfmt/logfmt"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/zrepl/zrepl/logger" "github.com/zrepl/zrepl/logger"
"time"
) )
const ( const (
@ -159,10 +161,16 @@ func (f *LogfmtFormatter) Format(e *logger.Entry) ([]byte, error) {
enc := logfmt.NewEncoder(&buf) enc := logfmt.NewEncoder(&buf)
if f.metadataFlags&MetadataTime != 0 { if f.metadataFlags&MetadataTime != 0 {
enc.EncodeKeyval(FieldTime, e.Time) err := enc.EncodeKeyval(FieldTime, e.Time)
if err != nil {
return nil, errors.Wrap(err, "logfmt: encode time")
}
} }
if f.metadataFlags&MetadataLevel != 0 { if f.metadataFlags&MetadataLevel != 0 {
enc.EncodeKeyval(FieldLevel, e.Level) err := enc.EncodeKeyval(FieldLevel, e.Level)
if err != nil {
return nil, errors.Wrap(err, "logfmt: encode level")
}
} }
// at least try and put job and task in front // at least try and put job and task in front
@ -179,8 +187,10 @@ func (f *LogfmtFormatter) Format(e *logger.Entry) ([]byte, error) {
prefixed[pf] = true prefixed[pf] = true
} }
enc.EncodeKeyval(FieldMessage, e.Message) err := enc.EncodeKeyval(FieldMessage, e.Message)
if err != nil {
return nil, errors.Wrap(err, "logfmt: encode message")
}
for k, v := range e.Fields { for k, v := range e.Fields {
if !prefixed[k] { if !prefixed[k] {
if err := logfmtTryEncodeKeyval(enc, k, v); err != nil { if err := logfmtTryEncodeKeyval(enc, k, v); err != nil {
@ -199,7 +209,10 @@ func logfmtTryEncodeKeyval(enc *logfmt.Encoder, field, value interface{}) error
case nil: // ok case nil: // ok
return nil return nil
case logfmt.ErrUnsupportedValueType: case logfmt.ErrUnsupportedValueType:
enc.EncodeKeyval(field, fmt.Sprintf("<%T>", value)) err := enc.EncodeKeyval(field, fmt.Sprintf("<%T>", value))
if err != nil {
return errors.Wrap(err, "cannot encode unsuuported value type Go type")
}
return nil return nil
} }
return errors.Wrapf(err, "cannot encode field '%s'", field) return errors.Wrapf(err, "cannot encode field '%s'", field)
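
The logfmt formatter now checks every Encoder.EncodeKeyval return value and wraps it with context instead of dropping it. A minimal sketch of that pattern against the same go-logfmt encoder (the helper name is made up):

package main

import (
    "bytes"
    "fmt"

    "github.com/go-logfmt/logfmt"
    "github.com/pkg/errors"
)

func encodePair(buf *bytes.Buffer, key, value interface{}) error {
    enc := logfmt.NewEncoder(buf)
    if err := enc.EncodeKeyval(key, value); err != nil {
        // Wrap instead of silently discarding, as the hunk above does.
        return errors.Wrapf(err, "logfmt: encode %v", key)
    }
    return enc.EndRecord()
}

func main() {
    var buf bytes.Buffer
    if err := encodePair(&buf, "level", "info"); err != nil {
        fmt.Println("error:", err)
        return
    }
    fmt.Print(buf.String())
}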

View File

@ -4,12 +4,14 @@ import (
"bytes" "bytes"
"context" "context"
"crypto/tls" "crypto/tls"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/logger"
"io" "io"
"log/syslog" "log/syslog"
"net" "net"
"time" "time"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/logger"
) )
type EntryFormatter interface { type EntryFormatter interface {
@ -28,7 +30,10 @@ func (h WriterOutlet) WriteEntry(entry logger.Entry) error {
return err return err
} }
_, err = h.writer.Write(bytes) _, err = h.writer.Write(bytes)
h.writer.Write([]byte("\n")) if err != nil {
return err
}
_, err = h.writer.Write([]byte("\n"))
return err return err
} }
@ -92,8 +97,10 @@ func (h *TCPOutlet) outLoop(retryInterval time.Duration) {
conn = nil conn = nil
} }
} }
conn.SetWriteDeadline(time.Now().Add(retryInterval)) err = conn.SetWriteDeadline(time.Now().Add(retryInterval))
_, err = io.Copy(conn, msg) if err == nil {
_, err = io.Copy(conn, msg)
}
if err != nil { if err != nil {
retry = time.Now().Add(retryInterval) retry = time.Now().Add(retryInterval)
conn.Close() conn.Close()
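
In the TCP outlet, io.Copy is now only attempted if SetWriteDeadline succeeded, and either failure reaches the retry logic. A compile-checked sketch of that ordering, with a hypothetical helper and an in-memory net.Pipe standing in for the real TCP connection:

package main

import (
    "bytes"
    "fmt"
    "io"
    "net"
    "time"
)

// writeWithDeadline only copies the message if the write deadline could be set,
// and returns whichever error occurred first.
func writeWithDeadline(conn net.Conn, msg *bytes.Buffer, timeout time.Duration) error {
    err := conn.SetWriteDeadline(time.Now().Add(timeout))
    if err == nil {
        _, err = io.Copy(conn, msg)
    }
    return err
}

func main() {
    client, server := net.Pipe()
    defer client.Close()
    defer server.Close()
    go func() { _, _ = io.Copy(io.Discard, server) }() // drain the other end
    err := writeWithDeadline(client, bytes.NewBufferString("hello\n"), time.Second)
    fmt.Println("write error:", err)
}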

View File

@ -7,7 +7,7 @@ import (
type Logger = logger.Logger type Logger = logger.Logger
var DaemonCmd = &cli.Subcommand { var DaemonCmd = &cli.Subcommand{
Use: "daemon", Use: "daemon",
Short: "run the zrepl daemon", Short: "run the zrepl daemon",
Run: func(subcommand *cli.Subcommand, args []string) error { Run: func(subcommand *cli.Subcommand, args []string) error {

View File

@ -1,10 +1,11 @@
package nethelpers package nethelpers
import ( import (
"github.com/pkg/errors"
"net" "net"
"os" "os"
"path/filepath" "path/filepath"
"github.com/pkg/errors"
) )
func PreparePrivateSockpath(sockpath string) error { func PreparePrivateSockpath(sockpath string) error {

View File

@ -7,11 +7,12 @@ import (
"context" "context"
"net" "net"
"net/http/pprof" "net/http/pprof"
"github.com/zrepl/zrepl/daemon/job"
) )
type pprofServer struct { type pprofServer struct {
cc chan PprofServerControlMsg cc chan PprofServerControlMsg
state PprofServerControlMsg
listener net.Listener listener net.Listener
} }
@ -63,7 +64,14 @@ outer:
mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile)) mux.Handle("/debug/pprof/profile", http.HandlerFunc(pprof.Profile))
mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol)) mux.Handle("/debug/pprof/symbol", http.HandlerFunc(pprof.Symbol))
mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace)) mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))
go http.Serve(s.listener, mux) go func() {
err := http.Serve(s.listener, mux)
if ctx.Err() != nil {
return
} else if err != nil {
job.GetLogger(ctx).WithError(err).Error("pprof server serve error")
}
}()
continue continue
} }
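
The pprof listener is now served from a goroutine that distinguishes a shutdown-induced error (the context was cancelled, so the listener was closed) from a genuine serve failure. The same pattern in isolation, with hypothetical names and the standard log package:

package main

import (
    "context"
    "log"
    "net"
    "net/http"
    "time"
)

func serveUntilCancelled(ctx context.Context, l net.Listener, h http.Handler) {
    go func() {
        <-ctx.Done()
        l.Close() // makes http.Serve return
    }()
    go func() {
        err := http.Serve(l, h)
        if ctx.Err() != nil {
            return // error was caused by the shutdown above, not worth logging
        } else if err != nil {
            log.Printf("serve error: %v", err)
        }
    }()
}

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    l, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        log.Fatal(err)
    }
    serveUntilCancelled(ctx, l, http.NotFoundHandler())
    time.Sleep(100 * time.Millisecond)
    cancel()
    time.Sleep(100 * time.Millisecond)
}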

View File

@ -2,15 +2,17 @@ package daemon
import ( import (
"context" "context"
"net"
"net/http"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/job" "github.com/zrepl/zrepl/daemon/job"
"github.com/zrepl/zrepl/logger" "github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/rpc/dataconn/frameconn" "github.com/zrepl/zrepl/rpc/dataconn/frameconn"
"github.com/zrepl/zrepl/zfs" "github.com/zrepl/zrepl/zfs"
"net"
"net/http"
) )
type prometheusJob struct { type prometheusJob struct {
@ -25,7 +27,7 @@ func newPrometheusJobFromConfig(in *config.PrometheusMonitoring) (*prometheusJob
} }
var prom struct { var prom struct {
taskLogEntries *prometheus.CounterVec taskLogEntries *prometheus.CounterVec
} }
func init() { func init() {
@ -63,17 +65,15 @@ func (j *prometheusJob) Run(ctx context.Context) {
log.WithError(err).Error("cannot listen") log.WithError(err).Error("cannot listen")
} }
go func() { go func() {
select { <-ctx.Done()
case <-ctx.Done(): l.Close()
l.Close()
}
}() }()
mux := http.NewServeMux() mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.Handler()) mux.Handle("/metrics", promhttp.Handler())
err = http.Serve(l, mux) err = http.Serve(l, mux)
if err != nil { if err != nil && ctx.Err() == nil {
log.WithError(err).Error("error while serving") log.WithError(err).Error("error while serving")
} }
@ -93,4 +93,3 @@ func (o prometheusJobOutlet) WriteEntry(entry logger.Entry) error {
prom.taskLogEntries.WithLabelValues(o.jobName, entry.Level.String()).Inc() prom.taskLogEntries.WithLabelValues(o.jobName, entry.Level.String()).Inc()
return nil return nil
} }
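
Two small simplifications in the Prometheus job: a select with a single case collapses to a plain channel receive, and serve errors are ignored when they were provoked by the context being done. The receive simplification on its own:

package main

import (
    "context"
    "fmt"
    "time"
)

func main() {
    ctx, cancel := context.WithCancel(context.Background())
    done := make(chan struct{})
    go func() {
        // Equivalent to: select { case <-ctx.Done(): ... } with a single case.
        <-ctx.Done()
        fmt.Println("context done, closing listener")
        close(done)
    }()
    time.AfterFunc(50*time.Millisecond, cancel)
    <-done
}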

View File

@ -3,17 +3,19 @@ package pruner
import ( import (
"context" "context"
"fmt" "fmt"
"sort"
"strings"
"sync"
"time"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/logger" "github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/pruning" "github.com/zrepl/zrepl/pruning"
"github.com/zrepl/zrepl/replication/logic/pdu" "github.com/zrepl/zrepl/replication/logic/pdu"
"github.com/zrepl/zrepl/util/envconst" "github.com/zrepl/zrepl/util/envconst"
"sort"
"strings"
"sync"
"time"
) )
// Try to keep it compatible with gitub.com/zrepl/zrepl/endpoint.Endpoint // Try to keep it compatible with gitub.com/zrepl/zrepl/endpoint.Endpoint
@ -53,7 +55,7 @@ type args struct {
rules []pruning.KeepRule rules []pruning.KeepRule
retryWait time.Duration retryWait time.Duration
considerSnapAtCursorReplicated bool considerSnapAtCursorReplicated bool
promPruneSecs prometheus.Observer promPruneSecs prometheus.Observer
} }
type Pruner struct { type Pruner struct {
@ -64,7 +66,7 @@ type Pruner struct {
state State state State
// State PlanErr // State PlanErr
err error err error
// State Exec // State Exec
execQueue *execQueue execQueue *execQueue
@ -75,7 +77,7 @@ type PrunerFactory struct {
receiverRules []pruning.KeepRule receiverRules []pruning.KeepRule
retryWait time.Duration retryWait time.Duration
considerSnapAtCursorReplicated bool considerSnapAtCursorReplicated bool
promPruneSecs *prometheus.HistogramVec promPruneSecs *prometheus.HistogramVec
} }
type LocalPrunerFactory struct { type LocalPrunerFactory struct {
@ -84,19 +86,6 @@ type LocalPrunerFactory struct {
promPruneSecs *prometheus.HistogramVec promPruneSecs *prometheus.HistogramVec
} }
func checkContainsKeep1(rules []pruning.KeepRule) error {
if len(rules) == 0 {
return nil //No keep rules means keep all - ok
}
for _, e := range rules {
switch e.(type) {
case *pruning.KeepLastN:
return nil
}
}
return errors.New("sender keep rules must contain last_n or be empty so that the last snapshot is definitely kept")
}
func NewLocalPrunerFactory(in config.PruningLocal, promPruneSecs *prometheus.HistogramVec) (*LocalPrunerFactory, error) { func NewLocalPrunerFactory(in config.PruningLocal, promPruneSecs *prometheus.HistogramVec) (*LocalPrunerFactory, error) {
rules, err := pruning.RulesFromConfig(in.Keep) rules, err := pruning.RulesFromConfig(in.Keep)
if err != nil { if err != nil {
@ -137,11 +126,11 @@ func NewPrunerFactory(in config.PruningSenderReceiver, promPruneSecs *prometheus
considerSnapAtCursorReplicated = considerSnapAtCursorReplicated || !knr.KeepSnapshotAtCursor considerSnapAtCursorReplicated = considerSnapAtCursorReplicated || !knr.KeepSnapshotAtCursor
} }
f := &PrunerFactory{ f := &PrunerFactory{
senderRules: keepRulesSender, senderRules: keepRulesSender,
receiverRules: keepRulesReceiver, receiverRules: keepRulesReceiver,
retryWait: envconst.Duration("ZREPL_PRUNER_RETRY_INTERVAL", 10 * time.Second), retryWait: envconst.Duration("ZREPL_PRUNER_RETRY_INTERVAL", 10*time.Second),
considerSnapAtCursorReplicated: considerSnapAtCursorReplicated, considerSnapAtCursorReplicated: considerSnapAtCursorReplicated,
promPruneSecs: promPruneSecs, promPruneSecs: promPruneSecs,
} }
return f, nil return f, nil
} }
@ -213,17 +202,17 @@ func (p *Pruner) Prune() {
func (p *Pruner) prune(args args) { func (p *Pruner) prune(args args) {
u := func(f func(*Pruner)) { u := func(f func(*Pruner)) {
p.mtx.Lock() p.mtx.Lock()
defer p.mtx.Unlock() defer p.mtx.Unlock()
f(p) f(p)
} }
// TODO support automatic retries // TODO support automatic retries
// It is advisable to merge this code with package replication/driver before // It is advisable to merge this code with package replication/driver before
// That will likely require re-modelling struct fs like replication/driver.attempt, // That will likely require re-modelling struct fs like replication/driver.attempt,
// including figuring out how to resume a plan after being interrupted by network errors // including figuring out how to resume a plan after being interrupted by network errors
// The non-retrying code in this package should move straight to replication/logic. // The non-retrying code in this package should move straight to replication/logic.
doOneAttempt(&args, u) doOneAttempt(&args, u)
} }
type Report struct { type Report struct {
State string State string
@ -239,9 +228,9 @@ type FSReport struct {
} }
type SnapshotReport struct { type SnapshotReport struct {
Name string Name string
Replicated bool Replicated bool
Date time.Time Date time.Time
} }
func (p *Pruner) Report() *Report { func (p *Pruner) Report() *Report {
@ -250,9 +239,9 @@ func (p *Pruner) Report() *Report {
r := Report{State: p.state.String()} r := Report{State: p.state.String()}
if p.err != nil { if p.err != nil {
r.Error = p.err.Error() r.Error = p.err.Error()
} }
if p.execQueue != nil { if p.execQueue != nil {
r.Pending, r.Completed = p.execQueue.Report() r.Pending, r.Completed = p.execQueue.Report()
@ -268,7 +257,7 @@ func (p *Pruner) State() State {
} }
type fs struct { type fs struct {
path string path string
// permanent error during planning // permanent error during planning
planErr error planErr error
@ -316,7 +305,7 @@ func (f *fs) Report() FSReport {
if f.planErr != nil { if f.planErr != nil {
r.LastError = f.planErr.Error() r.LastError = f.planErr.Error()
} else if f.execErrLast != nil { } else if f.execErrLast != nil {
r.LastError = f.execErrLast.Error() r.LastError = f.execErrLast.Error()
} }
@ -326,7 +315,7 @@ func (f *fs) Report() FSReport {
} }
r.DestroyList = make([]SnapshotReport, len(f.destroyList)) r.DestroyList = make([]SnapshotReport, len(f.destroyList))
for i, snap := range f.destroyList{ for i, snap := range f.destroyList {
r.DestroyList[i] = snap.(snapshot).Report() r.DestroyList[i] = snap.(snapshot).Report()
} }
@ -490,9 +479,9 @@ tfss_loop:
}) })
for { for {
var pfs *fs var pfs *fs
u(func(pruner *Pruner) { u(func(pruner *Pruner) {
pfs = pruner.execQueue.Pop() pfs = pruner.execQueue.Pop()
}) })
if pfs == nil { if pfs == nil {
break break
@ -516,16 +505,15 @@ tfss_loop:
hadErr := false hadErr := false
for _, fsr := range rep.Completed { for _, fsr := range rep.Completed {
hadErr = hadErr || fsr.SkipReason.NotSkipped() && fsr.LastError != "" hadErr = hadErr || fsr.SkipReason.NotSkipped() && fsr.LastError != ""
} }
if hadErr { if hadErr {
p.state = ExecErr p.state = ExecErr
} else { } else {
p.state = Done p.state = Done
} }
}) })
} }
// attempts to exec pfs, puts it back into the queue with the result // attempts to exec pfs, puts it back into the queue with the result
func doOneAttemptExec(a *args, u updater, pfs *fs) { func doOneAttemptExec(a *args, u updater, pfs *fs) {
@ -558,20 +546,20 @@ func doOneAttemptExec(a *args, u updater, pfs *fs) {
err = nil err = nil
destroyFails := make([]*pdu.DestroySnapshotRes, 0) destroyFails := make([]*pdu.DestroySnapshotRes, 0)
for _, reqDestroy := range destroyList { for _, reqDestroy := range destroyList {
res, ok := destroyResults[reqDestroy.Name] res, ok := destroyResults[reqDestroy.Name]
if !ok { if !ok {
err = fmt.Errorf("missing destroy-result for %s", reqDestroy.RelName()) err = fmt.Errorf("missing destroy-result for %s", reqDestroy.RelName())
break break
} else if res.Error != "" { } else if res.Error != "" {
destroyFails = append(destroyFails, res) destroyFails = append(destroyFails, res)
} }
} }
if err == nil && len(destroyFails) > 0 { if err == nil && len(destroyFails) > 0 {
names := make([]string, len(destroyFails)) names := make([]string, len(destroyFails))
pairs := make([]string, len(destroyFails)) pairs := make([]string, len(destroyFails))
allSame := true allSame := true
lastMsg := destroyFails[0].Error lastMsg := destroyFails[0].Error
for i := 0; i < len(destroyFails); i++{ for i := 0; i < len(destroyFails); i++ {
allSame = allSame && destroyFails[i].Error == lastMsg allSame = allSame && destroyFails[i].Error == lastMsg
relname := destroyFails[i].Snapshot.RelName() relname := destroyFails[i].Snapshot.RelName()
names[i] = relname names[i] = relname

View File

@ -7,13 +7,13 @@ import (
) )
type execQueue struct { type execQueue struct {
mtx sync.Mutex mtx sync.Mutex
pending, completed []*fs pending, completed []*fs
} }
func newExecQueue(cap int) *execQueue { func newExecQueue(cap int) *execQueue {
q := execQueue{ q := execQueue{
pending: make([]*fs, 0, cap), pending: make([]*fs, 0, cap),
completed: make([]*fs, 0, cap), completed: make([]*fs, 0, cap),
} }
return &q return &q
@ -55,7 +55,7 @@ func (q *execQueue) Pop() *fs {
return fs return fs
} }
func(q *execQueue) Put(fs *fs, err error, done bool) { func (q *execQueue) Put(fs *fs, err error, done bool) {
fs.mtx.Lock() fs.mtx.Lock()
fs.execErrLast = err fs.execErrLast = err
if done || err != nil { if done || err != nil {
@ -79,5 +79,4 @@ func(q *execQueue) Put(fs *fs, err error, done bool) {
}) })
q.mtx.Unlock() q.mtx.Unlock()
}
}

View File

@ -1,18 +1,19 @@
package snapper package snapper
import ( import (
"github.com/zrepl/zrepl/config"
"github.com/pkg/errors"
"time"
"context" "context"
"github.com/zrepl/zrepl/daemon/filters"
"fmt" "fmt"
"github.com/zrepl/zrepl/zfs"
"sort" "sort"
"github.com/zrepl/zrepl/logger"
"sync" "sync"
) "time"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/filters"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/zfs"
)
//go:generate stringer -type=SnapState //go:generate stringer -type=SnapState
type SnapState uint type SnapState uint
@ -28,7 +29,7 @@ type snapProgress struct {
state SnapState state SnapState
// SnapStarted, SnapDone, SnapError // SnapStarted, SnapDone, SnapError
name string name string
startAt time.Time startAt time.Time
// SnapDone // SnapDone
@ -44,13 +45,13 @@ type args struct {
prefix string prefix string
interval time.Duration interval time.Duration
fsf *filters.DatasetMapFilter fsf *filters.DatasetMapFilter
snapshotsTaken chan<-struct{} snapshotsTaken chan<- struct{}
} }
type Snapper struct { type Snapper struct {
args args args args
mtx sync.Mutex mtx sync.Mutex
state State state State
// set in state Plan, used in Waiting // set in state Plan, used in Waiting
@ -70,7 +71,7 @@ type Snapper struct {
type State uint type State uint
const ( const (
SyncUp State = 1<<iota SyncUp State = 1 << iota
SyncUpErrWait SyncUpErrWait
Planning Planning
Snapshotting Snapshotting
@ -81,13 +82,13 @@ const (
func (s State) sf() state { func (s State) sf() state {
m := map[State]state{ m := map[State]state{
SyncUp: syncUp, SyncUp: syncUp,
SyncUpErrWait: wait, SyncUpErrWait: wait,
Planning: plan, Planning: plan,
Snapshotting: snapshot, Snapshotting: snapshot,
Waiting: wait, Waiting: wait,
ErrorWait: wait, ErrorWait: wait,
Stopped: nil, Stopped: nil,
} }
return m[s] return m[s]
} }
@ -123,9 +124,9 @@ func PeriodicFromConfig(g *config.Global, fsf *filters.DatasetMapFilter, in *con
} }
args := args{ args := args{
prefix: in.Prefix, prefix: in.Prefix,
interval: in.Interval, interval: in.Interval,
fsf: fsf, fsf: fsf,
// ctx and log is set in Run() // ctx and log is set in Run()
} }
@ -199,10 +200,10 @@ func syncUp(a args, u updater) state {
if err != nil { if err != nil {
return onErr(err, u) return onErr(err, u)
} }
u(func(s *Snapper){ u(func(s *Snapper) {
s.sleepUntil = syncPoint s.sleepUntil = syncPoint
}) })
t := time.NewTimer(syncPoint.Sub(time.Now())) t := time.NewTimer(time.Until(syncPoint))
defer t.Stop() defer t.Stop()
select { select {
case <-t.C: case <-t.C:
@ -306,7 +307,7 @@ func wait(a args, u updater) state {
logFunc("enter wait-state after error") logFunc("enter wait-state after error")
}) })
t := time.NewTimer(sleepUntil.Sub(time.Now())) t := time.NewTimer(time.Until(sleepUntil))
defer t.Stop() defer t.Stop()
select { select {
@ -386,4 +387,3 @@ func findSyncPoint(log Logger, fss []*zfs.DatasetPath, prefix string, interval t
return snaptimes[0].time, nil return snaptimes[0].time, nil
} }
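
The snapper's timers now use time.Until instead of Sub(time.Now()); elsewhere in the diff the mirror image, time.Since, replaces time.Now().Sub(t). Both rewrites are behavior-preserving:

package main

import (
    "fmt"
    "time"
)

func main() {
    syncPoint := time.Now().Add(200 * time.Millisecond)

    // Before: time.NewTimer(syncPoint.Sub(time.Now()))
    timer := time.NewTimer(time.Until(syncPoint))
    defer timer.Stop()

    begin := time.Now()
    <-timer.C

    // Before: time.Now().Sub(begin)
    fmt.Printf("slept roughly %v\n", time.Since(begin).Round(10*time.Millisecond))
}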

View File

@ -3,6 +3,7 @@ package snapper
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/daemon/filters" "github.com/zrepl/zrepl/daemon/filters"
) )
@ -17,7 +18,7 @@ type PeriodicOrManual struct {
s *Snapper s *Snapper
} }
func (s *PeriodicOrManual) Run(ctx context.Context, wakeUpCommon chan <- struct{}) { func (s *PeriodicOrManual) Run(ctx context.Context, wakeUpCommon chan<- struct{}) {
if s.s != nil { if s.s != nil {
s.s.Run(ctx, wakeUpCommon) s.s.Run(ctx, wakeUpCommon)
} }

View File

@ -2,6 +2,7 @@ package endpoint
import ( import (
"context" "context"
"github.com/zrepl/zrepl/logger" "github.com/zrepl/zrepl/logger"
) )

View File

@ -7,6 +7,7 @@ import (
"path" "path"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/zrepl/zrepl/replication/logic/pdu" "github.com/zrepl/zrepl/replication/logic/pdu"
"github.com/zrepl/zrepl/util/chainlock" "github.com/zrepl/zrepl/util/chainlock"
"github.com/zrepl/zrepl/zfs" "github.com/zrepl/zrepl/zfs"

View File

@ -39,7 +39,9 @@ godep() {
go build -o "$GOPATH/bin/stringer" ./vendor/golang.org/x/tools/cmd/stringer go build -o "$GOPATH/bin/stringer" ./vendor/golang.org/x/tools/cmd/stringer
go build -o "$GOPATH/bin/protoc-gen-go" ./vendor/github.com/golang/protobuf/protoc-gen-go go build -o "$GOPATH/bin/protoc-gen-go" ./vendor/github.com/golang/protobuf/protoc-gen-go
go build -o "$GOPATH/bin/enumer" ./vendor/github.com/alvaroloes/enumer go build -o "$GOPATH/bin/enumer" ./vendor/github.com/alvaroloes/enumer
if ! type stringer || ! type protoc-gen-go || ! type enumer ; then go build -o "$GOPATH/bin/goimports" ./vendor/golang.org/x/tools/cmd/goimports
go build -o "$GOPATH/bin/golangci-lint" ./vendor/github.com/golangci/golangci-lint/cmd/golangci-lint
if ! type stringer || ! type protoc-gen-go || ! type enumer || ! type goimports || ! type golangci-lint; then
echo "Installed dependencies but can't find them in \$PATH, adjust it to contain \$GOPATH/bin" 1>&2 echo "Installed dependencies but can't find them in \$PATH, adjust it to contain \$GOPATH/bin" 1>&2
exit 1 exit 1
fi fi

View File

@ -4,10 +4,11 @@ import (
"bytes" "bytes"
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/fatih/color"
"github.com/pkg/errors"
"sync" "sync"
"time" "time"
"github.com/fatih/color"
"github.com/pkg/errors"
) )
type Level int type Level int
@ -66,7 +67,7 @@ func (l Level) Short() string {
case Error: case Error:
return "ERRO" return "ERRO"
default: default:
return fmt.Sprintf("%s", l) return l.String()
} }
} }
@ -81,7 +82,7 @@ func (l Level) String() string {
case Error: case Error:
return "error" return "error"
default: default:
return fmt.Sprintf("%s", string(l)) return string(l)
} }
} }

View File

@ -66,7 +66,8 @@ func (l *loggerImpl) logInternalError(outlet Outlet, err string) {
time.Now(), time.Now(),
fields, fields,
} }
l.outlets.GetLoggerErrorOutlet().WriteEntry(entry) // ignore errors at this point (still better than panicking if the error is temporary)
_ = l.outlets.GetLoggerErrorOutlet().WriteEntry(entry)
} }
func (l *loggerImpl) log(level Level, msg string) { func (l *loggerImpl) log(level Level, msg string) {

View File

@ -2,10 +2,12 @@ package logger_test
import ( import (
"fmt" "fmt"
"github.com/kr/pretty"
"github.com/zrepl/zrepl/logger"
"testing" "testing"
"time" "time"
"github.com/kr/pretty"
"github.com/zrepl/zrepl/logger"
) )
type TestOutlet struct { type TestOutlet struct {

View File

@ -5,11 +5,7 @@ import (
"os" "os"
) )
type stderrLogger struct { type stderrLoggerOutlet struct{}
Logger
}
type stderrLoggerOutlet struct {}
func (stderrLoggerOutlet) WriteEntry(entry Entry) error { func (stderrLoggerOutlet) WriteEntry(entry Entry) error {
fmt.Fprintf(os.Stderr, "%#v\n", entry) fmt.Fprintf(os.Stderr, "%#v\n", entry)

View File

@ -2,12 +2,14 @@ package pruning
import ( import (
"fmt" "fmt"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/pruning/retentiongrid"
"regexp" "regexp"
"sort" "sort"
"time" "time"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/pruning/retentiongrid"
) )
// KeepGrid fits snapshots that match a given regex into a retentiongrid.Grid, // KeepGrid fits snapshots that match a given regex into a retentiongrid.Grid,
@ -15,7 +17,7 @@ import (
// and deletes all snapshots that do not fit the grid specification. // and deletes all snapshots that do not fit the grid specification.
type KeepGrid struct { type KeepGrid struct {
retentionGrid *retentiongrid.Grid retentionGrid *retentiongrid.Grid
re *regexp.Regexp re *regexp.Regexp
} }
func NewKeepGrid(in *config.PruneGrid) (p *KeepGrid, err error) { func NewKeepGrid(in *config.PruneGrid) (p *KeepGrid, err error) {

View File

@ -1,8 +1,9 @@
package pruning package pruning
import ( import (
"github.com/stretchr/testify/assert"
"testing" "testing"
"github.com/stretchr/testify/assert"
) )
func TestShallowCopySnapList(t *testing.T) { func TestShallowCopySnapList(t *testing.T) {

View File

@ -1,8 +1,9 @@
package pruning package pruning
import ( import (
"github.com/pkg/errors"
"sort" "sort"
"github.com/pkg/errors"
) )
type KeepLastN struct { type KeepLastN struct {

View File

@ -1,9 +1,10 @@
package pruning package pruning
import ( import (
"github.com/stretchr/testify/assert"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert"
) )
func TestKeepLastN(t *testing.T) { func TestKeepLastN(t *testing.T) {

View File

@ -1,8 +1,6 @@
package pruning package pruning
type KeepNotReplicated struct { type KeepNotReplicated struct{}
forceConstructor struct{}
}
func (*KeepNotReplicated) KeepRule(snaps []Snapshot) (destroyList []Snapshot) { func (*KeepNotReplicated) KeepRule(snaps []Snapshot) (destroyList []Snapshot) {
return filterSnapList(snaps, func(snapshot Snapshot) bool { return filterSnapList(snaps, func(snapshot Snapshot) bool {

View File

@ -5,7 +5,7 @@ import (
) )
type KeepRegex struct { type KeepRegex struct {
expr *regexp.Regexp expr *regexp.Regexp
negate bool negate bool
} }

View File

@ -2,9 +2,11 @@ package pruning
import ( import (
"fmt" "fmt"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
"time" "time"
"github.com/pkg/errors"
"github.com/zrepl/zrepl/config"
) )
type KeepRule interface { type KeepRule interface {
@ -20,7 +22,7 @@ type Snapshot interface {
// The returned snapshot list is guaranteed to only contains elements of input parameter snaps // The returned snapshot list is guaranteed to only contains elements of input parameter snaps
func PruneSnapshots(snaps []Snapshot, keepRules []KeepRule) []Snapshot { func PruneSnapshots(snaps []Snapshot, keepRules []KeepRule) []Snapshot {
if keepRules == nil || len(keepRules) == 0 { if len(keepRules) == 0 {
return []Snapshot{} return []Snapshot{}
} }

View File

@ -2,11 +2,12 @@ package retentiongrid
import ( import (
"fmt" "fmt"
"github.com/stretchr/testify/assert"
"strconv" "strconv"
"strings" "strings"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert"
) )
type retentionIntervalStub struct { type retentionIntervalStub struct {

View File

@ -6,9 +6,9 @@ import (
"fmt" "fmt"
) )
const _errorClassName = "errorClassUnknownerrorClassPermanenterrorClassTemporaryConnectivityRelated" const _errorClassName = "errorClassPermanenterrorClassTemporaryConnectivityRelated"
var _errorClassIndex = [...]uint8{0, 17, 36, 74} var _errorClassIndex = [...]uint8{0, 19, 57}
func (i errorClass) String() string { func (i errorClass) String() string {
if i < 0 || i >= errorClass(len(_errorClassIndex)-1) { if i < 0 || i >= errorClass(len(_errorClassIndex)-1) {
@ -17,12 +17,11 @@ func (i errorClass) String() string {
return _errorClassName[_errorClassIndex[i]:_errorClassIndex[i+1]] return _errorClassName[_errorClassIndex[i]:_errorClassIndex[i+1]]
} }
var _errorClassValues = []errorClass{0, 1, 2} var _errorClassValues = []errorClass{0, 1}
var _errorClassNameToValueMap = map[string]errorClass{ var _errorClassNameToValueMap = map[string]errorClass{
_errorClassName[0:17]: 0, _errorClassName[0:19]: 0,
_errorClassName[17:36]: 1, _errorClassName[19:57]: 1,
_errorClassName[36:74]: 2,
} }
// errorClassString retrieves an enum value from the enum constants string name. // errorClassString retrieves an enum value from the enum constants string name.
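
This file is generated by the enumer tool (see the //go:generate directive further down); removing errorClassUnknown shrinks the packed name string and its index table. A hand-written stand-in for what the generated String method amounts to, using the two remaining values:

package main

import "fmt"

type errorClass int

const (
    errorClassPermanent errorClass = iota
    errorClassTemporaryConnectivityRelated
)

// String is a hand-written approximation of the enumer output; the generated
// version slices a packed name string instead of using a switch.
func (i errorClass) String() string {
    switch i {
    case errorClassPermanent:
        return "errorClassPermanent"
    case errorClassTemporaryConnectivityRelated:
        return "errorClassTemporaryConnectivityRelated"
    default:
        return fmt.Sprintf("errorClass(%d)", int(i))
    }
}

func main() {
    fmt.Println(errorClassPermanent, errorClassTemporaryConnectivityRelated)
}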

View File

@ -10,11 +10,12 @@ import (
"sync" "sync"
"time" "time"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/zrepl/zrepl/replication/report" "github.com/zrepl/zrepl/replication/report"
"github.com/zrepl/zrepl/util/chainlock" "github.com/zrepl/zrepl/util/chainlock"
"github.com/zrepl/zrepl/util/envconst" "github.com/zrepl/zrepl/util/envconst"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
) )
type interval struct { type interval struct {
@ -338,7 +339,7 @@ func (a *attempt) do(ctx context.Context, prev *attempt) {
l := fmt.Sprintf(" %s => %v", i.cur.fs.ReportInfo().Name, prevNames) l := fmt.Sprintf(" %s => %v", i.cur.fs.ReportInfo().Name, prevNames)
inconsistencyLines = append(inconsistencyLines, l) inconsistencyLines = append(inconsistencyLines, l)
} }
fmt.Fprintf(&msg, strings.Join(inconsistencyLines, "\n")) fmt.Fprint(&msg, strings.Join(inconsistencyLines, "\n"))
now := time.Now() now := time.Now()
a.planErr = newTimedError(errors.New(msg.String()), now) a.planErr = newTimedError(errors.New(msg.String()), now)
a.fss = nil a.fss = nil
@ -551,17 +552,11 @@ func (s *step) report() *report.StepReport {
return r return r
} }
type stepErrorReport struct {
err *timedError
step int
}
//go:generate enumer -type=errorClass //go:generate enumer -type=errorClass
type errorClass int type errorClass int
const ( const (
errorClassUnknown errorClass = iota errorClassPermanent errorClass = iota
errorClassPermanent
errorClassTemporaryConnectivityRelated errorClassTemporaryConnectivityRelated
) )

View File

@ -13,6 +13,7 @@ func init() {
} }
} }
//nolint[:deadcode,unused]
func debug(format string, args ...interface{}) { func debug(format string, args ...interface{}) {
if debugEnabled { if debugEnabled {
fmt.Fprintf(os.Stderr, "repl: driver: %s\n", fmt.Sprintf(format, args...)) fmt.Fprintf(os.Stderr, "repl: driver: %s\n", fmt.Sprintf(format, args...))
@ -21,9 +22,10 @@ func debug(format string, args ...interface{}) {
type debugFunc func(format string, args ...interface{}) type debugFunc func(format string, args ...interface{})
//nolint[:deadcode,unused]
func debugPrefix(prefixFormat string, prefixFormatArgs ...interface{}) debugFunc { func debugPrefix(prefixFormat string, prefixFormatArgs ...interface{}) debugFunc {
prefix := fmt.Sprintf(prefixFormat, prefixFormatArgs...) prefix := fmt.Sprintf(prefixFormat, prefixFormatArgs...)
return func(format string, args ...interface{}) { return func(format string, args ...interface{}) {
debug("%s: %s", prefix, fmt.Sprintf(format, args)) debug("%s: %s", prefix, fmt.Sprintf(format, args))
} }
} }

View File

@ -10,6 +10,7 @@ import (
"time" "time"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/zrepl/zrepl/replication/report" "github.com/zrepl/zrepl/replication/report"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@ -165,14 +166,14 @@ func TestReplication(t *testing.T) {
reports := make([]*report.Report, len(fireAt)) reports := make([]*report.Report, len(fireAt))
for i := range fireAt { for i := range fireAt {
sleepUntil := begin.Add(fireAt[i]) sleepUntil := begin.Add(fireAt[i])
time.Sleep(sleepUntil.Sub(time.Now())) time.Sleep(time.Until(sleepUntil))
reports[i] = getReport() reports[i] = getReport()
// uncomment for viewing non-diffed results // uncomment for viewing non-diffed results
// t.Logf("report @ %6.4f:\n%s", fireAt[i].Seconds(), pretty.Sprint(reports[i])) // t.Logf("report @ %6.4f:\n%s", fireAt[i].Seconds(), pretty.Sprint(reports[i]))
} }
waitBegin := time.Now() waitBegin := time.Now()
wait(true) wait(true)
waitDuration := time.Now().Sub(waitBegin) waitDuration := time.Since(waitBegin)
assert.True(t, waitDuration < 10*time.Millisecond, "%v", waitDuration) // and that's gratious assert.True(t, waitDuration < 10*time.Millisecond, "%v", waitDuration) // and that's gratious
prev, err := json.Marshal(reports[0]) prev, err := json.Marshal(reports[0])

View File

@ -96,7 +96,7 @@ func TestPqConcurrent(t *testing.T) {
pos := atomic.AddUint32(&globalCtr, 1) pos := atomic.AddUint32(&globalCtr, 1)
t := time.Unix(int64(step), 0) t := time.Unix(int64(step), 0)
done := q.WaitReady(fs, t) done := q.WaitReady(fs, t)
wakeAt := time.Now().Sub(begin) wakeAt := time.Since(begin)
time.Sleep(sleepTimePerStep) time.Sleep(sleepTimePerStep)
done() done()
recs = append(recs, record{fs, step, pos, wakeAt}) recs = append(recs, record{fs, step, pos, wakeAt})

View File

@ -97,7 +97,7 @@ func TestIncrementalPath_SnapshotsOnly(t *testing.T) {
}) })
// sender with earlier but also current version as sender is not a conflict // sender with earlier but also current version as sender is not a conflict
doTest(l("@c,3"), l("@a,1", "@b,2", "@c,3") , func(path []*FilesystemVersion, conflict error) { doTest(l("@c,3"), l("@a,1", "@b,2", "@c,3"), func(path []*FilesystemVersion, conflict error) {
t.Logf("path: %#v", path) t.Logf("path: %#v", path)
t.Logf("conflict: %#v", conflict) t.Logf("conflict: %#v", conflict)
assert.Empty(t, path) assert.Empty(t, path)

View File

@ -2,8 +2,9 @@ package pdu
import ( import (
"fmt" "fmt"
"github.com/zrepl/zrepl/zfs"
"time" "time"
"github.com/zrepl/zrepl/zfs"
) )
func (v *FilesystemVersion) RelName() string { func (v *FilesystemVersion) RelName() string {

View File

@ -1,9 +1,10 @@
package pdu package pdu
import ( import (
"github.com/stretchr/testify/assert"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert"
) )
func TestFilesystemVersion_RelName(t *testing.T) { func TestFilesystemVersion_RelName(t *testing.T) {
@ -18,24 +19,24 @@ func TestFilesystemVersion_RelName(t *testing.T) {
tcs := []TestCase{ tcs := []TestCase{
{ {
In: FilesystemVersion{ In: FilesystemVersion{
Type: FilesystemVersion_Snapshot, Type: FilesystemVersion_Snapshot,
Name: "foobar", Name: "foobar",
Creation: creat, Creation: creat,
}, },
Out: "@foobar", Out: "@foobar",
}, },
{ {
In: FilesystemVersion{ In: FilesystemVersion{
Type: FilesystemVersion_Bookmark, Type: FilesystemVersion_Bookmark,
Name: "foobar", Name: "foobar",
Creation: creat, Creation: creat,
}, },
Out: "#foobar", Out: "#foobar",
}, },
{ {
In: FilesystemVersion{ In: FilesystemVersion{
Type: 2342, Type: 2342,
Name: "foobar", Name: "foobar",
Creation: creat, Creation: creat,
}, },
Panic: true, Panic: true,
@ -58,7 +59,7 @@ func TestFilesystemVersion_RelName(t *testing.T) {
func TestFilesystemVersion_ZFSFilesystemVersion(t *testing.T) { func TestFilesystemVersion_ZFSFilesystemVersion(t *testing.T) {
empty := &FilesystemVersion{} empty := &FilesystemVersion{}
_, err:= empty.ZFSFilesystemVersion() _, err := empty.ZFSFilesystemVersion()
assert.Error(t, err) assert.Error(t, err)
dateInvalid := &FilesystemVersion{Creation: "foobar"} dateInvalid := &FilesystemVersion{Creation: "foobar"}

View File

@ -26,7 +26,7 @@ type Endpoint interface {
ListFilesystems(ctx context.Context, req *pdu.ListFilesystemReq) (*pdu.ListFilesystemRes, error) ListFilesystems(ctx context.Context, req *pdu.ListFilesystemReq) (*pdu.ListFilesystemRes, error)
ListFilesystemVersions(ctx context.Context, req *pdu.ListFilesystemVersionsReq) (*pdu.ListFilesystemVersionsRes, error) ListFilesystemVersions(ctx context.Context, req *pdu.ListFilesystemVersionsReq) (*pdu.ListFilesystemVersionsRes, error)
DestroySnapshots(ctx context.Context, req *pdu.DestroySnapshotsReq) (*pdu.DestroySnapshotsRes, error) DestroySnapshots(ctx context.Context, req *pdu.DestroySnapshotsReq) (*pdu.DestroySnapshotsRes, error)
WaitForConnectivity(ctx context.Context) (error) WaitForConnectivity(ctx context.Context) error
} }
type Sender interface { type Sender interface {
@ -107,7 +107,7 @@ type Filesystem struct {
sender Sender sender Sender
receiver Receiver receiver Receiver
Path string // compat Path string // compat
receiverFS *pdu.Filesystem receiverFS *pdu.Filesystem
promBytesReplicated prometheus.Counter // compat promBytesReplicated prometheus.Counter // compat
} }

View File

@ -7,6 +7,7 @@ import (
"strings" "strings"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/zrepl/zrepl/replication/logic/pdu" "github.com/zrepl/zrepl/replication/logic/pdu"
"github.com/zrepl/zrepl/rpc/dataconn/stream" "github.com/zrepl/zrepl/rpc/dataconn/stream"
"github.com/zrepl/zrepl/transport" "github.com/zrepl/zrepl/transport"
@ -214,13 +215,12 @@ func (c *Client) ReqRecv(ctx context.Context, req *pdu.ReceiveReq, streamCopier
return res.res, cause return res.res, cause
} }
func (c *Client) ReqPing(ctx context.Context, req *pdu.PingReq) (*pdu.PingRes, error) { func (c *Client) ReqPing(ctx context.Context, req *pdu.PingReq) (*pdu.PingRes, error) {
conn, err := c.getWire(ctx) conn, err := c.getWire(ctx)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer c.putWire(conn) defer c.putWire(conn)
if err := c.send(ctx, conn, EndpointPing, req, nil); err != nil { if err := c.send(ctx, conn, EndpointPing, req, nil); err != nil {
return nil, err return nil, err
@ -232,4 +232,4 @@ func (c *Client) ReqPing(ctx context.Context, req *pdu.PingReq) (*pdu.PingRes, e
} }
return &res, nil return &res, nil
} }

View File

@ -13,6 +13,7 @@ func init() {
} }
} }
//nolint[:deadcode,unused]
func debug(format string, args ...interface{}) { func debug(format string, args ...interface{}) {
if debugEnabled { if debugEnabled {
fmt.Fprintf(os.Stderr, "rpc/dataconn: %s\n", fmt.Sprintf(format, args...)) fmt.Fprintf(os.Stderr, "rpc/dataconn: %s\n", fmt.Sprintf(format, args...))

View File

@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"github.com/golang/protobuf/proto" "github.com/golang/protobuf/proto"
"github.com/zrepl/zrepl/logger" "github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/replication/logic/pdu" "github.com/zrepl/zrepl/replication/logic/pdu"
"github.com/zrepl/zrepl/rpc/dataconn/stream" "github.com/zrepl/zrepl/rpc/dataconn/stream"
@ -137,7 +138,6 @@ func (s *Server) serveConn(nc *transport.AuthConn) {
default: default:
s.log.WithField("endpoint", endpoint).Error("unknown endpoint") s.log.WithField("endpoint", endpoint).Error("unknown endpoint")
handlerErr = fmt.Errorf("requested endpoint does not exist") handlerErr = fmt.Errorf("requested endpoint does not exist")
return
} }
s.log.WithField("endpoint", endpoint).WithField("errType", fmt.Sprintf("%T", handlerErr)).Debug("handler returned") s.log.WithField("endpoint", endpoint).WithField("errType", fmt.Sprintf("%T", handlerErr)).Debug("handler returned")
@ -187,6 +187,4 @@ func (s *Server) serveConn(nc *transport.AuthConn) {
s.log.WithError(err).Error("cannot write send stream") s.log.WithError(err).Error("cannot write send stream")
} }
} }
return
} }

View File

@ -1,7 +1,6 @@
package frameconn package frameconn
import ( import (
"bufio"
"encoding/binary" "encoding/binary"
"errors" "errors"
"fmt" "fmt"
@ -12,6 +11,7 @@ import (
"time" "time"
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/zrepl/zrepl/rpc/dataconn/base2bufpool" "github.com/zrepl/zrepl/rpc/dataconn/base2bufpool"
"github.com/zrepl/zrepl/rpc/dataconn/timeoutconn" "github.com/zrepl/zrepl/rpc/dataconn/timeoutconn"
) )
@ -47,7 +47,6 @@ func (f *FrameHeader) Unmarshal(buf []byte) {
type Conn struct { type Conn struct {
readMtx, writeMtx sync.Mutex readMtx, writeMtx sync.Mutex
nc timeoutconn.Conn nc timeoutconn.Conn
ncBuf *bufio.ReadWriter
readNextValid bool readNextValid bool
readNext FrameHeader readNext FrameHeader
nextReadErr error nextReadErr error

View File

@ -3,24 +3,18 @@ package frameconn
import "sync" import "sync"
type shutdownFSM struct { type shutdownFSM struct {
mtx sync.Mutex mtx sync.Mutex
state shutdownFSMState state shutdownFSMState
} }
type shutdownFSMState uint32 type shutdownFSMState uint32
const ( const (
// zero value is important
shutdownStateOpen shutdownFSMState = iota shutdownStateOpen shutdownFSMState = iota
shutdownStateBegin shutdownStateBegin
) )
func newShutdownFSM() *shutdownFSM {
fsm := &shutdownFSM{
state: shutdownStateOpen,
}
return fsm
}
func (f *shutdownFSM) Begin() (thisCallStartedShutdown bool) { func (f *shutdownFSM) Begin() (thisCallStartedShutdown bool) {
f.mtx.Lock() f.mtx.Lock()
defer f.mtx.Unlock() defer f.mtx.Unlock()
@ -34,4 +28,3 @@ func (f *shutdownFSM) IsShuttingDown() bool {
defer f.mtx.Unlock() defer f.mtx.Unlock()
return f.state != shutdownStateOpen return f.state != shutdownStateOpen
} }

View File

@ -19,4 +19,3 @@ func TestIsPublicFrameType(t *testing.T) {
assert.True(t, IsPublicFrameType(255)) assert.True(t, IsPublicFrameType(255))
assert.False(t, IsPublicFrameType(rstFrameType)) assert.False(t, IsPublicFrameType(rstFrameType))
} }

View File

@ -11,9 +11,7 @@ import (
) )
type Conn struct { type Conn struct {
state state state state
// if not nil, opErr is returned for ReadFrame and WriteFrame (not for Close, though)
opErr atomic.Value // error
fc *frameconn.Conn fc *frameconn.Conn
sendInterval, timeout time.Duration sendInterval, timeout time.Duration
stopSend chan struct{} stopSend chan struct{}
@ -97,7 +95,10 @@ func (c *Conn) sendHeartbeats() {
debug("send heartbeat") debug("send heartbeat")
// if the connection is in zombie mode (aka iptables DROP inbetween peers) // if the connection is in zombie mode (aka iptables DROP inbetween peers)
// this call or one of its successors will block after filling up the kernel tx buffer // this call or one of its successors will block after filling up the kernel tx buffer
c.fc.WriteFrame([]byte{}, heartbeat) err := c.fc.WriteFrame([]byte{}, heartbeat)
if err != nil {
debug("send heartbeat error: %s", err)
}
// ignore errors from WriteFrame to rate-limit SendHeartbeat retries // ignore errors from WriteFrame to rate-limit SendHeartbeat retries
c.lastFrameSent.Store(time.Now()) c.lastFrameSent.Store(time.Now())
}() }()
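
The heartbeatconn hunk above stops discarding WriteFrame's return value: the error is logged at debug level and otherwise dropped, because heartbeat retries are meant to stay rate-limited by the send interval rather than tightening into a retry loop. A sketch of that log-and-continue pattern; the frameWriter interface and the frame type value below are stand-ins, not the package's real frameconn API:

    package example

    import (
    	"log"
    	"time"
    )

    // frameWriter is a stand-in with just enough surface for the sketch.
    type frameWriter interface {
    	WriteFrame(payload []byte, frameType uint32) error
    }

    const heartbeatFrameType uint32 = 1 // illustrative value, not the real constant

    func sendHeartbeats(fw frameWriter, interval time.Duration, stop <-chan struct{}) {
    	ticker := time.NewTicker(interval)
    	defer ticker.Stop()
    	for {
    		select {
    		case <-stop:
    			return
    		case <-ticker.C:
    			// A failed heartbeat is only logged: propagating it would tear the
    			// connection down on a transient hiccup, and retrying faster than
    			// the ticker would defeat the intended rate limit.
    			if err := fw.WriteFrame([]byte{}, heartbeatFrameType); err != nil {
    				log.Printf("send heartbeat error: %s", err)
    			}
    		}
    	}
    }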

View File

@ -13,6 +13,7 @@ func init() {
} }
} }
//nolint[:deadcode,unused]
func debug(format string, args ...interface{}) { func debug(format string, args ...interface{}) {
if debugEnabled { if debugEnabled {
fmt.Fprintf(os.Stderr, "rpc/dataconn/heartbeatconn: %s\n", fmt.Sprintf(format, args...)) fmt.Fprintf(os.Stderr, "rpc/dataconn/heartbeatconn: %s\n", fmt.Sprintf(format, args...))

View File

@ -5,6 +5,7 @@ import (
"time" "time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/zrepl/zrepl/rpc/dataconn/frameconn" "github.com/zrepl/zrepl/rpc/dataconn/frameconn"
) )

View File

@ -28,6 +28,7 @@ func WithLogger(ctx context.Context, log Logger) context.Context {
return context.WithValue(ctx, contextKeyLogger, log) return context.WithValue(ctx, contextKeyLogger, log)
} }
//nolint[:deadcode,unused]
func getLog(ctx context.Context) Logger { func getLog(ctx context.Context) Logger {
log, ok := ctx.Value(contextKeyLogger).(Logger) log, ok := ctx.Value(contextKeyLogger).(Logger)
if !ok { if !ok {

View File

@ -23,9 +23,8 @@ type Conn struct {
// readMtx serializes read stream operations because we inherently only // readMtx serializes read stream operations because we inherently only
// support a single stream at a time over hc. // support a single stream at a time over hc.
readMtx sync.Mutex readMtx sync.Mutex
readClean bool readClean bool
allowWriteStreamTo bool
// writeMtx serializes write stream operations because we inherently only // writeMtx serializes write stream operations because we inherently only
// support a single stream at a time over hc. // support a single stream at a time over hc.
@ -95,7 +94,7 @@ func (c *Conn) ReadStreamedMessage(ctx context.Context, maxSize uint32, frameTyp
}() }()
err := readStream(c.frameReads, c.hc, w, frameType) err := readStream(c.frameReads, c.hc, w, frameType)
c.readClean = isConnCleanAfterRead(err) c.readClean = isConnCleanAfterRead(err)
w.CloseWithError(readMessageSentinel) _ = w.CloseWithError(readMessageSentinel) // always returns nil
wg.Wait() wg.Wait()
if err != nil { if err != nil {
return nil, err return nil, err
@ -166,7 +165,7 @@ func (c *Conn) SendStream(ctx context.Context, src zfs.StreamCopier, frameType u
var res writeStreamRes var res writeStreamRes
res.errStream, res.errConn = writeStream(ctx, c.hc, r, frameType) res.errStream, res.errConn = writeStream(ctx, c.hc, r, frameType)
if w != nil { if w != nil {
w.CloseWithError(res.errStream) _ = w.CloseWithError(res.errStream) // always returns nil
} }
writeStreamErrChan <- res writeStreamErrChan <- res
}() }()

View File

@ -13,6 +13,7 @@ func init() {
} }
} }
//nolint[:deadcode,unused]
func debug(format string, args ...interface{}) { func debug(format string, args ...interface{}) {
if debugEnabled { if debugEnabled {
fmt.Fprintf(os.Stderr, "rpc/dataconn/stream: %s\n", fmt.Sprintf(format, args...)) fmt.Fprintf(os.Stderr, "rpc/dataconn/stream: %s\n", fmt.Sprintf(format, args...))

View File

@ -11,6 +11,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/zrepl/zrepl/logger" "github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/rpc/dataconn/heartbeatconn" "github.com/zrepl/zrepl/rpc/dataconn/heartbeatconn"
"github.com/zrepl/zrepl/util/socketpair" "github.com/zrepl/zrepl/util/socketpair"

View File

@ -11,6 +11,7 @@ import (
netssh "github.com/problame/go-netssh" netssh "github.com/problame/go-netssh"
"github.com/zrepl/yaml-config" "github.com/zrepl/yaml-config"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/transport" "github.com/zrepl/zrepl/transport"
transportconfig "github.com/zrepl/zrepl/transport/fromconfig" transportconfig "github.com/zrepl/zrepl/transport/fromconfig"

View File

@ -52,9 +52,6 @@ func (CloseWrite) sender(wire transport.Wire) {
log.Printf("closeErr=%T %s", closeErr, closeErr) log.Printf("closeErr=%T %s", closeErr, closeErr)
}() }()
type opResult struct {
err error
}
writeDone := make(chan struct{}, 1) writeDone := make(chan struct{}, 1)
go func() { go func() {
close(writeDone) close(writeDone)
@ -85,7 +82,7 @@ func (CloseWrite) receiver(wire transport.Wire) {
// consume half the test data, then detect an error, send it and CloseWrite // consume half the test data, then detect an error, send it and CloseWrite
r := io.LimitReader(wire, int64(5 * len(closeWriteTestSendData)/3)) r := io.LimitReader(wire, int64(5*len(closeWriteTestSendData)/3))
_, err := io.Copy(ioutil.Discard, r) _, err := io.Copy(ioutil.Discard, r)
noerror(err) noerror(err)
@ -103,7 +100,7 @@ func (CloseWrite) receiver(wire transport.Wire) {
// io.Copy masks io.EOF to nil, and we expect io.EOF from the client's Close() call // io.Copy masks io.EOF to nil, and we expect io.EOF from the client's Close() call
log.Panicf("unexpected error returned from reading conn: %s", err) log.Panicf("unexpected error returned from reading conn: %s", err)
} }
closeErr := wire.Close() closeErr := wire.Close()
log.Printf("closeErr=%T %s", closeErr, closeErr) log.Printf("closeErr=%T %s", closeErr, closeErr)

View File

@ -95,7 +95,7 @@ restart:
return n, err return n, err
} }
var nCurRead int var nCurRead int
nCurRead, err = c.Wire.Read(p[n:len(p)]) nCurRead, err = c.Wire.Read(p[n:])
n += nCurRead n += nCurRead
if netErr, ok := err.(net.Error); ok && netErr.Timeout() && nCurRead > 0 { if netErr, ok := err.(net.Error); ok && netErr.Timeout() && nCurRead > 0 {
err = nil err = nil
@ -111,7 +111,7 @@ restart:
return n, err return n, err
} }
var nCurWrite int var nCurWrite int
nCurWrite, err = c.Wire.Write(p[n:len(p)]) nCurWrite, err = c.Wire.Write(p[n:])
n += nCurWrite n += nCurWrite
if netErr, ok := err.(net.Error); ok && netErr.Timeout() && nCurWrite > 0 { if netErr, ok := err.(net.Error); ok && netErr.Timeout() && nCurWrite > 0 {
err = nil err = nil
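
The timeoutconn changes above are pure gosimple-style rewrites: p[n:len(p)] and p[n:] denote exactly the same slice, with the same backing array, length, and capacity. A quick check of that equivalence:

    package example

    // tailsAreEqual demonstrates that p[n:len(p)] and p[n:] are the same slice:
    // same length, same capacity, and (when non-empty) same first element address.
    func tailsAreEqual(p []byte, n int) bool {
    	a := p[n:len(p)]
    	b := p[n:]
    	return len(a) == len(b) && cap(a) == cap(b) && (len(a) == 0 || &a[0] == &b[0])
    }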

View File

@ -10,6 +10,7 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/zrepl/zrepl/util/socketpair" "github.com/zrepl/zrepl/util/socketpair"
) )
@ -101,7 +102,7 @@ func TestNoPartialReadsDueToDeadline(t *testing.T) {
// io.Copy will encounter a partial read, then wait ~50ms until the other 5 bytes are written // io.Copy will encounter a partial read, then wait ~50ms until the other 5 bytes are written
// It is still going to fail with deadline err because it expects EOF // It is still going to fail with deadline err because it expects EOF
n, err := io.Copy(&buf, bc) n, err := io.Copy(&buf, bc)
readDuration := time.Now().Sub(beginRead) readDuration := time.Since(beginRead)
t.Logf("read duration=%s", readDuration) t.Logf("read duration=%s", readDuration)
t.Logf("recv done n=%v err=%v", n, err) t.Logf("recv done n=%v err=%v", n, err)
t.Logf("buf=%v", buf.Bytes()) t.Logf("buf=%v", buf.Bytes())
@ -152,7 +153,7 @@ func TestPartialWriteMockConn(t *testing.T) {
buf := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} buf := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
begin := time.Now() begin := time.Now()
n, err := mc.Write(buf[:]) n, err := mc.Write(buf[:])
duration := time.Now().Sub(begin) duration := time.Since(begin)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 5, n) assert.Equal(t, 5, n)
assert.True(t, duration > 100*time.Millisecond) assert.True(t, duration > 100*time.Millisecond)
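
Likewise, time.Since(begin) in the tests above is just the lint-preferred spelling of time.Now().Sub(begin); behaviour is unchanged. For example:

    package example

    import "time"

    // measure returns how long f took; time.Since(begin) is shorthand for
    // time.Now().Sub(begin), which is what the hunks above replace.
    func measure(f func()) time.Duration {
    	begin := time.Now()
    	f()
    	return time.Since(begin)
    }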

View File

@ -18,11 +18,12 @@ import (
"net" "net"
"time" "time"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/transport"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials"
"google.golang.org/grpc/peer" "google.golang.org/grpc/peer"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/transport"
) )
type Logger = logger.Logger type Logger = logger.Logger
@ -105,7 +106,7 @@ func NewInterceptors(logger Logger, clientIdentityKey interface{}) (unary grpc.U
if !ok { if !ok {
panic("peer.FromContext expected to return a peer in grpc.UnaryServerInterceptor") panic("peer.FromContext expected to return a peer in grpc.UnaryServerInterceptor")
} }
logger.WithField("peer_addr", fmt.Sprintf("%s", p.Addr)).Debug("peer addr") logger.WithField("peer_addr", p.Addr.String()).Debug("peer addr")
a, ok := p.AuthInfo.(*authConnAuthType) a, ok := p.AuthInfo.(*authConnAuthType)
if !ok { if !ok {
panic(fmt.Sprintf("NewInterceptors must be used in combination with grpc.NewTransportCredentials, but got auth type %T", p.AuthInfo)) panic(fmt.Sprintf("NewInterceptors must be used in combination with grpc.NewTransportCredentials, but got auth type %T", p.AuthInfo))

View File

@ -88,6 +88,9 @@ func server() {
log := logger.NewStderrDebugLogger() log := logger.NewStderrDebugLogger()
srv, serve, err := grpchelper.NewServer(authListenerFactory, clientIdentityKey, log) srv, serve, err := grpchelper.NewServer(authListenerFactory, clientIdentityKey, log)
if err != nil {
onErr(err, "new server")
}
svc := &greeter{"hello "} svc := &greeter{"hello "}
pdu.RegisterGreeterServer(srv, svc) pdu.RegisterGreeterServer(srv, svc)

View File

@ -25,8 +25,9 @@ package netadaptor
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/zrepl/zrepl/logger"
"net" "net"
"github.com/zrepl/zrepl/logger"
"github.com/zrepl/zrepl/transport" "github.com/zrepl/zrepl/transport"
) )

View File

@ -12,6 +12,7 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
"github.com/google/uuid" "github.com/google/uuid"
"github.com/zrepl/zrepl/replication/logic" "github.com/zrepl/zrepl/replication/logic"
"github.com/zrepl/zrepl/replication/logic/pdu" "github.com/zrepl/zrepl/replication/logic/pdu"
"github.com/zrepl/zrepl/rpc/dataconn" "github.com/zrepl/zrepl/rpc/dataconn"
@ -158,7 +159,7 @@ func (c *Client) WaitForConnectivity(ctx context.Context) error {
time.Sleep(envconst.Duration("ZREPL_RPC_DATACONN_PING_SLEEP", 1*time.Second)) time.Sleep(envconst.Duration("ZREPL_RPC_DATACONN_PING_SLEEP", 1*time.Second))
continue continue
} }
// it's not a dial timeout, // it's not a dial timeout,
checkRes(data, dataErr, loggers.Data, &dataOk) checkRes(data, dataErr, loggers.Data, &dataOk)
return return
} }

View File

@ -13,6 +13,7 @@ func init() {
} }
} }
//nolint[:deadcode,unused]
func debug(format string, args ...interface{}) { func debug(format string, args ...interface{}) {
if debugEnabled { if debugEnabled {
fmt.Fprintf(os.Stderr, "rpc: %s\n", fmt.Sprintf(format, args...)) fmt.Fprintf(os.Stderr, "rpc: %s\n", fmt.Sprintf(format, args...))

View File

@ -115,4 +115,3 @@ package rpc
// - remove the comments // // - remove the comments //
// - vim: set virtualedit+=all // - vim: set virtualedit+=all
// - vim: set ft=text // - vim: set ft=text

View File

@ -12,9 +12,6 @@ type contextKey int
const ( const (
contextKeyLoggers contextKey = iota contextKeyLoggers contextKey = iota
contextKeyGeneralLogger
contextKeyControlLogger
contextKeyDataLogger
) )
/// All fields must be non-nil /// All fields must be non-nil

View File

@ -4,8 +4,8 @@ import (
"context" "context"
"time" "time"
"github.com/zrepl/zrepl/transport"
"github.com/zrepl/zrepl/rpc/transportmux" "github.com/zrepl/zrepl/rpc/transportmux"
"github.com/zrepl/zrepl/transport"
"github.com/zrepl/zrepl/util/envconst" "github.com/zrepl/zrepl/util/envconst"
) )

View File

@ -34,8 +34,6 @@ type Server struct {
dataServerServe serveFunc dataServerServe serveFunc
} }
type serverContextKey int
type HandlerContextInterceptor func(ctx context.Context) context.Context type HandlerContextInterceptor func(ctx context.Context) context.Context
// config must be valid (use its Validate function). // config must be valid (use its Validate function).

View File

@ -7,6 +7,7 @@ package transportmux
import ( import (
"context" "context"
"fmt" "fmt"
"io" "io"
"net" "net"
@ -49,10 +50,10 @@ func (l *demuxListener) Accept(ctx context.Context) (*transport.AuthConn, error)
return res.conn, res.err return res.conn, res.err
} }
type demuxAddr struct {} type demuxAddr struct{}
func (demuxAddr) Network() string { return "demux" } func (demuxAddr) Network() string { return "demux" }
func (demuxAddr) String() string { return "demux" } func (demuxAddr) String() string { return "demux" }
func (l *demuxListener) Addr() net.Addr { func (l *demuxListener) Addr() net.Addr {
return demuxAddr{} return demuxAddr{}
@ -64,7 +65,7 @@ func (l *demuxListener) Close() error { return nil } // TODO
// This is a protocol constant, changing it breaks the wire protocol. // This is a protocol constant, changing it breaks the wire protocol.
const LabelLen = 64 const LabelLen = 64
func padLabel(out []byte, label string) (error) { func padLabel(out []byte, label string) error {
if len(label) > LabelLen { if len(label) > LabelLen {
return fmt.Errorf("label %q exceeds max length (is %d, max %d)", label, len(label), LabelLen) return fmt.Errorf("label %q exceeds max length (is %d, max %d)", label, len(label), LabelLen)
} }
@ -142,7 +143,10 @@ func Demux(ctx context.Context, rawListener transport.AuthenticatedListener, lab
continue continue
} }
rawConn.SetDeadline(time.Time{}) err = rawConn.SetDeadline(time.Time{})
if err != nil {
getLog(ctx).WithError(err).Error("cannot reset deadline")
}
// blocking is intentional // blocking is intentional
demuxListener.conns <- acceptRes{conn: rawConn, err: nil} demuxListener.conns <- acceptRes{conn: rawConn, err: nil}
} }
@ -153,7 +157,7 @@ func Demux(ctx context.Context, rawListener transport.AuthenticatedListener, lab
type labeledConnecter struct { type labeledConnecter struct {
label []byte label []byte
transport.Connecter transport.Connecter
} }
func (c labeledConnecter) Connect(ctx context.Context) (transport.Wire, error) { func (c labeledConnecter) Connect(ctx context.Context) (transport.Wire, error) {
@ -169,7 +173,12 @@ func (c labeledConnecter) Connect(ctx context.Context) (transport.Wire, error) {
} }
if dl, ok := ctx.Deadline(); ok { if dl, ok := ctx.Deadline(); ok {
defer conn.SetDeadline(time.Time{}) defer func() {
err := conn.SetDeadline(time.Time{})
if err != nil {
getLog(ctx).WithError(err).Error("cannot reset deadline")
}
}()
if err := conn.SetDeadline(dl); err != nil { if err := conn.SetDeadline(dl); err != nil {
closeConn(err) closeConn(err)
return nil, err return nil, err
@ -202,4 +211,3 @@ func MuxConnecter(rawConnecter transport.Connecter, labels []string, timeout tim
} }
return ret, nil return ret, nil
} }
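
The transportmux hunks above replace fire-and-forget SetDeadline calls with checked ones: on the accept path a failed reset is logged, and on the connect path the deferred reset is wrapped in a closure so its error can be logged too. A sketch of the connect-side pattern, with log.Printf standing in for the package's getLog(ctx) logger:

    package example

    import (
    	"context"
    	"log"
    	"net"
    	"time"
    )

    // withDeadline applies the context deadline to conn for the duration of do
    // and always attempts to clear it afterwards, logging a failed reset.
    func withDeadline(ctx context.Context, conn net.Conn, do func() error) error {
    	if dl, ok := ctx.Deadline(); ok {
    		if err := conn.SetDeadline(dl); err != nil {
    			return err
    		}
    		defer func() {
    			// Failures here can no longer abort the operation, so they are
    			// only logged, mirroring the hunk above.
    			if err := conn.SetDeadline(time.Time{}); err != nil {
    				log.Printf("cannot reset deadline: %s", err)
    			}
    		}()
    	}
    	return do()
    }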

View File

@ -17,7 +17,7 @@ import (
type HandshakeMessage struct { type HandshakeMessage struct {
ProtocolVersion int ProtocolVersion int
Extensions []string Extensions []string
} }
// A HandshakeError describes what went wrong during the handshake. // A HandshakeError describes what went wrong during the handshake.
@ -25,7 +25,7 @@ type HandshakeMessage struct {
type HandshakeError struct { type HandshakeError struct {
msg string msg string
// If not nil, the underlying IO error that caused the handshake to fail. // If not nil, the underlying IO error that caused the handshake to fail.
IOError error IOError error
isAcceptError bool isAcceptError bool
} }
@ -36,10 +36,10 @@ func (e HandshakeError) Error() string { return e.msg }
// Like with net.OpErr (Go issue 6163), a client failing to handshake // Like with net.OpErr (Go issue 6163), a client failing to handshake
// should be a temporary Accept error toward the Listener . // should be a temporary Accept error toward the Listener .
func (e HandshakeError) Temporary() bool { func (e HandshakeError) Temporary() bool {
if e.isAcceptError { if e.isAcceptError {
return true return true
} }
te, ok := e.IOError.(interface{ Temporary() bool }); te, ok := e.IOError.(interface{ Temporary() bool })
return ok && te.Temporary() return ok && te.Temporary()
} }
@ -52,11 +52,11 @@ func (e HandshakeError) Timeout() bool {
return false return false
} }
func hsErr(format string, args... interface{}) *HandshakeError { func hsErr(format string, args ...interface{}) *HandshakeError {
return &HandshakeError{msg: fmt.Sprintf(format, args...)} return &HandshakeError{msg: fmt.Sprintf(format, args...)}
} }
func hsIOErr(err error, format string, args... interface{}) *HandshakeError { func hsIOErr(err error, format string, args ...interface{}) *HandshakeError {
return &HandshakeError{IOError: err, msg: fmt.Sprintf(format, args...)} return &HandshakeError{IOError: err, msg: fmt.Sprintf(format, args...)}
} }
@ -145,7 +145,7 @@ func (m *HandshakeMessage) DecodeReader(r io.Reader, maxLen int) error {
if exts[len(exts)-1] != "" { if exts[len(exts)-1] != "" {
return hsErr("unexpected data trailing after last extension newline") return hsErr("unexpected data trailing after last extension newline")
} }
m.Extensions = exts[0:len(exts)-1] m.Extensions = exts[0 : len(exts)-1]
return nil return nil
} }
@ -157,18 +157,29 @@ func DoHandshakeCurrentVersion(conn net.Conn, deadline time.Time) *HandshakeErro
const HandshakeMessageMaxLen = 16 * 4096 const HandshakeMessageMaxLen = 16 * 4096
func DoHandshakeVersion(conn net.Conn, deadline time.Time, version int) *HandshakeError { func DoHandshakeVersion(conn net.Conn, deadline time.Time, version int) (rErr *HandshakeError) {
ours := HandshakeMessage{ ours := HandshakeMessage{
ProtocolVersion: version, ProtocolVersion: version,
Extensions: nil, Extensions: nil,
} }
hsb, err := ours.Encode() hsb, err := ours.Encode()
if err != nil { if err != nil {
return hsErr("could not encode protocol banner: %s", err) return hsErr("could not encode protocol banner: %s", err)
} }
defer conn.SetDeadline(time.Time{}) err = conn.SetDeadline(deadline)
conn.SetDeadline(deadline) if err != nil {
return hsErr("could not set deadline for protocol banner handshake: %s", err)
}
defer func() {
if rErr != nil {
return
}
err := conn.SetDeadline(time.Time{})
if err != nil {
rErr = hsErr("could not reset deadline after protocol banner handshake: %s", err)
}
}()
_, err = io.Copy(conn, bytes.NewBuffer(hsb)) _, err = io.Copy(conn, bytes.NewBuffer(hsb))
if err != nil { if err != nil {
return hsErr("could not send protocol banner: %s", err) return hsErr("could not send protocol banner: %s", err)

View File

@ -3,13 +3,15 @@ package versionhandshake
import ( import (
"bytes" "bytes"
"fmt" "fmt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zrepl/zrepl/util/socketpair"
"io" "io"
"strings" "strings"
"testing" "testing"
"time" "time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/zrepl/zrepl/util/socketpair"
) )
func TestHandshakeMessage_Encode(t *testing.T) { func TestHandshakeMessage_Encode(t *testing.T) {
@ -23,8 +25,6 @@ func TestHandshakeMessage_Encode(t *testing.T) {
enc := string(encB) enc := string(encB)
t.Logf("enc: %s", enc) t.Logf("enc: %s", enc)
assert.False(t, strings.ContainsAny(enc[0:10], " ")) assert.False(t, strings.ContainsAny(enc[0:10], " "))
assert.True(t, enc[10] == ' ') assert.True(t, enc[10] == ' ')
@ -45,7 +45,7 @@ func TestHandshakeMessage_Encode(t *testing.T) {
func TestHandshakeMessage_Encode_InvalidProtocolVersion(t *testing.T) { func TestHandshakeMessage_Encode_InvalidProtocolVersion(t *testing.T) {
for _, pv := range []int{-1, 0, 10000, 10001} { for _, pv := range []int{-1, 0, 10000, 10001} {
t.Logf("testing invalid protocol version = %v", pv) t.Logf("testing invalid protocol version = %v", pv)
msg := HandshakeMessage{ msg := HandshakeMessage{
ProtocolVersion: pv, ProtocolVersion: pv,
@ -68,7 +68,7 @@ func TestHandshakeMessage_DecodeReader(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
out := HandshakeMessage{} out := HandshakeMessage{}
err = out.DecodeReader(bytes.NewReader([]byte(enc)), 4 * 4096) err = out.DecodeReader(bytes.NewReader([]byte(enc)), 4*4096)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, 2342, out.ProtocolVersion) assert.Equal(t, 2342, out.ProtocolVersion)
assert.Equal(t, 2, len(out.Extensions)) assert.Equal(t, 2, len(out.Extensions))

View File

@ -4,12 +4,13 @@ import (
"context" "context"
"net" "net"
"time" "time"
"github.com/zrepl/zrepl/transport" "github.com/zrepl/zrepl/transport"
) )
type HandshakeConnecter struct { type HandshakeConnecter struct {
connecter transport.Connecter connecter transport.Connecter
timeout time.Duration timeout time.Duration
} }
func (c HandshakeConnecter) Connect(ctx context.Context) (transport.Wire, error) { func (c HandshakeConnecter) Connect(ctx context.Context) (transport.Wire, error) {
@ -31,17 +32,17 @@ func (c HandshakeConnecter) Connect(ctx context.Context) (transport.Wire, error)
func Connecter(connecter transport.Connecter, timeout time.Duration) HandshakeConnecter { func Connecter(connecter transport.Connecter, timeout time.Duration) HandshakeConnecter {
return HandshakeConnecter{ return HandshakeConnecter{
connecter: connecter, connecter: connecter,
timeout: timeout, timeout: timeout,
} }
} }
// wrapper type that performs a protocol version handshake before returning the connection // wrapper type that performs a protocol version handshake before returning the connection
type HandshakeListener struct { type HandshakeListener struct {
l transport.AuthenticatedListener l transport.AuthenticatedListener
timeout time.Duration timeout time.Duration
} }
func (l HandshakeListener) Addr() (net.Addr) { return l.l.Addr() } func (l HandshakeListener) Addr() net.Addr { return l.l.Addr() }
func (l HandshakeListener) Close() error { return l.l.Close() } func (l HandshakeListener) Close() error { return l.l.Close() }

View File

@ -80,7 +80,9 @@ func (l *ClientAuthListener) Accept() (tcpConn *net.TCPConn, tlsConn *tls.Conn,
if err = tlsConn.Handshake(); err != nil { if err = tlsConn.Handshake(); err != nil {
goto CloseAndErr goto CloseAndErr
} }
tlsConn.SetDeadline(time.Time{}) if err = tlsConn.SetDeadline(time.Time{}); err != nil {
goto CloseAndErr
}
peerCerts = tlsConn.ConnectionState().PeerCertificates peerCerts = tlsConn.ConnectionState().PeerCertificates
if len(peerCerts) < 1 { if len(peerCerts) < 1 {

View File

@ -4,7 +4,9 @@ package fromconfig
import ( import (
"fmt" "fmt"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/transport" "github.com/zrepl/zrepl/transport"
"github.com/zrepl/zrepl/transport/local" "github.com/zrepl/zrepl/transport/local"
@ -13,10 +15,10 @@ import (
"github.com/zrepl/zrepl/transport/tls" "github.com/zrepl/zrepl/transport/tls"
) )
func ListenerFactoryFromConfig(g *config.Global, in config.ServeEnum) (transport.AuthenticatedListenerFactory,error) { func ListenerFactoryFromConfig(g *config.Global, in config.ServeEnum) (transport.AuthenticatedListenerFactory, error) {
var ( var (
l transport.AuthenticatedListenerFactory l transport.AuthenticatedListenerFactory
err error err error
) )
switch v := in.Ret.(type) { switch v := in.Ret.(type) {
@ -35,7 +37,6 @@ func ListenerFactoryFromConfig(g *config.Global, in config.ServeEnum) (transport
return l, err return l, err
} }
func ConnecterFromConfig(g *config.Global, in config.ConnectEnum) (transport.Connecter, error) { func ConnecterFromConfig(g *config.Global, in config.ConnectEnum) (transport.Connecter, error) {
var ( var (
connecter transport.Connecter connecter transport.Connecter

View File

@ -3,12 +3,13 @@ package local
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/transport" "github.com/zrepl/zrepl/transport"
) )
type LocalConnecter struct { type LocalConnecter struct {
listenerName string listenerName string
clientIdentity string clientIdentity string
} }
@ -26,4 +27,3 @@ func (c *LocalConnecter) Connect(dialCtx context.Context) (transport.Wire, error
l := GetLocalListener(c.listenerName) l := GetLocalListener(c.listenerName)
return l.Connect(dialCtx, c.clientIdentity) return l.Connect(dialCtx, c.clientIdentity)
} }

View File

@ -3,20 +3,21 @@ package local
import ( import (
"context" "context"
"fmt" "fmt"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/util/socketpair"
"net" "net"
"sync" "sync"
"github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/transport" "github.com/zrepl/zrepl/transport"
"github.com/zrepl/zrepl/util/socketpair"
) )
var localListeners struct { var localListeners struct {
m map[string]*LocalListener // listenerName -> listener m map[string]*LocalListener // listenerName -> listener
init sync.Once init sync.Once
mtx sync.Mutex mtx sync.Mutex
} }
func GetLocalListener(listenerName string) (*LocalListener) { func GetLocalListener(listenerName string) *LocalListener {
localListeners.init.Do(func() { localListeners.init.Do(func() {
localListeners.m = make(map[string]*LocalListener) localListeners.m = make(map[string]*LocalListener)
@ -36,12 +37,12 @@ func GetLocalListener(listenerName string) (*LocalListener) {
type connectRequest struct { type connectRequest struct {
clientIdentity string clientIdentity string
callback chan connectResult callback chan connectResult
} }
type connectResult struct { type connectResult struct {
conn transport.Wire conn transport.Wire
err error err error
} }
type LocalListener struct { type LocalListener struct {
@ -60,7 +61,7 @@ func (l *LocalListener) Connect(dialCtx context.Context, clientIdentity string)
// place request // place request
req := connectRequest{ req := connectRequest{
clientIdentity: clientIdentity, clientIdentity: clientIdentity,
callback: make(chan connectResult), callback: make(chan connectResult),
} }
select { select {
case l.connects <- req: case l.connects <- req:
@ -70,7 +71,7 @@ func (l *LocalListener) Connect(dialCtx context.Context, clientIdentity string)
// wait for listener response // wait for listener response
select { select {
case connRes := <- req.callback: case connRes := <-req.callback:
conn, err = connRes.conn, connRes.err conn, err = connRes.conn, connRes.err
case <-dialCtx.Done(): case <-dialCtx.Done():
close(req.callback) // sending to the channel afterwards will panic, the listener has to catch this close(req.callback) // sending to the channel afterwards will panic, the listener has to catch this
@ -88,7 +89,7 @@ func (localAddr) Network() string { return "local" }
func (a localAddr) String() string { return a.S } func (a localAddr) String() string { return a.S }
func (l *LocalListener) Addr() (net.Addr) { return localAddr{"<listening>"} } func (l *LocalListener) Addr() net.Addr { return localAddr{"<listening>"} }
func (l *LocalListener) Accept(ctx context.Context) (*transport.AuthConn, error) { func (l *LocalListener) Accept(ctx context.Context) (*transport.AuthConn, error) {
respondToRequest := func(req connectRequest, res connectResult) (err error) { respondToRequest := func(req connectRequest, res connectResult) (err error) {
@ -163,12 +164,12 @@ func (l *LocalListener) Close() error {
return nil return nil
} }
func LocalListenerFactoryFromConfig(g *config.Global, in *config.LocalServe) (transport.AuthenticatedListenerFactory,error) { func LocalListenerFactoryFromConfig(g *config.Global, in *config.LocalServe) (transport.AuthenticatedListenerFactory, error) {
if in.ListenerName == "" { if in.ListenerName == "" {
return nil, fmt.Errorf("ListenerName must not be empty") return nil, fmt.Errorf("ListenerName must not be empty")
} }
listenerName := in.ListenerName listenerName := in.ListenerName
lf := func() (transport.AuthenticatedListener,error) { lf := func() (transport.AuthenticatedListener, error) {
return GetLocalListener(listenerName), nil return GetLocalListener(listenerName), nil
} }
return lf, nil return lf, nil

View File

@ -2,12 +2,14 @@ package ssh
import ( import (
"context" "context"
"time"
"github.com/jinzhu/copier" "github.com/jinzhu/copier"
"github.com/pkg/errors" "github.com/pkg/errors"
"github.com/problame/go-netssh" "github.com/problame/go-netssh"
"github.com/zrepl/zrepl/config" "github.com/zrepl/zrepl/config"
"github.com/zrepl/zrepl/transport" "github.com/zrepl/zrepl/transport"
"time"
) )
type SSHStdinserverConnecter struct { type SSHStdinserverConnecter struct {
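
Several of the remaining hunks, like the ssh connecter above, only regroup imports the way goimports writes them: standard library first, then a blank line, then third-party modules. Illustrative layout only; the file is made up, while the package paths are the ones used elsewhere in this diff:

    package example

    import (
    	"context"
    	"time"

    	"github.com/zrepl/zrepl/config"
    	"github.com/zrepl/zrepl/transport"
    )

    // The blank-identifier declarations below only keep this illustration
    // compiling; the point is the import grouping above.
    var (
    	_ context.Context
    	_ = time.Second
    	_ *config.Global
    	_ transport.Wire
    )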

Some files were not shown because too many files have changed in this diff.