Merge branch 'main' into peers-get-account-refactoring

commit a3663fb444
Author: bcmmbaga
Date:   2024-11-29 10:15:10 +03:00
GPG Key ID: 511EED5C928AD547 (no known key found for this signature in database)

3 changed files with 141 additions and 60 deletions


@@ -52,6 +52,47 @@ jobs:
       - name: Test
         run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} NETBIRD_STORE_ENGINE=${{ matrix.store }} CI=true go test -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' -timeout 10m -p 1 ./...
+
+  benchmark:
+    strategy:
+      fail-fast: false
+      matrix:
+        arch: [ '386','amd64' ]
+        store: [ 'sqlite', 'postgres' ]
+    runs-on: ubuntu-22.04
+    steps:
+      - name: Install Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: "1.23.x"
+      - name: Cache Go modules
+        uses: actions/cache@v4
+        with:
+          path: ~/go/pkg/mod
+          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
+          restore-keys: |
+            ${{ runner.os }}-go-
+      - name: Checkout code
+        uses: actions/checkout@v4
+      - name: Install dependencies
+        run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev gcc-multilib libpcap-dev
+      - name: Install 32-bit libpcap
+        if: matrix.arch == '386'
+        run: sudo dpkg --add-architecture i386 && sudo apt update && sudo apt-get install -y libpcap0.8-dev:i386
+      - name: Install modules
+        run: go mod tidy
+      - name: check git status
+        run: git --no-pager diff --exit-code
+      - name: Test
+        run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} NETBIRD_STORE_ENGINE=${{ matrix.store }} CI=true go test -run=^$ -bench=. -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' -timeout 10m -p 1 ./...
+
   test_client_on_docker:
     runs-on: ubuntu-20.04
     steps:
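The new benchmark job uses -run=^$ -bench=. so no unit tests match and only the benchmarks run, and it keeps CI=true and NETBIRD_STORE_ENGINE visible to the test binary through sudo --preserve-env. A minimal, self-contained benchmark in the same style as the ones changed below; the names and thresholds here are illustrative, not taken from this commit:

    package example

    import (
        "os"
        "testing"
        "time"
    )

    func BenchmarkExample(b *testing.B) {
        start := time.Now()
        for i := 0; i < b.N; i++ {
            time.Sleep(time.Millisecond) // stand-in for the operation under test
        }
        duration := time.Since(start)

        // Same ms/op calculation the changed benchmarks use.
        msPerOp := float64(duration.Nanoseconds()) / float64(b.N) / 1e6
        b.ReportMetric(msPerOp, "ms/op")

        // CI runners are slower and noisier than local machines, so allow a wider bound there.
        maxExpected := 2.0
        if os.Getenv("CI") == "true" {
            maxExpected = 5.0
        }
        if msPerOp > maxExpected {
            b.Fatalf("too slow: %.2f ms/op (max %.2f ms/op)", msPerOp, maxExpected)
        }
    }

Gating on wall-clock ms/op like this makes CI fail loudly on performance regressions, at the cost of occasional flakiness on busy runners.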


@@ -2985,15 +2985,18 @@ func BenchmarkSyncAndMarkPeer(b *testing.B) {
         name   string
         peers  int
         groups int
-        minMsPerOp float64
-        maxMsPerOp float64
+        // We need different expectations for CI/CD and local runs because of the different performance characteristics
+        minMsPerOpLocal float64
+        maxMsPerOpLocal float64
+        minMsPerOpCICD  float64
+        maxMsPerOpCICD  float64
     }{
-        {"Small", 50, 5, 1, 3},
-        {"Medium", 500, 100, 7, 13},
-        {"Large", 5000, 200, 65, 80},
-        {"Small single", 50, 10, 1, 3},
-        {"Medium single", 500, 10, 7, 13},
-        {"Large 5", 5000, 15, 65, 80},
+        {"Small", 50, 5, 1, 3, 4, 10},
+        {"Medium", 500, 100, 7, 13, 10, 60},
+        {"Large", 5000, 200, 65, 80, 60, 170},
+        {"Small single", 50, 10, 1, 3, 4, 60},
+        {"Medium single", 500, 10, 7, 13, 10, 26},
+        {"Large 5", 5000, 15, 65, 80, 60, 170},
     }

     log.SetOutput(io.Discard)
@@ -3026,12 +3029,19 @@ func BenchmarkSyncAndMarkPeer(b *testing.B) {
             msPerOp := float64(duration.Nanoseconds()) / float64(b.N) / 1e6
             b.ReportMetric(msPerOp, "ms/op")

-            if msPerOp < bc.minMsPerOp {
-                b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, bc.minMsPerOp)
+            minExpected := bc.minMsPerOpLocal
+            maxExpected := bc.maxMsPerOpLocal
+            if os.Getenv("CI") == "true" {
+                minExpected = bc.minMsPerOpCICD
+                maxExpected = bc.maxMsPerOpCICD
             }
-            if msPerOp > bc.maxMsPerOp {
-                b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, bc.maxMsPerOp)
+            if msPerOp < minExpected {
+                b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, minExpected)
+            }
+            if msPerOp > maxExpected {
+                b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, maxExpected)
             }
         })
     }
@@ -3042,15 +3052,18 @@ func BenchmarkLoginPeer_ExistingPeer(b *testing.B) {
         name   string
         peers  int
         groups int
-        minMsPerOp float64
-        maxMsPerOp float64
+        // We need different expectations for CI/CD and local runs because of the different performance characteristics
+        minMsPerOpLocal float64
+        maxMsPerOpLocal float64
+        minMsPerOpCICD  float64
+        maxMsPerOpCICD  float64
     }{
-        {"Small", 50, 5, 102, 110},
-        {"Medium", 500, 100, 105, 140},
-        {"Large", 5000, 200, 160, 200},
-        {"Small single", 50, 10, 102, 110},
-        {"Medium single", 500, 10, 105, 140},
-        {"Large 5", 5000, 15, 160, 200},
+        {"Small", 50, 5, 102, 110, 102, 120},
+        {"Medium", 500, 100, 105, 140, 105, 170},
+        {"Large", 5000, 200, 160, 200, 160, 270},
+        {"Small single", 50, 10, 102, 110, 102, 120},
+        {"Medium single", 500, 10, 105, 140, 105, 170},
+        {"Large 5", 5000, 15, 160, 200, 160, 270},
     }

     log.SetOutput(io.Discard)
@@ -3090,12 +3103,19 @@ func BenchmarkLoginPeer_ExistingPeer(b *testing.B) {
            msPerOp := float64(duration.Nanoseconds()) / float64(b.N) / 1e6
             b.ReportMetric(msPerOp, "ms/op")

-            if msPerOp < bc.minMsPerOp {
-                b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, bc.minMsPerOp)
+            minExpected := bc.minMsPerOpLocal
+            maxExpected := bc.maxMsPerOpLocal
+            if os.Getenv("CI") == "true" {
+                minExpected = bc.minMsPerOpCICD
+                maxExpected = bc.maxMsPerOpCICD
             }
-            if msPerOp > bc.maxMsPerOp {
-                b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, bc.maxMsPerOp)
+            if msPerOp < minExpected {
+                b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, minExpected)
+            }
+            if msPerOp > maxExpected {
+                b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, maxExpected)
             }
         })
     }
@@ -3106,15 +3126,18 @@ func BenchmarkLoginPeer_NewPeer(b *testing.B) {
         name   string
         peers  int
         groups int
-        minMsPerOp float64
-        maxMsPerOp float64
+        // We need different expectations for CI/CD and local runs because of the different performance characteristics
+        minMsPerOpLocal float64
+        maxMsPerOpLocal float64
+        minMsPerOpCICD  float64
+        maxMsPerOpCICD  float64
     }{
-        {"Small", 50, 5, 107, 120},
-        {"Medium", 500, 100, 105, 140},
-        {"Large", 5000, 200, 180, 220},
-        {"Small single", 50, 10, 107, 120},
-        {"Medium single", 500, 10, 105, 140},
-        {"Large 5", 5000, 15, 180, 220},
+        {"Small", 50, 5, 107, 120, 107, 140},
+        {"Medium", 500, 100, 105, 140, 105, 170},
+        {"Large", 5000, 200, 180, 220, 180, 320},
+        {"Small single", 50, 10, 107, 120, 105, 140},
+        {"Medium single", 500, 10, 105, 140, 105, 170},
+        {"Large 5", 5000, 15, 180, 220, 180, 320},
     }

     log.SetOutput(io.Discard)
@@ -3154,12 +3177,19 @@ func BenchmarkLoginPeer_NewPeer(b *testing.B) {
             msPerOp := float64(duration.Nanoseconds()) / float64(b.N) / 1e6
             b.ReportMetric(msPerOp, "ms/op")

-            if msPerOp < bc.minMsPerOp {
-                b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, bc.minMsPerOp)
+            minExpected := bc.minMsPerOpLocal
+            maxExpected := bc.maxMsPerOpLocal
+            if os.Getenv("CI") == "true" {
+                minExpected = bc.minMsPerOpCICD
+                maxExpected = bc.maxMsPerOpCICD
             }
-            if msPerOp > bc.maxMsPerOp {
-                b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, bc.maxMsPerOp)
+            if msPerOp < minExpected {
+                b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, minExpected)
+            }
+            if msPerOp > maxExpected {
+                b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, maxExpected)
             }
         })
     }


@@ -838,15 +838,18 @@ func BenchmarkUpdateAccountPeers(b *testing.B) {
         name   string
         peers  int
         groups int
-        minMsPerOp float64
-        maxMsPerOp float64
+        // We need different expectations for CI/CD and local runs because of the different performance characteristics
+        minMsPerOpLocal float64
+        maxMsPerOpLocal float64
+        minMsPerOpCICD  float64
+        maxMsPerOpCICD  float64
     }{
-        {"Small", 50, 5, 90, 120},
-        {"Medium", 500, 100, 110, 140},
-        {"Large", 5000, 200, 800, 1300},
-        {"Small single", 50, 10, 90, 120},
-        {"Medium single", 500, 10, 110, 170},
-        {"Large 5", 5000, 15, 1300, 1800},
+        {"Small", 50, 5, 90, 120, 90, 120},
+        {"Medium", 500, 100, 110, 140, 120, 200},
+        {"Large", 5000, 200, 800, 1300, 2500, 3600},
+        {"Small single", 50, 10, 90, 120, 90, 120},
+        {"Medium single", 500, 10, 110, 170, 120, 200},
+        {"Large 5", 5000, 15, 1300, 1800, 5000, 6000},
     }

     log.SetOutput(io.Discard)
@@ -885,12 +888,19 @@ func BenchmarkUpdateAccountPeers(b *testing.B) {
             msPerOp := float64(duration.Nanoseconds()) / float64(b.N) / 1e6
             b.ReportMetric(msPerOp, "ms/op")

-            if msPerOp < bc.minMsPerOp {
-                b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, bc.minMsPerOp)
+            minExpected := bc.minMsPerOpLocal
+            maxExpected := bc.maxMsPerOpLocal
+            if os.Getenv("CI") == "true" {
+                minExpected = bc.minMsPerOpCICD
+                maxExpected = bc.maxMsPerOpCICD
             }
-            if msPerOp > bc.maxMsPerOp {
-                b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, bc.maxMsPerOp)
+            if msPerOp < minExpected {
+                b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", bc.name, msPerOp, minExpected)
+            }
+            if msPerOp > maxExpected {
+                b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", bc.name, msPerOp, maxExpected)
             }
         })
     }
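The same local/CI threshold selection block appears in all four changed benchmarks. A small helper could factor it out; this is only a sketch under that assumption, with a hypothetical name and package, since the commit itself keeps the logic inline:

    package example

    import (
        "os"
        "testing"
    )

    // checkMsPerOp reports the metric and enforces the environment-specific
    // bounds taken from the benchmark table. Hypothetical helper; the diffs
    // above inline this logic in each benchmark.
    func checkMsPerOp(b *testing.B, name string, msPerOp, minLocal, maxLocal, minCICD, maxCICD float64) {
        b.Helper()
        b.ReportMetric(msPerOp, "ms/op")

        minExpected, maxExpected := minLocal, maxLocal
        if os.Getenv("CI") == "true" {
            minExpected, maxExpected = minCICD, maxCICD
        }
        if msPerOp < minExpected {
            b.Fatalf("Benchmark %s failed: too fast (%.2f ms/op, minimum %.2f ms/op)", name, msPerOp, minExpected)
        }
        if msPerOp > maxExpected {
            b.Fatalf("Benchmark %s failed: too slow (%.2f ms/op, maximum %.2f ms/op)", name, msPerOp, maxExpected)
        }
    }

Each benchmark would then end with a single call such as checkMsPerOp(b, bc.name, msPerOp, bc.minMsPerOpLocal, bc.maxMsPerOpLocal, bc.minMsPerOpCICD, bc.maxMsPerOpCICD).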