Run tests in serial and update multi-peer test (#269)
Updates the test workflows to run test packages serially, avoiding collisions on ports and resource names. The UNIX tests now use go test's -exec sudo flag instead of invoking the Go binary through sudo, the no-longer-needed limits.d configuration on Linux is commented out, and a 5-minute test timeout is added. Updates the multi-peer test in client/internal/engine_test.go to fail properly when creating or starting a peer engine instance fails. Because some test operations on Windows are slow, we experiment with disabling Windows Defender real-time monitoring before restoring the cache and checking out the repository, re-enabling it before the tests run. Also disables the extra logging of the Windows interface.
parent 957474817f
commit 1cd1e84290

.github/workflows/golang-test-darwin.yml (2 changed lines)
@@ -26,4 +26,4 @@ jobs:
         run: go mod tidy

       - name: Test
-        run: GOBIN=$(which go) && sudo --preserve-env=GOROOT $GOBIN test ./...
+        run: go test -exec sudo -timeout 5m -p 1 ./...
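Note on the new flag: go test's -exec flag runs each compiled test binary through the named program, here sudo, so the tests get root privileges while the build itself stays unprivileged (the old line instead ran the go tool under sudo). A minimal sketch of how a test package can make that requirement explicit; the package name and messages are illustrative, not from this repository:

package iface_test // hypothetical package name, for illustration only

import (
	"fmt"
	"os"
	"testing"
)

// TestMain runs before the package's tests. With `go test -exec sudo ./...`
// the compiled test binary is started via sudo, so Geteuid() reports 0 and
// tests that create network interfaces can proceed.
func TestMain(m *testing.M) {
	if os.Geteuid() != 0 {
		fmt.Println("these tests need root; run: go test -exec sudo ./...")
		os.Exit(1)
	}
	os.Exit(m.Run())
}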
.github/workflows/golang-test-linux.yml (26 changed lines)
@@ -11,18 +11,18 @@ jobs:
         uses: actions/setup-go@v2
         with:
           go-version: ${{ matrix.go-version }}
-      - name: update limits.d
-        run: |
-          cat <<'EOF' | sudo tee -a /etc/security/limits.d/wt.conf
-          root soft nproc 65535
-          root hard nproc 65535
-          root soft nofile 65535
-          root hard nofile 65535
-          $(whoami) soft nproc 65535
-          $(whoami) hard nproc 65535
-          $(whoami) soft nofile 65535
-          $(whoami) hard nofile 65535
-          EOF
+#      - name: update limits.d
+#        run: |
+#          cat <<'EOF' | sudo tee -a /etc/security/limits.d/wt.conf
+#          root soft nproc 65535
+#          root hard nproc 65535
+#          root soft nofile 65535
+#          root hard nofile 65535
+#          $(whoami) soft nproc 65535
+#          $(whoami) hard nproc 65535
+#          $(whoami) soft nofile 65535
+#          $(whoami) hard nofile 65535
+#          EOF

       - name: Cache Go modules
         uses: actions/cache@v2

@@ -39,4 +39,4 @@ jobs:
         run: go mod tidy

       - name: Test
-        run: GOBIN=$(which go) && sudo --preserve-env=GOROOT $GOBIN test ./...
+        run: go test -exec sudo -timeout 5m -p 1 ./...
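Note on -p 1: this caps package-level parallelism at one, so test binaries from different packages never run concurrently, which is what prevents two packages from racing for the same fixed port or interface name. A hypothetical sketch of the collision this avoids; the package, test name, and port are made up:

package server_test // hypothetical; any two packages doing this would collide

import (
	"net"
	"testing"
)

// Imagine two test packages that both bind a fixed port. When go test runs
// packages in parallel (the default), the second Listen fails; with -p 1 the
// packages run one after another and the port is always free.
func TestServerListens(t *testing.T) {
	ln, err := net.Listen("tcp", "127.0.0.1:33073") // fixed, shared port (made up)
	if err != nil {
		t.Fatalf("port taken, likely by a test binary of another package: %v", err)
	}
	defer ln.Close()
}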
.github/workflows/golang-test-windows.yml (12 changed lines)
@@ -22,6 +22,9 @@ jobs:
       go-version: [1.17.x]
     runs-on: windows-latest
     steps:
+      - name: disable defender
+        run: Set-MpPreference -DisableRealtimeMonitoring $true
+
       - name: Checkout code
         uses: actions/checkout@v2

@@ -39,13 +42,16 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-go-

+      - name: enable defender
+        run: Set-MpPreference -DisableRealtimeMonitoring $false
+
       - uses: actions/download-artifact@v2
         with:
           name: syso
           path: iface\

-      - name: Install modules
-        run: go mod tidy
+#      - name: Install modules
+#        run: go mod tidy

       - name: Test
-        run: go test -tags=load_wgnt_from_rsrc ./...
+        run: go test -tags=load_wgnt_from_rsrc -timeout 5m -p 1 ./...
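Note on -timeout 5m: the flag bounds the whole test binary, not individual tests; when exceeded, go test panics and prints every goroutine's stack, turning a silent CI hang into a diagnosable failure. An illustrative, made-up example of a test that would trip it:

package hang_test // made-up package, only to illustrate the flag

import (
	"testing"
	"time"
)

// With `go test -timeout 5m` this test never finishes in time; after five
// minutes the test binary panics and dumps every goroutine's stack trace.
func TestNeverFinishes(t *testing.T) {
	time.Sleep(10 * time.Minute)
}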
client/internal/engine_test.go

@@ -308,11 +308,18 @@ func TestEngine_MultiplePeers(t *testing.T) {
 		go func() {
 			engine, err := createEngine(ctx, cancel, setupKey, j, mport, sport)
 			if err != nil {
+				wg.Done()
+				t.Errorf("unable to create the engine for peer %d with error %v", j, err)
 				return
 			}
 			mu.Lock()
 			defer mu.Unlock()
-			engine.Start() //nolint
+			err = engine.Start()
+			if err != nil {
+				t.Errorf("unable to start engine for peer %d with error %v", j, err)
+				wg.Done()
+				return
+			}
 			engines = append(engines, engine)
 			wg.Done()
 		}()
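The point of the extra wg.Done() calls: every exit path of the goroutine must release the WaitGroup, otherwise a failed peer leaves wg.Wait() blocked until the suite times out. A self-contained sketch of the pattern, using defer as a slightly more idiomatic variant of the same idea (names are made up):

package main

import (
	"fmt"
	"sync"
)

// start stands in for createEngine plus engine.Start; even ids fail.
func start(id int) error {
	if id%2 == 0 {
		return fmt.Errorf("peer %d failed to start", id)
	}
	return nil
}

func main() {
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done() // released on every path, including early returns
			if err := start(id); err != nil {
				fmt.Println(err)
				return
			}
			fmt.Printf("peer %d started\n", id)
		}(i)
	}
	wg.Wait() // would deadlock if any path skipped Done
}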
@@ -320,33 +327,39 @@ func TestEngine_MultiplePeers(t *testing.T) {

 	// wait until all have been created and started
 	wg.Wait()
+	if len(engines) != numPeers {
+		t.Fatal("not all peers was started")
+	}
 	// check whether all the peer have expected peers connected

 	expectedConnected := numPeers * (numPeers - 1)

 	// adjust according to timeouts
 	timeout := 50 * time.Second
 	timeoutChan := time.After(timeout)
+	ticker := time.NewTicker(time.Second)
+	defer ticker.Stop()
+loop:
 	for {
 		select {
 		case <-timeoutChan:
 			t.Fatalf("waiting for expected connections timeout after %s", timeout.String())
-			return
-		default:
-		}
-		time.Sleep(time.Second)
+			break loop
+		case <-ticker.C:
 			totalConnected := 0
 			for _, engine := range engines {
 				totalConnected = totalConnected + len(engine.GetConnectedPeers())
 			}
 			if totalConnected == expectedConnected {
-				log.Debugf("total connected=%d", totalConnected)
-				break
+				log.Infof("total connected=%d", totalConnected)
+				break loop
 			}
 			log.Infof("total connected=%d", totalConnected)
 		}
+	}
 	// cleanup test
-	for _, peerEngine := range engines {
+	for n, peerEngine := range engines {
+		t.Logf("stopping peer with interface %s from multipeer test, loopIndex %d", peerEngine.wgInterface.Name, n)
 		errStop := peerEngine.mgmClient.Close()
 		if errStop != nil {
 			log.Infoln("got error trying to close management clients from engine: ", errStop)
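The rewritten wait loop swaps the busy default branch plus time.Sleep for a time.Ticker, so the waiting happens inside the select, and the loop label lets both the timeout and the success path break out of the for from within the select, where a bare break would only leave the select. A runnable sketch of the same loop shape, with a made-up counter standing in for GetConnectedPeers:

package main

import (
	"fmt"
	"time"
)

func main() {
	timeoutChan := time.After(3 * time.Second)
	ticker := time.NewTicker(500 * time.Millisecond)
	defer ticker.Stop()

	connected := 0
loop:
	for {
		select {
		case <-timeoutChan:
			fmt.Println("timed out waiting for connections")
			break loop // a plain break here would only exit the select
		case <-ticker.C:
			connected++ // stand-in for summing engine.GetConnectedPeers()
			if connected == 3 {
				fmt.Println("all peers connected")
				break loop
			}
			fmt.Printf("connected so far: %d\n", connected)
		}
	}
}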
@@ -20,11 +20,6 @@ func (w *WGIface) Create() error {
 	}
 	w.Interface = adapter
 	luid := adapter.LUID()
-	err = adapter.SetLogging(driver.AdapterLogOn)
-	if err != nil {
-		err = fmt.Errorf("Error enabling adapter logging: %w", err)
-		return err
-	}
 	err = adapter.SetAdapterState(driver.AdapterStateUp)
 	if err != nil {
 		return err