Run tests in serial and update multi-peer test (#269)

Updates the test workflows to run tests serially, avoiding collisions
of ports and resource names.
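
For context on the collision: two test binaries running in parallel that bind
the same fixed port will race, and the second bind fails. A minimal sketch of
that failure mode (the port number is illustrative, not the one the tests use):

package main

import (
	"fmt"
	"net"
)

func main() {
	// The first test binary grabs the port.
	first, err := net.ListenUDP("udp", &net.UDPAddr{Port: 51820})
	if err != nil {
		fmt.Println("first bind failed:", err)
		return
	}
	defer first.Close()

	// A second parallel binary binding the same port fails with
	// "address already in use"; this is what -p 1 serializes away.
	if _, err := net.ListenUDP("udp", &net.UDPAddr{Port: 51820}); err != nil {
		fmt.Println("second bind failed:", err)
	}
}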

Also switched the UNIX tests to the -exec sudo flag, removed the unneeded
limits configuration on Linux, and added a 5-minute timeout.
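
The -exec flag makes go test run each compiled test binary through the named
program, so -exec sudo elevates the test binaries themselves instead of
wrapping the go toolchain in sudo. A rough sketch of what such a wrapper
program does (names hypothetical):

// go test -exec ./wrapper invokes this program as
// "wrapper <test-binary> [args...]"; the wrapper decides how to
// launch the binary (sudo plays the same role).
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: wrapper <test-binary> [args...]")
		os.Exit(2)
	}
	cmd := exec.Command(os.Args[1], os.Args[2:]...)
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		os.Exit(1)
	}
}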

Updated the multi-peer test in client/internal/engine_test.go so it
fails properly when creating or starting a peer engine instance fails.
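
The fix follows the usual Go pattern for fallible goroutines in tests: signal
the WaitGroup on every exit path, report failures instead of ignoring them,
and verify the result count after Wait. A minimal sketch with hypothetical
names:

package main

import (
	"fmt"
	"sync"
)

// startWorker stands in for creating and starting a peer engine;
// worker 2 fails to exercise the error path.
func startWorker(id int) error {
	if id == 2 {
		return fmt.Errorf("worker %d failed to start", id)
	}
	return nil
}

func main() {
	var wg sync.WaitGroup
	var mu sync.Mutex
	var started []int

	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done() // signal the WaitGroup on every exit path
			if err := startWorker(id); err != nil {
				fmt.Println("error:", err) // the test reports this via t.Errorf
				return
			}
			mu.Lock()
			defer mu.Unlock()
			started = append(started, id)
		}(i)
	}

	wg.Wait()
	// Verify every worker actually started, mirroring the test's
	// new len(engines) != numPeers check.
	if len(started) != 4 {
		fmt.Println("not all workers started:", started)
	}
}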

As some test operations on Windows are slow, we will experiment with
disabling Defender before restoring the cache and checking out the
repository, then re-enabling it to run the tests.

Disabled extra logging for the Windows interface.
Maycon Santos 2022-03-16 11:02:06 +01:00 committed by GitHub
parent 957474817f
commit 1cd1e84290
5 changed files with 51 additions and 37 deletions

@@ -26,4 +26,4 @@ jobs:
         run: go mod tidy
       - name: Test
-        run: GOBIN=$(which go) && sudo --preserve-env=GOROOT $GOBIN test ./...
+        run: go test -exec sudo -timeout 5m -p 1 ./...

@@ -11,18 +11,18 @@ jobs:
         uses: actions/setup-go@v2
         with:
           go-version: ${{ matrix.go-version }}
-      - name: update limits.d
-        run: |
-          cat <<'EOF' | sudo tee -a /etc/security/limits.d/wt.conf
-          root soft nproc 65535
-          root hard nproc 65535
-          root soft nofile 65535
-          root hard nofile 65535
-          $(whoami) soft nproc 65535
-          $(whoami) hard nproc 65535
-          $(whoami) soft nofile 65535
-          $(whoami) hard nofile 65535
-          EOF
+      # - name: update limits.d
+      #   run: |
+      #     cat <<'EOF' | sudo tee -a /etc/security/limits.d/wt.conf
+      #     root soft nproc 65535
+      #     root hard nproc 65535
+      #     root soft nofile 65535
+      #     root hard nofile 65535
+      #     $(whoami) soft nproc 65535
+      #     $(whoami) hard nproc 65535
+      #     $(whoami) soft nofile 65535
+      #     $(whoami) hard nofile 65535
+      #     EOF
       - name: Cache Go modules
         uses: actions/cache@v2
@@ -39,4 +39,4 @@ jobs:
         run: go mod tidy
       - name: Test
-        run: GOBIN=$(which go) && sudo --preserve-env=GOROOT $GOBIN test ./...
+        run: go test -exec sudo -timeout 5m -p 1 ./...

@@ -22,6 +22,9 @@ jobs:
         go-version: [1.17.x]
     runs-on: windows-latest
     steps:
+      - name: disable defender
+        run: Set-MpPreference -DisableRealtimeMonitoring $true
       - name: Checkout code
         uses: actions/checkout@v2
@@ -39,13 +42,16 @@ jobs:
           restore-keys: |
             ${{ runner.os }}-go-
+      - name: enable defender
+        run: Set-MpPreference -DisableRealtimeMonitoring $false
       - uses: actions/download-artifact@v2
         with:
           name: syso
           path: iface\
-      - name: Install modules
-        run: go mod tidy
+      # - name: Install modules
+      #   run: go mod tidy
       - name: Test
-        run: go test -tags=load_wgnt_from_rsrc ./...
+        run: go test -tags=load_wgnt_from_rsrc -timeout 5m -p 1 ./...

@@ -308,11 +308,18 @@ func TestEngine_MultiplePeers(t *testing.T) {
 		go func() {
 			engine, err := createEngine(ctx, cancel, setupKey, j, mport, sport)
 			if err != nil {
 				wg.Done()
+				t.Errorf("unable to create the engine for peer %d with error %v", j, err)
+				return
 			}
 			mu.Lock()
 			defer mu.Unlock()
-			engine.Start() //nolint
+			err = engine.Start()
+			if err != nil {
+				t.Errorf("unable to start engine for peer %d with error %v", j, err)
+				wg.Done()
+				return
+			}
 			engines = append(engines, engine)
 			wg.Done()
 		}()
@@ -320,33 +327,39 @@ func TestEngine_MultiplePeers(t *testing.T) {
 	// wait until all have been created and started
 	wg.Wait()
+	if len(engines) != numPeers {
+		t.Fatal("not all peers was started")
+	}
 	// check whether all the peer have expected peers connected
 	expectedConnected := numPeers * (numPeers - 1)
 	// adjust according to timeouts
 	timeout := 50 * time.Second
 	timeoutChan := time.After(timeout)
+	ticker := time.NewTicker(time.Second)
+	defer ticker.Stop()
+loop:
 	for {
 		select {
 		case <-timeoutChan:
 			t.Fatalf("waiting for expected connections timeout after %s", timeout.String())
-			return
-		default:
+			break loop
+		case <-ticker.C:
+			totalConnected := 0
+			for _, engine := range engines {
+				totalConnected = totalConnected + len(engine.GetConnectedPeers())
+			}
+			if totalConnected == expectedConnected {
+				log.Infof("total connected=%d", totalConnected)
+				break loop
+			}
+			log.Infof("total connected=%d", totalConnected)
 		}
-		time.Sleep(time.Second)
-		totalConnected := 0
-		for _, engine := range engines {
-			totalConnected = totalConnected + len(engine.GetConnectedPeers())
-		}
-		if totalConnected == expectedConnected {
-			log.Debugf("total connected=%d", totalConnected)
-			break
-		}
-		log.Infof("total connected=%d", totalConnected)
 	}
 	// cleanup test
-	for _, peerEngine := range engines {
+	for n, peerEngine := range engines {
+		t.Logf("stopping peer with interface %s from multipeer test, loopIndex %d", peerEngine.wgInterface.Name, n)
 		errStop := peerEngine.mgmClient.Close()
 		if errStop != nil {
 			log.Infoln("got error trying to close management clients from engine: ", errStop)

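The reworked wait loop above replaces sleep-based polling with the standard
ticker-plus-timeout select. Reduced to its shape (helper name hypothetical):

package main

import (
	"fmt"
	"time"
)

// waitFor polls cond every second until it returns true or the
// timeout elapses; this is the shape of the test's new wait loop.
func waitFor(timeout time.Duration, cond func() bool) bool {
	timeoutChan := time.After(timeout)
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-timeoutChan:
			return false
		case <-ticker.C:
			if cond() {
				return true
			}
		}
	}
}

func main() {
	start := time.Now()
	ok := waitFor(5*time.Second, func() bool {
		return time.Since(start) > 2*time.Second
	})
	fmt.Println("condition met:", ok)
}
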
@@ -20,11 +20,6 @@ func (w *WGIface) Create() error {
 	}
 	w.Interface = adapter
 	luid := adapter.LUID()
-	err = adapter.SetLogging(driver.AdapterLogOn)
-	if err != nil {
-		err = fmt.Errorf("Error enabling adapter logging: %w", err)
-		return err
-	}
 	err = adapter.SetAdapterState(driver.AdapterStateUp)
 	if err != nil {
 		return err