Implement SQLite Store using gorm and relational approach (#1065)

Restructure the account data handling for improved performance and flexibility.
Introduce 'G'-suffixed fields (e.g. PeersG, UsersG) that expose the existing maps as Gorm relations, simplifying resource management.
Eliminate the separate lookup tables, improving query and write speed.
Enable independent operations on the data structures, which requires adjustments in the Store interface and the Account Manager.
Yury Gargay 2023-10-12 15:42:36 +02:00 committed by GitHub
parent 2b90ff8c24
commit 32880c56a4
44 changed files with 1239 additions and 107 deletions
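
Before the file-by-file diff, here is a minimal, self-contained sketch (not code from this commit) of the dual-representation pattern the change introduces: each collection keeps its map for the Account Manager logic and gains a 'G'-suffixed slice that gorm persists and preloads. The type and field names below are trimmed-down illustrations, not the real NetBird definitions.

package main

import "fmt"

// SetupKey is a cut-down stand-in for the real type; the gorm tags mirror
// the style used in this commit (primary key + account index).
type SetupKey struct {
	Id        string `gorm:"primaryKey"`
	AccountID string `gorm:"index"`
	Key       string
}

type Account struct {
	Id string `gorm:"primaryKey"`
	// Business-logic view: ignored by gorm.
	SetupKeys map[string]*SetupKey `gorm:"-"`
	// Relational view: what gorm stores and preloads.
	SetupKeysG []SetupKey `json:"-" gorm:"foreignKey:AccountID;references:id"`
}

// Before saving, the store copies the map into the G-suffixed slice...
func (a *Account) toRelational() {
	a.SetupKeysG = a.SetupKeysG[:0]
	for _, k := range a.SetupKeys {
		a.SetupKeysG = append(a.SetupKeysG, *k)
	}
}

// ...and after loading it rebuilds the map from the slice.
func (a *Account) fromRelational() {
	a.SetupKeys = make(map[string]*SetupKey, len(a.SetupKeysG))
	for i := range a.SetupKeysG {
		k := a.SetupKeysG[i]
		a.SetupKeys[k.Key] = &k
	}
	a.SetupKeysG = nil
}

func main() {
	acc := &Account{Id: "acc1", SetupKeys: map[string]*SetupKey{
		"KEY-1": {Id: "sk1", AccountID: "acc1", Key: "KEY-1"},
	}}
	acc.toRelational()
	fmt.Println(len(acc.SetupKeysG)) // 1
	acc.fromRelational()
	fmt.Println(len(acc.SetupKeys)) // 1
}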

View File

@@ -12,6 +12,9 @@ concurrency:
 jobs:
   test:
+    strategy:
+      matrix:
+        store: ['JsonFile', 'Sqlite']
     runs-on: macos-latest
     steps:
       - name: Install Go
@@ -33,4 +36,4 @@ jobs:
         run: go mod tidy
       - name: Test
-        run: go test -exec 'sudo --preserve-env=CI' -timeout 5m -p 1 ./...
+        run: NETBIRD_STORE_KIND=${{ matrix.store }} go test -exec 'sudo --preserve-env=CI' -timeout 5m -p 1 ./...

View File

@@ -15,6 +15,7 @@ jobs:
     strategy:
       matrix:
         arch: ['386','amd64']
+        store: ['JsonFile', 'Sqlite']
     runs-on: ubuntu-latest
     steps:
       - name: Install Go
@@ -41,17 +42,16 @@ jobs:
         run: go mod tidy
       - name: Test
-        run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} go test -exec 'sudo --preserve-env=CI' -timeout 5m -p 1 ./...
+        run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} NETBIRD_STORE_KIND=${{ matrix.store }} go test -exec 'sudo --preserve-env=CI' -timeout 5m -p 1 ./...
   test_client_on_docker:
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-20.04
     steps:
       - name: Install Go
        uses: actions/setup-go@v4
        with:
          go-version: "1.20.x"
      - name: Cache Go modules
        uses: actions/cache@v3
        with:
@@ -64,7 +64,7 @@ jobs:
        uses: actions/checkout@v3
      - name: Install dependencies
-       run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev
+       run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev gcc-multilib
      - name: Install modules
        run: go mod tidy
@@ -82,7 +82,7 @@ jobs:
        run: CGO_ENABLED=0 go test -c -o nftablesmanager-testing.bin ./client/firewall/nftables/...
      - name: Generate Engine Test bin
-       run: CGO_ENABLED=0 go test -c -o engine-testing.bin ./client/internal
+       run: CGO_ENABLED=1 go test -c -o engine-testing.bin ./client/internal
      - name: Generate Peer Test bin
        run: CGO_ENABLED=0 go test -c -o peer-testing.bin ./client/internal/peer/...
@@ -95,15 +95,17 @@ jobs:
      - name: Run Iface tests in docker
        run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/iface --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/iface-testing.bin -test.timeout 5m -test.parallel 1
      - name: Run RouteManager tests in docker
        run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/client/internal/routemanager --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/routemanager-testing.bin -test.timeout 5m -test.parallel 1
      - name: Run nftables Manager tests in docker
        run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/client/firewall --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/nftablesmanager-testing.bin -test.timeout 5m -test.parallel 1
-     - name: Run Engine tests in docker
-       run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/client/internal --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/engine-testing.bin -test.timeout 5m -test.parallel 1
+     - name: Run Engine tests in docker with file store
+       run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/client/internal -e NETBIRD_STORE_KIND="JsonFile" --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/engine-testing.bin -test.timeout 5m -test.parallel 1
+     - name: Run Engine tests in docker with sqlite store
+       run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/client/internal -e NETBIRD_STORE_KIND="Sqlite" --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/engine-testing.bin -test.timeout 5m -test.parallel 1
      - name: Run Peer tests in docker
        run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/client/internal/peer --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/peer-testing.bin -test.timeout 5m -test.parallel 1

View File

@@ -14,6 +14,9 @@ concurrency:
 jobs:
   test:
+    strategy:
+      matrix:
+        store: ['JsonFile', 'Sqlite']
     runs-on: windows-latest
     steps:
       - name: Checkout code
@@ -40,6 +43,8 @@ jobs:
       - run: mv ${{ env.downloadPath }}/wintun/bin/amd64/wintun.dll 'C:\Windows\System32\'
       - run: choco install -y sysinternals
+      - run: choco install -y mingw
       - run: PsExec64 -s -w ${{ github.workspace }} C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe env -w GOMODCACHE=C:\Users\runneradmin\go\pkg\mod
       - run: PsExec64 -s -w ${{ github.workspace }} C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe env -w GOCACHE=C:\Users\runneradmin\AppData\Local\go-build

1
.gitignore vendored
View File

@@ -20,3 +20,4 @@ infrastructure_files/setup.env
 infrastructure_files/setup-*.env
 .vscode
 .DS_Store
+*.db

View File

@@ -65,7 +65,7 @@ func startManagement(t *testing.T, config *mgmt.Config) (*grpc.Server, net.Liste
 		t.Fatal(err)
 	}
 	s := grpc.NewServer()
-	store, err := mgmt.NewFileStore(config.Datadir, nil)
+	store, err := mgmt.NewStoreFromJson(config.Datadir, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

View File

@@ -1039,10 +1039,11 @@ func startManagement(dataDir string) (*grpc.Server, string, error) {
 		return nil, "", err
 	}
 	s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp))
-	store, err := server.NewFileStore(config.Datadir, nil)
+	store, err := server.NewStoreFromJson(config.Datadir, nil)
 	if err != nil {
-		log.Fatalf("failed creating a store: %s: %v", config.Datadir, err)
+		return nil, "", err
 	}
 	peersUpdateManager := server.NewPeersUpdateManager()
 	eventStore := &activity.InMemoryEventStore{}
 	if err != nil {

View File

@@ -50,19 +50,21 @@ func ToNameServerType(typeString string) NameServerType {
 // NameServerGroup group of nameservers and with group ids
 type NameServerGroup struct {
 	// ID identifier of group
-	ID string
+	ID string `gorm:"primaryKey"`
+	// AccountID is a reference to Account that this object belongs
+	AccountID string `gorm:"index"`
 	// Name group name
 	Name string
 	// Description group description
 	Description string
 	// NameServers list of nameservers
-	NameServers []NameServer
+	NameServers []NameServer `gorm:"serializer:json"`
 	// Groups list of peer group IDs to distribute the nameservers information
-	Groups []string
+	Groups []string `gorm:"serializer:json"`
 	// Primary indicates that the nameserver group is the primary resolver for any dns query
 	Primary bool
 	// Domains indicate the dns query domains to use with this nameserver group
-	Domains []string
+	Domains []string `gorm:"serializer:json"`
 	// Enabled group status
 	Enabled bool
 }

6
go.mod
View File

@@ -46,7 +46,7 @@ require (
 	github.com/hashicorp/go-version v1.6.0
 	github.com/libp2p/go-netroute v0.2.0
 	github.com/magiconair/properties v1.8.5
-	github.com/mattn/go-sqlite3 v1.14.16
+	github.com/mattn/go-sqlite3 v1.14.17
 	github.com/mdlayher/socket v0.4.0
 	github.com/miekg/dns v1.1.43
 	github.com/mitchellh/hashstructure/v2 v2.0.2
@@ -74,6 +74,8 @@ require (
 	golang.org/x/term v0.8.0
 	google.golang.org/api v0.126.0
 	gopkg.in/yaml.v3 v3.0.1
+	gorm.io/driver/sqlite v1.5.3
+	gorm.io/gorm v1.25.4
 )

 require (
@@ -110,6 +112,8 @@ require (
 	github.com/googleapis/gax-go/v2 v2.10.0 // indirect
 	github.com/hashicorp/go-uuid v1.0.2 // indirect
 	github.com/inconshreveable/mousetrap v1.1.0 // indirect
+	github.com/jinzhu/inflection v1.0.0 // indirect
+	github.com/jinzhu/now v1.1.5 // indirect
 	github.com/josharian/native v1.0.0 // indirect
 	github.com/kelseyhightower/envconfig v1.4.0 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect

12
go.sum
View File

@@ -383,6 +383,10 @@ github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLf
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
 github.com/jackmordaunt/icns v0.0.0-20181231085925-4f16af745526/go.mod h1:UQkeMHVoNcyXYq9otUupF7/h/2tmHlhrS2zw7ZVvUqc=
+github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E=
+github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
+github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
+github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/josephspurrier/goversioninfo v0.0.0-20200309025242-14b0ab84c6ca/go.mod h1:eJTEwMjXb7kZ633hO3Ln9mBUCOjX2+FlTljvpl9SYdE=
 github.com/josharian/native v0.0.0-20200817173448-b6b71def0850/go.mod h1:7X/raswPFr05uY3HiLlYeyQntB6OO7E/d2Cu7qoaN2w=
@@ -441,8 +445,8 @@ github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN
 github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a/go.mod h1:M1qoD/MqPgTZIk0EWKB38wE28ACRfVcn+cU08jyArI0=
 github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
-github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
-github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
+github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
 github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
@@ -1189,6 +1193,10 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gorm.io/driver/sqlite v1.5.3 h1:7/0dUgX28KAcopdfbRWWl68Rflh6osa4rDh+m51KL2g=
+gorm.io/driver/sqlite v1.5.3/go.mod h1:qxAuCol+2r6PannQDpOP1FP6ag3mKi4esLnB/jHed+4=
+gorm.io/gorm v1.25.4 h1:iyNd8fNAe8W9dvtlgeRI5zSVZPsq3OpcTu37cYcpCmw=
+gorm.io/gorm v1.25.4/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 gotest.tools/v3 v3.4.0/go.mod h1:CtbdzLSsqVhDgMtKsx03ird5YTGB3ar27v0u/yKBW5g=
 gvisor.dev/gvisor v0.0.0-20221203005347-703fd9b7fbc0 h1:Wobr37noukisGxpKo5jAsLREcpj61RxrWYzD8uwveOY=

View File

@@ -53,7 +53,7 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) {
 		t.Fatal(err)
 	}
 	s := grpc.NewServer()
-	store, err := mgmt.NewFileStore(config.Datadir, nil)
+	store, err := mgmt.NewStoreFromJson(config.Datadir, nil)
 	if err != nil {
 		t.Fatal(err)
 	}

View File

@@ -126,7 +126,7 @@ var (
 			if err != nil {
 				return err
 			}
-			store, err := server.NewFileStore(config.Datadir, appMetrics)
+			store, err := server.NewStore(config.StoreKind, config.Datadir, appMetrics)
 			if err != nil {
 				return fmt.Errorf("failed creating Store: %s: %v", config.Datadir, err)
 			}
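
The NewStore factory and the StoreKind constants used above live in a file that is not part of this excerpt; the following stand-alone sketch only illustrates how such a kind-based dispatch could look (driven by config.StoreKind or the NETBIRD_STORE_KIND variable seen in the CI changes), with stub types in place of the real FileStore and SqliteStore.

package main

import (
	"errors"
	"fmt"
)

// Store and StoreKind are illustrative stand-ins for the real interface and
// constants in management/server; names mirror what the diff references.
type Store interface{ GetStoreKind() StoreKind }

type StoreKind string

const (
	FileStoreKind   StoreKind = "JsonFile"
	SqliteStoreKind StoreKind = "Sqlite"
)

type fileStore struct{}

func (fileStore) GetStoreKind() StoreKind { return FileStoreKind }

type sqliteStore struct{}

func (sqliteStore) GetStoreKind() StoreKind { return SqliteStoreKind }

// newStore sketches a kind-based factory; the real NewStore signature and
// defaulting behavior are assumptions, not shown in this diff.
func newStore(kind StoreKind, dataDir string) (Store, error) {
	switch kind {
	case FileStoreKind, "":
		return fileStore{}, nil // would call NewFileStore(dataDir, metrics)
	case SqliteStoreKind:
		return sqliteStore{}, nil // would call NewSqliteStore(dataDir, metrics)
	default:
		return nil, errors.New("unsupported store kind: " + string(kind))
	}
}

func main() {
	s, err := newStore(SqliteStoreKind, "/var/lib/netbird")
	if err != nil {
		panic(err)
	}
	fmt.Println(s.GetStoreKind()) // Sqlite
}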

View File

@@ -0,0 +1,66 @@
package cmd

import (
	"errors"
	"flag"
	"fmt"
	"os"
	"path"

	"github.com/netbirdio/netbird/management/server"
	"github.com/netbirdio/netbird/util"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
)

var shortDown = "Rollback SQLite store to JSON file store. Please make a backup of the SQLite file before running this command."

var downCmd = &cobra.Command{
	Use:     "downgrade [--datadir directory] [--log-file console]",
	Aliases: []string{"down"},
	Short:   shortDown,
	Long: shortDown +
		"\n\n" +
		"This command reads the content of {datadir}/store.db and migrates it to {datadir}/store.json that can be used by File store driver.",
	RunE: func(cmd *cobra.Command, args []string) error {
		flag.Parse()
		err := util.InitLog(logLevel, logFile)
		if err != nil {
			return fmt.Errorf("failed initializing log %v", err)
		}

		sqliteStorePath := path.Join(mgmtDataDir, "store.db")
		if _, err := os.Stat(sqliteStorePath); errors.Is(err, os.ErrNotExist) {
			return fmt.Errorf("%s doesn't exist, couldn't continue the operation", sqliteStorePath)
		}

		fileStorePath := path.Join(mgmtDataDir, "store.json")
		if _, err := os.Stat(fileStorePath); err == nil {
			return fmt.Errorf("%s already exists, couldn't continue the operation", fileStorePath)
		}

		sqlstore, err := server.NewSqliteStore(mgmtDataDir, nil)
		if err != nil {
			return fmt.Errorf("failed creating sqlite store: %s: %v", mgmtDataDir, err)
		}
		sqliteStoreAccounts := len(sqlstore.GetAllAccounts())
		log.Infof("%d accounts will be migrated from sqlite store %s to file store %s",
			sqliteStoreAccounts, sqliteStorePath, fileStorePath)

		store, err := server.NewFilestoreFromSqliteStore(sqlstore, mgmtDataDir, nil)
		if err != nil {
			return fmt.Errorf("failed creating file store: %s: %v", mgmtDataDir, err)
		}
		fsStoreAccounts := len(store.GetAllAccounts())
		if fsStoreAccounts != sqliteStoreAccounts {
			return fmt.Errorf("failed to migrate accounts from sqlite to file store. Expected accounts: %d, got: %d",
				sqliteStoreAccounts, fsStoreAccounts)
		}

		log.Info("Migration finished successfully")

		return nil
	},
}

View File

@@ -0,0 +1,66 @@
package cmd

import (
	"errors"
	"flag"
	"fmt"
	"os"
	"path"

	"github.com/netbirdio/netbird/management/server"
	"github.com/netbirdio/netbird/util"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
)

var shortUp = "Migrate JSON file store to SQLite store. Please make a backup of the JSON file before running this command."

var upCmd = &cobra.Command{
	Use:     "upgrade [--datadir directory] [--log-file console]",
	Aliases: []string{"up"},
	Short:   shortUp,
	Long: shortUp +
		"\n\n" +
		"This command reads the content of {datadir}/store.json and migrates it to {datadir}/store.db that can be used by SQLite store driver.",
	RunE: func(cmd *cobra.Command, args []string) error {
		flag.Parse()
		err := util.InitLog(logLevel, logFile)
		if err != nil {
			return fmt.Errorf("failed initializing log %v", err)
		}

		fileStorePath := path.Join(mgmtDataDir, "store.json")
		if _, err := os.Stat(fileStorePath); errors.Is(err, os.ErrNotExist) {
			return fmt.Errorf("%s doesn't exist, couldn't continue the operation", fileStorePath)
		}

		sqlStorePath := path.Join(mgmtDataDir, "store.db")
		if _, err := os.Stat(sqlStorePath); err == nil {
			return fmt.Errorf("%s already exists, couldn't continue the operation", sqlStorePath)
		}

		fstore, err := server.NewFileStore(mgmtDataDir, nil)
		if err != nil {
			return fmt.Errorf("failed creating file store: %s: %v", mgmtDataDir, err)
		}
		fsStoreAccounts := len(fstore.GetAllAccounts())
		log.Infof("%d accounts will be migrated from file store %s to sqlite store %s",
			fsStoreAccounts, fileStorePath, sqlStorePath)

		store, err := server.NewSqliteStoreFromFileStore(fstore, mgmtDataDir, nil)
		if err != nil {
			return fmt.Errorf("failed creating sqlite store: %s: %v", mgmtDataDir, err)
		}
		sqliteStoreAccounts := len(store.GetAllAccounts())
		if fsStoreAccounts != sqliteStoreAccounts {
			return fmt.Errorf("failed to migrate accounts from file to sqlite. Expected accounts: %d, got: %d",
				fsStoreAccounts, sqliteStoreAccounts)
		}

		log.Info("Migration finished successfully")

		return nil
	},
}

View File

@@ -34,6 +34,12 @@ var (
 		SilenceUsage: true,
 	}

+	migrationCmd = &cobra.Command{
+		Use:          "sqlite-migration",
+		Short:        "Contains sub-commands to perform JSON file store to SQLite store migration and rollback",
+		Long:         "",
+		SilenceUsage: true,
+	}
+
 	// Execution control channel for stopCh signal
 	stopCh chan int
 )
@@ -63,6 +69,14 @@ func init() {
 	rootCmd.PersistentFlags().StringVar(&logLevel, "log-level", "info", "")
 	rootCmd.PersistentFlags().StringVar(&logFile, "log-file", defaultLogFile, "sets Netbird log path. If console is specified the the log will be output to stdout")
 	rootCmd.AddCommand(mgmtCmd)
+
+	migrationCmd.PersistentFlags().StringVar(&mgmtDataDir, "datadir", defaultMgmtDataDir, "server data directory location")
+	migrationCmd.MarkFlagRequired("datadir") //nolint
+	migrationCmd.AddCommand(upCmd)
+	migrationCmd.AddCommand(downCmd)
+
+	rootCmd.AddCommand(migrationCmd)
 }

 // SetupCloseHandler handles SIGTERM signal and exits with success

View File

@@ -165,24 +165,33 @@ func (s *Settings) Copy() *Settings {
 // Account represents a unique account of the system
 type Account struct {
-	Id string
+	// we have to name column to aid as it collides with Network.Id when work with associations
+	Id string `gorm:"primaryKey"`
 	// User.Id it was created by
 	CreatedBy string
-	Domain string
+	Domain string `gorm:"index"`
 	DomainCategory string
 	IsDomainPrimaryAccount bool
-	SetupKeys map[string]*SetupKey
-	Network *Network
-	Peers map[string]*Peer
-	Users map[string]*User
-	Groups map[string]*Group
-	Rules map[string]*Rule
-	Policies []*Policy
-	Routes map[string]*route.Route
-	NameServerGroups map[string]*nbdns.NameServerGroup
-	DNSSettings DNSSettings
+	SetupKeys map[string]*SetupKey `gorm:"-"`
+	SetupKeysG []SetupKey `json:"-" gorm:"foreignKey:AccountID;references:id"`
+	Network *Network `gorm:"embedded;embeddedPrefix:network_"`
+	Peers map[string]*Peer `gorm:"-"`
+	PeersG []Peer `json:"-" gorm:"foreignKey:AccountID;references:id"`
+	Users map[string]*User `gorm:"-"`
+	UsersG []User `json:"-" gorm:"foreignKey:AccountID;references:id"`
+	Groups map[string]*Group `gorm:"-"`
+	GroupsG []Group `json:"-" gorm:"foreignKey:AccountID;references:id"`
+	Rules map[string]*Rule `gorm:"-"`
+	RulesG []Rule `json:"-" gorm:"foreignKey:AccountID;references:id"`
+	Policies []*Policy `gorm:"foreignKey:AccountID;references:id"`
+	Routes map[string]*route.Route `gorm:"-"`
+	RoutesG []route.Route `json:"-" gorm:"foreignKey:AccountID;references:id"`
+	NameServerGroups map[string]*nbdns.NameServerGroup `gorm:"-"`
+	NameServerGroupsG []nbdns.NameServerGroup `json:"-" gorm:"foreignKey:AccountID;references:id"`
+	DNSSettings DNSSettings `gorm:"embedded;embeddedPrefix:dns_settings_"`
 	// Settings is a dictionary of Account settings
-	Settings *Settings
+	Settings *Settings `gorm:"embedded;embeddedPrefix:settings_"`
 }

 type UserInfo struct {

View File

@@ -198,7 +198,7 @@ func TestAccount_GetPeerNetworkMap(t *testing.T) {
 	netIP := net.IP{100, 64, 0, 0}
 	netMask := net.IPMask{255, 255, 0, 0}
 	network := &Network{
-		Id:     "network",
+		Identifier: "network",
 		Net:    net.IPNet{IP: netIP, Mask: netMask},
 		Dns:    "netbird.selfhosted",
 		Serial: 0,
@@ -476,7 +476,7 @@ func TestDefaultAccountManager_GetGroupsFromTheToken(t *testing.T) {
 	// as initAccount was created without account id we have to take the id after account initialization
 	// that happens inside the GetAccountByUserOrAccountID where the id is getting generated
 	// it is important to set the id as it help to avoid creating additional account with empty Id and re-pointing indices to it
-	initAccount.Id = acc.Id
+	initAccount = acc

 	claims := jwtclaims.AuthorizationClaims{
 		AccountId: accountID, // is empty as it is based on accountID right after initialization of initAccount
@@ -1025,7 +1025,6 @@ func TestAccountManager_NetworkUpdates(t *testing.T) {
 		wg.Wait()
 	})

 	t.Run("delete peer update", func(t *testing.T) {
-
 		wg.Add(1)
 		go func() {
@@ -1309,7 +1308,7 @@ func TestAccount_Copy(t *testing.T) {
 			},
 		},
 		Network: &Network{
-			Id: "net1",
+			Identifier: "net1",
 		},
 		Peers: map[string]*Peer{
 			"peer1": {
@@ -1400,6 +1399,10 @@ func hasNilField(x interface{}) error {
 	rv := reflect.ValueOf(x)
 	rv = rv.Elem()
 	for i := 0; i < rv.NumField(); i++ {
+		// skip gorm internal fields
+		if json, ok := rv.Type().Field(i).Tag.Lookup("json"); ok && json == "-" {
+			continue
+		}
 		if f := rv.Field(i); f.IsValid() {
 			k := f.Kind()
 			switch k {
@@ -2045,7 +2048,7 @@ func createManager(t *testing.T) (*DefaultAccountManager, error) {
 func createStore(t *testing.T) (Store, error) {
 	dataDir := t.TempDir()
-	store, err := NewFileStore(dataDir, nil)
+	store, err := NewStoreFromJson(dataDir, nil)
 	if err != nil {
 		return nil, err
 	}

View File

@@ -45,6 +45,8 @@ type Config struct {
 	DeviceAuthorizationFlow *DeviceAuthorizationFlow

 	PKCEAuthorizationFlow *PKCEAuthorizationFlow
+
+	StoreKind StoreKind
 }

 // GetAuthAudiences returns the audience from the http config and device authorization flow config

View File

@@ -20,7 +20,7 @@ type lookupMap map[string]struct{}
 // DNSSettings defines dns settings at the account level
 type DNSSettings struct {
 	// DisabledManagementGroups groups whose DNS management is disabled
-	DisabledManagementGroups []string
+	DisabledManagementGroups []string `gorm:"serializer:json"`
 }

 // Copy returns a copy of the DNS settings

View File

@@ -196,7 +196,7 @@ func createDNSManager(t *testing.T) (*DefaultAccountManager, error) {
 func createDNSStore(t *testing.T) (Store, error) {
 	dataDir := t.TempDir()
-	store, err := NewFileStore(dataDir, nil)
+	store, err := NewStoreFromJson(dataDir, nil)
 	if err != nil {
 		return nil, err
 	}

View File

@@ -54,6 +54,25 @@ func NewFileStore(dataDir string, metrics telemetry.AppMetrics) (*FileStore, err
 	return fs, nil
 }

+// NewFilestoreFromSqliteStore restores a store from Sqlite and stores to Filestore json in the file located in datadir
+func NewFilestoreFromSqliteStore(sqlitestore *SqliteStore, dataDir string, metrics telemetry.AppMetrics) (*FileStore, error) {
+	store, err := NewFileStore(dataDir, metrics)
+	if err != nil {
+		return nil, err
+	}
+
+	err = store.SaveInstallationID(sqlitestore.GetInstallationID())
+	if err != nil {
+		return nil, err
+	}
+
+	for _, account := range sqlitestore.GetAllAccounts() {
+		store.Accounts[account.Id] = account
+	}
+
+	return store, store.persist(store.storeFile)
+}
+
 // restore the state of the store from the file.
 // Creates a new empty store file if doesn't exist
 func restore(file string) (*FileStore, error) {
@@ -595,3 +614,8 @@ func (s *FileStore) Close() error {
 	return s.persist(s.storeFile)
 }
+
+// GetStoreKind returns FileStoreKind
+func (s *FileStore) GetStoreKind() StoreKind {
+	return FileStoreKind
+}

View File

@@ -387,7 +387,7 @@ func TestFileStore_GetAccount(t *testing.T) {
 	assert.Equal(t, expected.DomainCategory, account.DomainCategory)
 	assert.Equal(t, expected.Domain, account.Domain)
 	assert.Equal(t, expected.CreatedBy, account.CreatedBy)
-	assert.Equal(t, expected.Network.Id, account.Network.Id)
+	assert.Equal(t, expected.Network.Identifier, account.Network.Identifier)
 	assert.Len(t, account.Peers, len(expected.Peers))
 	assert.Len(t, account.Users, len(expected.Users))
 	assert.Len(t, account.SetupKeys, len(expected.SetupKeys))

View File

@@ -23,6 +23,9 @@ type Group struct {
 	// ID of the group
 	ID string

+	// AccountID is a reference to Account that this object belongs
+	AccountID string `json:"-" gorm:"index"`
+
 	// Name visible in the UI
 	Name string
@@ -30,7 +33,7 @@ type Group struct {
 	Issued string

 	// Peers list of the group
-	Peers []string
+	Peers []string `gorm:"serializer:json"`
 }

 // EventMeta returns activity event meta related to the group

View File

@@ -80,6 +80,7 @@ func initTestGroupAccount(am *DefaultAccountManager) (*Account, error) {
 	groupForRoute := &Group{
 		"grp-for-route",
+		"account-id",
 		"Group for route",
 		GroupIssuedAPI,
 		make([]string, 0),
@@ -87,6 +88,7 @@ func initTestGroupAccount(am *DefaultAccountManager) (*Account, error) {
 	groupForNameServerGroups := &Group{
 		"grp-for-name-server-grp",
+		"account-id",
 		"Group for name server groups",
 		GroupIssuedAPI,
 		make([]string, 0),
@@ -94,6 +96,7 @@ func initTestGroupAccount(am *DefaultAccountManager) (*Account, error) {
 	groupForPolicies := &Group{
 		"grp-for-policies",
+		"account-id",
 		"Group for policies",
 		GroupIssuedAPI,
 		make([]string, 0),
@@ -101,6 +104,7 @@ func initTestGroupAccount(am *DefaultAccountManager) (*Account, error) {
 	groupForSetupKeys := &Group{
 		"grp-for-keys",
+		"account-id",
 		"Group for setup keys",
 		GroupIssuedAPI,
 		make([]string, 0),
@@ -108,6 +112,7 @@ func initTestGroupAccount(am *DefaultAccountManager) (*Account, error) {
 	groupForUsers := &Group{
 		"grp-for-users",
+		"account-id",
 		"Group for users",
 		GroupIssuedAPI,
 		make([]string, 0),

View File

@@ -405,7 +405,7 @@ func startManagement(t *testing.T, config *Config) (*grpc.Server, string, error)
 		return nil, "", err
 	}
 	s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp))
-	store, err := NewFileStore(config.Datadir, nil)
+	store, err := NewStoreFromJson(config.Datadir, nil)
 	if err != nil {
 		return nil, "", err
 	}

View File

@@ -393,6 +393,7 @@ var _ = Describe("Management service", func() {
 	ipChannel := make(chan string, 20)
 	for i := 0; i < initialPeers; i++ {
 		go func() {
+			defer GinkgoRecover()
 			key, _ := wgtypes.GenerateKey()
 			loginPeerWithValidSetupKey(serverPubKey, key, client)
 			encryptedBytes, err := encryption.EncryptMessage(serverPubKey, key, &mgmtProto.SyncRequest{})
@@ -496,7 +497,7 @@ func startServer(config *server.Config) (*grpc.Server, net.Listener) {
 	Expect(err).NotTo(HaveOccurred())
 	s := grpc.NewServer()
-	store, err := server.NewFileStore(config.Datadir, nil)
+	store, err := server.NewStoreFromJson(config.Datadir, nil)
 	if err != nil {
 		log.Fatalf("failed creating a store: %s: %v", config.Datadir, err)
 	}

View File

@@ -48,6 +48,7 @@ type properties map[string]interface{}
 // DataSource metric data source
 type DataSource interface {
 	GetAllAccounts() []*server.Account
+	GetStoreKind() server.StoreKind
 }

 // ConnManager peer connection manager that holds state for current active connections
@@ -295,6 +296,7 @@ func (w *Worker) generateProperties() properties {
 	metricsProperties["max_active_peer_version"] = maxActivePeerVersion
 	metricsProperties["ui_clients"] = uiClient
 	metricsProperties["idp_manager"] = w.idpManager
+	metricsProperties["store_kind"] = w.dataSource.GetStoreKind()

 	for protocol, count := range rulesProtocol {
 		metricsProperties["rules_protocol_"+protocol] = count

View File

@@ -151,6 +151,11 @@ func (mockDatasource) GetAllAccounts() []*server.Account {
 	}
 }

+// GetStoreKind returns FileStoreKind
+func (mockDatasource) GetStoreKind() server.StoreKind {
+	return server.FileStoreKind
+}
+
 // TestGenerateProperties tests and validate the properties generation by using the mockDatasource for the Worker.generateProperties
 func TestGenerateProperties(t *testing.T) {
 	ds := mockDatasource{}
@@ -236,4 +241,8 @@ func TestGenerateProperties(t *testing.T) {
 	if properties["user_peers"] != 2 {
 		t.Errorf("expected 2 user_peers, got %d", properties["user_peers"])
 	}
+
+	if properties["store_kind"] != server.FileStoreKind {
+		t.Errorf("expected JsonFile, got %s", properties["store_kind"])
+	}
 }

View File

@@ -749,7 +749,7 @@ func createNSManager(t *testing.T) (*DefaultAccountManager, error) {
 func createNSStore(t *testing.T) (Store, error) {
 	dataDir := t.TempDir()
-	store, err := NewFileStore(dataDir, nil)
+	store, err := NewStoreFromJson(dataDir, nil)
 	if err != nil {
 		return nil, err
 	}

View File

@@ -34,14 +34,14 @@ type NetworkMap struct {
 }

 type Network struct {
-	Id  string
-	Net net.IPNet
+	Identifier string `json:"id"`
+	Net net.IPNet `gorm:"serializer:gob"`
 	Dns string
 	// Serial is an ID that increments by 1 when any change to the network happened (e.g. new peer has been added).
 	// Used to synchronize state to the client apps.
 	Serial uint64

-	mu sync.Mutex `json:"-"`
+	mu sync.Mutex `json:"-" gorm:"-"`
 }

 // NewNetwork creates a new Network initializing it with a Serial=0
@@ -56,7 +56,7 @@ func NewNetwork() *Network {
 	intn := r.Intn(len(sub))

 	return &Network{
-		Id:     xid.New().String(),
+		Identifier: xid.New().String(),
 		Net:    sub[intn].IPNet,
 		Dns:    "",
 		Serial: 0}
@@ -78,7 +78,7 @@ func (n *Network) CurrentSerial() uint64 {
 func (n *Network) Copy() *Network {
 	return &Network{
-		Id:     n.Id,
+		Identifier: n.Identifier,
 		Net:    n.Net,
 		Dns:    n.Dns,
 		Serial: n.Serial,

View File

@@ -72,22 +72,24 @@ type PeerLogin struct {
 // The Peer is a WireGuard peer identified by a public key
 type Peer struct {
 	// ID is an internal ID of the peer
-	ID string
+	ID string `gorm:"primaryKey"`
+	// AccountID is a reference to Account that this object belongs
+	AccountID string `json:"-" gorm:"index;uniqueIndex:idx_peers_account_id_ip"`
 	// WireGuard public key
-	Key string
+	Key string `gorm:"index"`
 	// A setup key this peer was registered with
 	SetupKey string
 	// IP address of the Peer
-	IP net.IP
+	IP net.IP `gorm:"uniqueIndex:idx_peers_account_id_ip"`
 	// Meta is a Peer system meta data
-	Meta PeerSystemMeta
+	Meta PeerSystemMeta `gorm:"embedded;embeddedPrefix:meta_"`
 	// Name is peer's name (machine name)
 	Name string
 	// DNSLabel is the parsed peer name for domain resolution. It is used to form an FQDN by appending the account's
 	// domain to the peer label. e.g. peer-dns-label.netbird.cloud
 	DNSLabel string
 	// Status peer's management connection status
-	Status *PeerStatus
+	Status *PeerStatus `gorm:"embedded;embeddedPrefix:peer_status_"`
 	// The user ID that registered the peer
 	UserID string
 	// SSHKey is a public SSH key of the peer
@@ -116,6 +118,7 @@ func (p *Peer) Copy() *Peer {
 	}
 	return &Peer{
 		ID:        p.ID,
+		AccountID: p.AccountID,
 		Key:       p.Key,
 		SetupKey:  p.SetupKey,
 		IP:        p.IP,

View File

@@ -369,8 +369,8 @@ func TestAccountManager_GetPeerNetwork(t *testing.T) {
 		return
 	}

-	if account.Network.Id != network.Id {
-		t.Errorf("expecting Account Networks ID to be equal, got %s expected %s", network.Id, account.Network.Id)
+	if account.Network.Identifier != network.Identifier {
+		t.Errorf("expecting Account Networks ID to be equal, got %s expected %s", network.Identifier, account.Network.Identifier)
 	}
 }

View File

@@ -26,7 +26,9 @@ const (
 // PersonalAccessToken holds all information about a PAT including a hashed version of it for verification
 type PersonalAccessToken struct {
-	ID string
+	ID string `gorm:"primaryKey"`
+	// UserID is a reference to the User that this object belongs to
+	UserID string `gorm:"index"`
 	Name string
 	HashedToken string
 	ExpirationDate time.Time

View File

@@ -63,7 +63,10 @@ type PolicyUpdateOperation struct {
 // PolicyRule is the metadata of the policy
 type PolicyRule struct {
 	// ID of the policy rule
-	ID string
+	ID string `gorm:"primaryKey"`
+
+	// PolicyID is a reference to Policy that this object belongs
+	PolicyID string `json:"-" gorm:"index"`

 	// Name of the rule visible in the UI
 	Name string
@@ -78,10 +81,10 @@ type PolicyRule struct {
 	Action PolicyTrafficActionType

 	// Destinations policy destination groups
-	Destinations []string
+	Destinations []string `gorm:"serializer:json"`

 	// Sources policy source groups
-	Sources []string
+	Sources []string `gorm:"serializer:json"`

 	// Bidirectional define if the rule is applicable in both directions, sources, and destinations
 	Bidirectional bool
@@ -90,7 +93,7 @@ type PolicyRule struct {
 	Protocol PolicyRuleProtocolType

 	// Ports or it ranges list
-	Ports []string
+	Ports []string `gorm:"serializer:json"`
 }

 // Copy returns a copy of a policy rule
@@ -128,8 +131,11 @@ func (pm *PolicyRule) ToRule() *Rule {
 // Policy of the Rego query
 type Policy struct {
 	// ID of the policy
-	ID string
+	ID string `gorm:"primaryKey"`
+
+	// AccountID is a reference to Account that this object belongs
+	AccountID string `json:"-" gorm:"index"`

 	// Name of the Policy
 	Name string
@@ -141,7 +147,7 @@ type Policy struct {
 	Enabled bool

 	// Rules of the policy
-	Rules []*PolicyRule
+	Rules []*PolicyRule `gorm:"foreignKey:PolicyID;references:id"`
 }

 // Copy returns a copy of the policy.
@@ -201,7 +207,6 @@ type FirewallRule struct {
 // This function returns the list of peers and firewall rules that are applicable to a given peer.
 func (a *Account) getPeerConnectionResources(peerID string) ([]*Peer, []*FirewallRule) {
 	generateResources, getAccumulatedResources := a.connResourcesGenerator()
-
 	for _, policy := range a.Policies {
 		if !policy.Enabled {
 			continue

View File

@@ -1017,7 +1017,7 @@ func createRouterManager(t *testing.T) (*DefaultAccountManager, error) {
 func createRouterStore(t *testing.T) (Store, error) {
 	dataDir := t.TempDir()
-	store, err := NewFileStore(dataDir, nil)
+	store, err := NewStoreFromJson(dataDir, nil)
 	if err != nil {
 		return nil, err
 	}

View File

@@ -25,6 +25,9 @@ type Rule struct {
 	// ID of the rule
 	ID string

+	// AccountID is a reference to Account that this object belongs
+	AccountID string `json:"-" gorm:"index"`
+
 	// Name of the rule visible in the UI
 	Name string
@@ -35,10 +38,10 @@ type Rule struct {
 	Disabled bool

 	// Source list of groups IDs of peers
-	Source []string
+	Source []string `gorm:"serializer:json"`

 	// Destination list of groups IDs of peers
-	Destination []string
+	Destination []string `gorm:"serializer:json"`

 	// Flow of the traffic allowed by the rule
 	Flow TrafficFlowType

View File

@@ -69,12 +69,14 @@ type SetupKeyType string
 // SetupKey represents a pre-authorized key used to register machines (peers)
 type SetupKey struct {
 	Id string
+	// AccountID is a reference to Account that this object belongs
+	AccountID string `json:"-" gorm:"index"`
 	Key string
 	Name string
 	Type SetupKeyType
 	CreatedAt time.Time
 	ExpiresAt time.Time
-	UpdatedAt time.Time
+	UpdatedAt time.Time `gorm:"autoUpdateTime:false"`
 	// Revoked indicates whether the key was revoked or not (we don't remove them for tracking purposes)
 	Revoked bool
 	// UsedTimes indicates how many times the key was used
@@ -82,7 +84,7 @@ type SetupKey struct {
 	// LastUsed last time the key was used for peer registration
 	LastUsed time.Time
 	// AutoGroups is a list of Group IDs that are auto assigned to a Peer when it uses this key to register
-	AutoGroups []string
+	AutoGroups []string `gorm:"serializer:json"`
 	// UsageLimit indicates the number of times this key can be used to enroll a machine.
 	// The value of 0 indicates the unlimited usage.
 	UsageLimit int
@@ -99,6 +101,7 @@ func (key *SetupKey) Copy() *SetupKey {
 	}
 	return &SetupKey{
 		Id:        key.Id,
+		AccountID: key.AccountID,
 		Key:       key.Key,
 		Name:      key.Name,
 		Type:      key.Type,

View File

@@ -0,0 +1,457 @@
package server
import (
"path/filepath"
"runtime"
"strings"
"sync"
"time"
nbdns "github.com/netbirdio/netbird/dns"
"github.com/netbirdio/netbird/management/server/status"
"github.com/netbirdio/netbird/management/server/telemetry"
"github.com/netbirdio/netbird/route"
log "github.com/sirupsen/logrus"
"gorm.io/driver/sqlite"
"gorm.io/gorm"
"gorm.io/gorm/clause"
"gorm.io/gorm/logger"
)
// SqliteStore represents an account storage backed by a Sqlite DB persisted to disk
type SqliteStore struct {
db *gorm.DB
storeFile string
accountLocks sync.Map
globalAccountLock sync.Mutex
metrics telemetry.AppMetrics
installationPK int
}
type installation struct {
ID uint `gorm:"primaryKey"`
InstallationIDValue string
}
// NewSqliteStore restores a store from the file located in the datadir
func NewSqliteStore(dataDir string, metrics telemetry.AppMetrics) (*SqliteStore, error) {
storeStr := "store.db?cache=shared"
if runtime.GOOS == "windows" {
// To avoid `The process cannot access the file because it is being used by another process` on Windows
storeStr = "store.db"
}
file := filepath.Join(dataDir, storeStr)
db, err := gorm.Open(sqlite.Open(file), &gorm.Config{
Logger: logger.Default.LogMode(logger.Silent),
PrepareStmt: true,
})
if err != nil {
return nil, err
}
sql, err := db.DB()
if err != nil {
return nil, err
}
conns := runtime.NumCPU()
sql.SetMaxOpenConns(conns) // TODO: make it configurable
err = db.AutoMigrate(
&SetupKey{}, &Peer{}, &User{}, &PersonalAccessToken{}, &Group{}, &Rule{},
&Account{}, &Policy{}, &PolicyRule{}, &route.Route{}, &nbdns.NameServerGroup{},
&installation{},
)
if err != nil {
return nil, err
}
return &SqliteStore{db: db, storeFile: file, metrics: metrics, installationPK: 1}, nil
}
// NewSqliteStoreFromFileStore restores a store from FileStore and stores SQLite DB in the file located in datadir
func NewSqliteStoreFromFileStore(filestore *FileStore, dataDir string, metrics telemetry.AppMetrics) (*SqliteStore, error) {
store, err := NewSqliteStore(dataDir, metrics)
if err != nil {
return nil, err
}
err = store.SaveInstallationID(filestore.InstallationID)
if err != nil {
return nil, err
}
for _, account := range filestore.GetAllAccounts() {
err := store.SaveAccount(account)
if err != nil {
return nil, err
}
}
return store, nil
}
// AcquireGlobalLock acquires global lock across all the accounts and returns a function that releases the lock
func (s *SqliteStore) AcquireGlobalLock() (unlock func()) {
log.Debugf("acquiring global lock")
start := time.Now()
s.globalAccountLock.Lock()
unlock = func() {
s.globalAccountLock.Unlock()
log.Debugf("released global lock in %v", time.Since(start))
}
took := time.Since(start)
log.Debugf("took %v to acquire global lock", took)
if s.metrics != nil {
s.metrics.StoreMetrics().CountGlobalLockAcquisitionDuration(took)
}
return unlock
}
func (s *SqliteStore) AcquireAccountLock(accountID string) (unlock func()) {
log.Debugf("acquiring lock for account %s", accountID)
start := time.Now()
value, _ := s.accountLocks.LoadOrStore(accountID, &sync.Mutex{})
mtx := value.(*sync.Mutex)
mtx.Lock()
unlock = func() {
mtx.Unlock()
log.Debugf("released lock for account %s in %v", accountID, time.Since(start))
}
return unlock
}
func (s *SqliteStore) SaveAccount(account *Account) error {
start := time.Now()
for _, key := range account.SetupKeys {
account.SetupKeysG = append(account.SetupKeysG, *key)
}
for id, peer := range account.Peers {
peer.ID = id
account.PeersG = append(account.PeersG, *peer)
}
for id, user := range account.Users {
user.Id = id
for id, pat := range user.PATs {
pat.ID = id
user.PATsG = append(user.PATsG, *pat)
}
account.UsersG = append(account.UsersG, *user)
}
for id, group := range account.Groups {
group.ID = id
account.GroupsG = append(account.GroupsG, *group)
}
for id, rule := range account.Rules {
rule.ID = id
account.RulesG = append(account.RulesG, *rule)
}
for id, route := range account.Routes {
route.ID = id
account.RoutesG = append(account.RoutesG, *route)
}
for id, ns := range account.NameServerGroups {
ns.ID = id
account.NameServerGroupsG = append(account.NameServerGroupsG, *ns)
}
err := s.db.Transaction(func(tx *gorm.DB) error {
result := tx.Select(clause.Associations).Delete(account.Policies, "account_id = ?", account.Id)
if result.Error != nil {
return result.Error
}
result = tx.Select(clause.Associations).Delete(account.UsersG, "account_id = ?", account.Id)
if result.Error != nil {
return result.Error
}
result = tx.Select(clause.Associations).Delete(account)
if result.Error != nil {
return result.Error
}
result = tx.
Session(&gorm.Session{FullSaveAssociations: true}).
Clauses(clause.OnConflict{UpdateAll: true}).Create(account)
if result.Error != nil {
return result.Error
}
return nil
})
took := time.Since(start)
if s.metrics != nil {
s.metrics.StoreMetrics().CountPersistenceDuration(took)
}
log.Debugf("took %d ms to persist an account to the SQLite", took.Milliseconds())
return err
}
func (s *SqliteStore) SaveInstallationID(ID string) error {
installation := installation{InstallationIDValue: ID}
installation.ID = uint(s.installationPK)
return s.db.Clauses(clause.OnConflict{UpdateAll: true}).Create(&installation).Error
}
func (s *SqliteStore) GetInstallationID() string {
var installation installation
if result := s.db.First(&installation, "id = ?", s.installationPK); result.Error != nil {
return ""
}
return installation.InstallationIDValue
}
func (s *SqliteStore) SavePeerStatus(accountID, peerID string, peerStatus PeerStatus) error {
var peer Peer
result := s.db.First(&peer, "account_id = ? and id = ?", accountID, peerID)
if result.Error != nil {
return status.Errorf(status.NotFound, "peer %s not found", peerID)
}
peer.Status = &peerStatus
return s.db.Save(peer).Error
}
// DeleteHashedPAT2TokenIDIndex is noop in Sqlite
func (s *SqliteStore) DeleteHashedPAT2TokenIDIndex(hashedToken string) error {
return nil
}
// DeleteTokenID2UserIDIndex is noop in Sqlite
func (s *SqliteStore) DeleteTokenID2UserIDIndex(tokenID string) error {
return nil
}
func (s *SqliteStore) GetAccountByPrivateDomain(domain string) (*Account, error) {
var account Account
result := s.db.First(&account, "domain = ?", strings.ToLower(domain))
if result.Error != nil {
return nil, status.Errorf(status.NotFound, "account not found: provided domain is not registered or is not private")
}
// TODO: rework to not call GetAccount
return s.GetAccount(account.Id)
}
func (s *SqliteStore) GetAccountBySetupKey(setupKey string) (*Account, error) {
var key SetupKey
result := s.db.Select("account_id").First(&key, "key = ?", strings.ToUpper(setupKey))
if result.Error != nil {
return nil, status.Errorf(status.NotFound, "account not found: index lookup failed")
}
if key.AccountID == "" {
return nil, status.Errorf(status.NotFound, "account not found: index lookup failed")
}
return s.GetAccount(key.AccountID)
}
func (s *SqliteStore) GetTokenIDByHashedToken(hashedToken string) (string, error) {
var token PersonalAccessToken
result := s.db.First(&token, "hashed_token = ?", hashedToken)
if result.Error != nil {
return "", status.Errorf(status.NotFound, "account not found: index lookup failed")
}
return token.ID, nil
}
func (s *SqliteStore) GetUserByTokenID(tokenID string) (*User, error) {
var token PersonalAccessToken
result := s.db.First(&token, "id = ?", tokenID)
if result.Error != nil {
return nil, status.Errorf(status.NotFound, "account not found: index lookup failed")
}
if token.UserID == "" {
return nil, status.Errorf(status.NotFound, "account not found: index lookup failed")
}
var user User
result = s.db.Preload("PATsG").First(&user, "id = ?", token.UserID)
if result.Error != nil {
return nil, status.Errorf(status.NotFound, "account not found: index lookup failed")
}
user.PATs = make(map[string]*PersonalAccessToken, len(user.PATsG))
for _, pat := range user.PATsG {
user.PATs[pat.ID] = pat.Copy()
}
return &user, nil
}
func (s *SqliteStore) GetAllAccounts() (all []*Account) {
var accounts []Account
result := s.db.Find(&accounts)
if result.Error != nil {
return all
}
for _, account := range accounts {
if acc, err := s.GetAccount(account.Id); err == nil {
all = append(all, acc)
}
}
return all
}
func (s *SqliteStore) GetAccount(accountID string) (*Account, error) {
var account Account
result := s.db.Model(&account).
Preload("UsersG.PATsG"). // have to be specifies as this is nester reference
Preload(clause.Associations).
First(&account, "id = ?", accountID)
if result.Error != nil {
return nil, status.Errorf(status.NotFound, "account not found")
}
// we have to manually preload policy rules as it seems that gorm preloading doesn't do it for us
for i, policy := range account.Policies {
var rules []*PolicyRule
err := s.db.Model(&PolicyRule{}).Find(&rules, "policy_id = ?", policy.ID).Error
if err != nil {
return nil, status.Errorf(status.NotFound, "account not found")
}
account.Policies[i].Rules = rules
}
account.SetupKeys = make(map[string]*SetupKey, len(account.SetupKeysG))
for _, key := range account.SetupKeysG {
account.SetupKeys[key.Key] = key.Copy()
}
account.SetupKeysG = nil
account.Peers = make(map[string]*Peer, len(account.PeersG))
for _, peer := range account.PeersG {
account.Peers[peer.ID] = peer.Copy()
}
account.PeersG = nil
account.Users = make(map[string]*User, len(account.UsersG))
for _, user := range account.UsersG {
user.PATs = make(map[string]*PersonalAccessToken, len(user.PATsG))
for _, pat := range user.PATsG {
user.PATs[pat.ID] = pat.Copy()
}
account.Users[user.Id] = user.Copy()
}
account.UsersG = nil
account.Groups = make(map[string]*Group, len(account.GroupsG))
for _, group := range account.GroupsG {
account.Groups[group.ID] = group.Copy()
}
account.GroupsG = nil
account.Rules = make(map[string]*Rule, len(account.RulesG))
for _, rule := range account.RulesG {
account.Rules[rule.ID] = rule.Copy()
}
account.RulesG = nil
account.Routes = make(map[string]*route.Route, len(account.RoutesG))
for _, route := range account.RoutesG {
account.Routes[route.ID] = route.Copy()
}
account.RoutesG = nil
account.NameServerGroups = make(map[string]*nbdns.NameServerGroup, len(account.NameServerGroupsG))
for _, ns := range account.NameServerGroupsG {
account.NameServerGroups[ns.ID] = ns.Copy()
}
account.NameServerGroupsG = nil
return &account, nil
}
func (s *SqliteStore) GetAccountByUser(userID string) (*Account, error) {
var user User
result := s.db.Select("account_id").First(&user, "id = ?", userID)
if result.Error != nil {
return nil, status.Errorf(status.NotFound, "account not found: index lookup failed")
}
if user.AccountID == "" {
return nil, status.Errorf(status.NotFound, "account not found: index lookup failed")
}
return s.GetAccount(user.AccountID)
}
func (s *SqliteStore) GetAccountByPeerID(peerID string) (*Account, error) {
var peer Peer
result := s.db.Select("account_id").First(&peer, "id = ?", peerID)
if result.Error != nil {
return nil, status.Errorf(status.NotFound, "account not found: index lookup failed")
}
if peer.AccountID == "" {
return nil, status.Errorf(status.NotFound, "account not found: index lookup failed")
}
return s.GetAccount(peer.AccountID)
}
func (s *SqliteStore) GetAccountByPeerPubKey(peerKey string) (*Account, error) {
var peer Peer
result := s.db.Select("account_id").First(&peer, "key = ?", peerKey)
if result.Error != nil {
return nil, status.Errorf(status.NotFound, "account not found: index lookup failed")
}
if peer.AccountID == "" {
return nil, status.Errorf(status.NotFound, "account not found: index lookup failed")
}
return s.GetAccount(peer.AccountID)
}
// SaveUserLastLogin stores the last login time for a user in DB.
func (s *SqliteStore) SaveUserLastLogin(accountID, userID string, lastLogin time.Time) error {
var user User
result := s.db.First(&user, "account_id = ? and id = ?", accountID, userID)
if result.Error != nil {
return status.Errorf(status.NotFound, "user %s not found", userID)
}
user.LastLogin = lastLogin
return s.db.Save(user).Error
}
// Close is a no-op in Sqlite
func (s *SqliteStore) Close() error {
return nil
}
// GetStoreKind returns SqliteStoreKind
func (s *SqliteStore) GetStoreKind() StoreKind {
return SqliteStoreKind
}
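A minimal usage sketch of the SQLite store; the helper name, data directory, and setup key value below are illustrative only:

func exampleOpenSqliteStore(dataDir string) (*Account, error) {
	// open the SQLite-backed store under dataDir; nil disables telemetry metrics
	store, err := NewSqliteStore(dataDir, nil)
	if err != nil {
		return nil, err
	}
	defer store.Close()
	// relational lookups replace the former in-memory index maps
	return store.GetAccountBySetupKey("A2C8E62B-38F5-4553-B31E-DD66C696CEBB")
}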

View File

@ -0,0 +1,229 @@
package server
import (
"fmt"
"net"
"path/filepath"
"runtime"
"testing"
"time"
"github.com/google/uuid"
"github.com/netbirdio/netbird/util"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSqlite_NewStore(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("The SQLite store is not properly supported by Windows yet")
}
store := newSqliteStore(t)
if len(store.GetAllAccounts()) != 0 {
t.Errorf("expected to create a new empty Accounts map when creating a new FileStore")
}
}
func TestSqlite_SaveAccount(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("The SQLite store is not properly supported by Windows yet")
}
store := newSqliteStore(t)
account := newAccountWithId("account_id", "testuser", "")
setupKey := GenerateDefaultSetupKey()
account.SetupKeys[setupKey.Key] = setupKey
account.Peers["testpeer"] = &Peer{
Key: "peerkey",
SetupKey: "peerkeysetupkey",
IP: net.IP{127, 0, 0, 1},
Meta: PeerSystemMeta{},
Name: "peer name",
Status: &PeerStatus{Connected: true, LastSeen: time.Now().UTC()},
}
err := store.SaveAccount(account)
require.NoError(t, err)
account2 := newAccountWithId("account_id2", "testuser2", "")
setupKey = GenerateDefaultSetupKey()
account2.SetupKeys[setupKey.Key] = setupKey
account2.Peers["testpeer2"] = &Peer{
Key: "peerkey2",
SetupKey: "peerkeysetupkey2",
IP: net.IP{127, 0, 0, 2},
Meta: PeerSystemMeta{},
Name: "peer name 2",
Status: &PeerStatus{Connected: true, LastSeen: time.Now().UTC()},
}
err = store.SaveAccount(account2)
require.NoError(t, err)
if len(store.GetAllAccounts()) != 2 {
t.Errorf("expecting 2 Accounts to be stored after SaveAccount()")
}
a, err := store.GetAccount(account.Id)
if a == nil {
t.Errorf("expecting Account to be stored after SaveAccount(): %v", err)
}
if a != nil && len(a.Policies) != 1 {
t.Errorf("expecting Account to have one policy stored after SaveAccount(), got %d", len(a.Policies))
return
}
if a != nil && len(a.Policies[0].Rules) != 1 {
t.Errorf("expecting Account to have one policy rule stored after SaveAccount(), got %d", len(a.Policies[0].Rules))
return
}
if a, err := store.GetAccountByPeerPubKey("peerkey"); a == nil {
t.Errorf("expecting PeerKeyID2AccountID index updated after SaveAccount(): %v", err)
}
if a, err := store.GetAccountByUser("testuser"); a == nil {
t.Errorf("expecting UserID2AccountID index updated after SaveAccount(): %v", err)
}
if a, err := store.GetAccountByPeerID("testpeer"); a == nil {
t.Errorf("expecting PeerID2AccountID index updated after SaveAccount(): %v", err)
}
if a, err := store.GetAccountBySetupKey(setupKey.Key); a == nil {
t.Errorf("expecting SetupKeyID2AccountID index updated after SaveAccount(): %v", err)
}
}
func TestSqlite_SavePeerStatus(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("The SQLite store is not properly supported by Windows yet")
}
store := newSqliteStoreFromFile(t, "testdata/store.json")
account, err := store.GetAccount("bf1c8084-ba50-4ce7-9439-34653001fc3b")
require.NoError(t, err)
// save status of non-existing peer
newStatus := PeerStatus{Connected: true, LastSeen: time.Now().UTC()}
err = store.SavePeerStatus(account.Id, "non-existing-peer", newStatus)
assert.Error(t, err)
// save new status of existing peer
account.Peers["testpeer"] = &Peer{
Key: "peerkey",
ID: "testpeer",
SetupKey: "peerkeysetupkey",
IP: net.IP{127, 0, 0, 1},
Meta: PeerSystemMeta{},
Name: "peer name",
Status: &PeerStatus{Connected: false, LastSeen: time.Now().UTC()},
}
err = store.SaveAccount(account)
require.NoError(t, err)
err = store.SavePeerStatus(account.Id, "testpeer", newStatus)
require.NoError(t, err)
account, err = store.GetAccount(account.Id)
require.NoError(t, err)
actual := account.Peers["testpeer"].Status
assert.Equal(t, newStatus, *actual)
}
func TestSqlite_TestGetAccountByPrivateDomain(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("The SQLite store is not properly supported by Windows yet")
}
store := newSqliteStoreFromFile(t, "testdata/store.json")
existingDomain := "test.com"
account, err := store.GetAccountByPrivateDomain(existingDomain)
require.NoError(t, err, "should found account")
require.Equal(t, existingDomain, account.Domain, "domains should match")
_, err = store.GetAccountByPrivateDomain("missing-domain.com")
require.Error(t, err, "should return error on domain lookup")
}
func TestSqlite_GetTokenIDByHashedToken(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("The SQLite store is not properly supported by Windows yet")
}
store := newSqliteStoreFromFile(t, "testdata/store.json")
hashed := "SoMeHaShEdToKeN"
id := "9dj38s35-63fb-11ec-90d6-0242ac120003"
token, err := store.GetTokenIDByHashedToken(hashed)
require.NoError(t, err)
require.Equal(t, id, token)
}
func TestSqlite_GetUserByTokenID(t *testing.T) {
if runtime.GOOS == "windows" {
t.Skip("The SQLite store is not properly supported by Windows yet")
}
store := newSqliteStoreFromFile(t, "testdata/store.json")
id := "9dj38s35-63fb-11ec-90d6-0242ac120003"
user, err := store.GetUserByTokenID(id)
require.NoError(t, err)
require.Equal(t, id, user.PATs[id].ID)
}
func newSqliteStore(t *testing.T) *SqliteStore {
t.Helper()
store, err := NewSqliteStore(t.TempDir(), nil)
require.NoError(t, err)
require.NotNil(t, store)
return store
}
func newSqliteStoreFromFile(t *testing.T, filename string) *SqliteStore {
t.Helper()
storeDir := t.TempDir()
err := util.CopyFileContents(filename, filepath.Join(storeDir, "store.json"))
require.NoError(t, err)
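// load the JSON fixture into a FileStore first, then convert it into a fresh SqliteStore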
fStore, err := NewFileStore(storeDir, nil)
require.NoError(t, err)
store, err := NewSqliteStoreFromFileStore(fStore, storeDir, nil)
require.NoError(t, err)
require.NotNil(t, store)
return store
}
func newAccount(store Store, id int) error {
str := fmt.Sprintf("%s-%d", uuid.New().String(), id)
account := newAccountWithId(str, str+"-testuser", "example.com")
setupKey := GenerateDefaultSetupKey()
account.SetupKeys[setupKey.Key] = setupKey
account.Peers["p"+str] = &Peer{
Key: "peerkey" + str,
SetupKey: "peerkeysetupkey",
IP: net.IP{127, 0, 0, 1},
Meta: PeerSystemMeta{},
Name: "peer name",
Status: &PeerStatus{Connected: true, LastSeen: time.Now().UTC()},
}
return store.SaveAccount(account)
}

View File

@ -1,6 +1,12 @@
package server package server
import "time" import (
"fmt"
"os"
"time"
"github.com/netbirdio/netbird/management/server/telemetry"
)
type Store interface { type Store interface {
GetAllAccounts() []*Account GetAllAccounts() []*Account
@ -25,4 +31,63 @@ type Store interface {
SaveUserLastLogin(accountID, userID string, lastLogin time.Time) error SaveUserLastLogin(accountID, userID string, lastLogin time.Time) error
// Close should close the store persisting all unsaved data. // Close should close the store persisting all unsaved data.
Close() error Close() error
// GetStoreKind should return StoreKind of the current store implementation.
// This is also a method of metrics.DataSource interface.
GetStoreKind() StoreKind
}
type StoreKind string
const (
FileStoreKind StoreKind = "JsonFile"
SqliteStoreKind StoreKind = "Sqlite"
)
func GetStoreKindFromEnv() StoreKind {
kind, ok := os.LookupEnv("NETBIRD_STORE_KIND")
if !ok {
return FileStoreKind
}
value := StoreKind(kind)
if value == FileStoreKind || value == SqliteStoreKind {
return value
}
return FileStoreKind
}
func NewStore(kind StoreKind, dataDir string, metrics telemetry.AppMetrics) (Store, error) {
if kind == "" {
// fall back to the environment variable; normally this is only used from tests
kind = GetStoreKindFromEnv()
}
switch kind {
case FileStoreKind:
return NewFileStore(dataDir, metrics)
case SqliteStoreKind:
return NewSqliteStore(dataDir, metrics)
default:
return nil, fmt.Errorf("unsupported kind of store %s", kind)
}
}
func NewStoreFromJson(dataDir string, metrics telemetry.AppMetrics) (Store, error) {
fstore, err := NewFileStore(dataDir, nil)
if err != nil {
return nil, err
}
kind := GetStoreKindFromEnv()
switch kind {
case FileStoreKind:
return fstore, nil
case SqliteStoreKind:
return NewSqliteStoreFromFileStore(fstore, dataDir, metrics)
default:
return nil, fmt.Errorf("unsupported kind of store %s", kind)
}
} }
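Backend selection is environment-driven when no explicit kind is supplied; a minimal sketch, with the function name and data directory as illustrative assumptions:

func exampleSelectStore(dataDir string) (Store, error) {
	// NETBIRD_STORE_KIND=Sqlite picks the SQLite backend; unset or unknown values fall back to JsonFile
	if err := os.Setenv("NETBIRD_STORE_KIND", string(SqliteStoreKind)); err != nil {
		return nil, err
	}
	// an empty kind makes NewStore consult the environment variable
	return NewStore("", dataDir, nil)
}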

View File

@ -0,0 +1,88 @@
package server
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
type benchCase struct {
name string
storeFn func(b *testing.B) Store
size int
}
var newFs = func(b *testing.B) Store {
store, _ := NewFileStore(b.TempDir(), nil)
return store
}
var newSqlite = func(b *testing.B) Store {
store, _ := NewSqliteStore(b.TempDir(), nil)
return store
}
func BenchmarkTest_StoreWrite(b *testing.B) {
cases := []benchCase{
{name: "FileStore_Write", storeFn: newFs, size: 100},
{name: "SqliteStore_Write", storeFn: newSqlite, size: 100},
{name: "FileStore_Write", storeFn: newFs, size: 500},
{name: "SqliteStore_Write", storeFn: newSqlite, size: 500},
{name: "FileStore_Write", storeFn: newFs, size: 1000},
{name: "SqliteStore_Write", storeFn: newSqlite, size: 1000},
{name: "FileStore_Write", storeFn: newFs, size: 2000},
{name: "SqliteStore_Write", storeFn: newSqlite, size: 2000},
}
for _, c := range cases {
name := fmt.Sprintf("%s_%d", c.name, c.size)
store := c.storeFn(b)
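// pre-populate the store with c.size accounts before the timed writes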
for i := 0; i < c.size; i++ {
_ = newAccount(store, i)
}
b.Run(name, func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
i := c.size
for pb.Next() {
i++
err := newAccount(store, i)
require.NoError(b, err)
}
})
})
}
}
func BenchmarkTest_StoreRead(b *testing.B) {
cases := []benchCase{
{name: "FileStore_Read", storeFn: newFs, size: 100},
{name: "SqliteStore_Read", storeFn: newSqlite, size: 100},
{name: "FileStore_Read", storeFn: newFs, size: 500},
{name: "SqliteStore_Read", storeFn: newSqlite, size: 500},
{name: "FileStore_Read", storeFn: newFs, size: 1000},
{name: "SqliteStore_Read", storeFn: newSqlite, size: 1000},
}
for _, c := range cases {
name := fmt.Sprintf("%s_%d", c.name, c.size)
store := c.storeFn(b)
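// seed the store, then benchmark repeated reads of the last created account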
for i := 0; i < c.size; i++ {
_ = newAccount(store, i)
}
accounts := store.GetAllAccounts()
id := accounts[c.size-1].Id
b.Run(name, func(b *testing.B) {
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
_, _ = store.GetAccount(id)
}
})
})
}
}

View File

@ -2,52 +2,87 @@
"Accounts": { "Accounts": {
"bf1c8084-ba50-4ce7-9439-34653001fc3b": { "bf1c8084-ba50-4ce7-9439-34653001fc3b": {
"Id": "bf1c8084-ba50-4ce7-9439-34653001fc3b", "Id": "bf1c8084-ba50-4ce7-9439-34653001fc3b",
"CreatedBy": "",
"Domain": "test.com", "Domain": "test.com",
"DomainCategory": "private", "DomainCategory": "private",
"IsDomainPrimaryAccount": true, "IsDomainPrimaryAccount": true,
"SetupKeys": { "SetupKeys": {
"A2C8E62B-38F5-4553-B31E-DD66C696CEBB": { "A2C8E62B-38F5-4553-B31E-DD66C696CEBB": {
"Id": "",
"AccountID": "",
"Key": "A2C8E62B-38F5-4553-B31E-DD66C696CEBB", "Key": "A2C8E62B-38F5-4553-B31E-DD66C696CEBB",
"Name": "Default key", "Name": "Default key",
"Type": "reusable", "Type": "reusable",
"CreatedAt": "2021-08-19T20:46:20.005936822+02:00", "CreatedAt": "2021-08-19T20:46:20.005936822+02:00",
"ExpiresAt": "2321-09-18T20:46:20.005936822+02:00", "ExpiresAt": "2321-09-18T20:46:20.005936822+02:00",
"UpdatedAt": "0001-01-01T00:00:00Z",
"Revoked": false, "Revoked": false,
"UsedTimes": 0 "UsedTimes": 0,
"LastUsed": "0001-01-01T00:00:00Z",
"AutoGroups": null,
"UsageLimit": 0,
"Ephemeral": false
} }
}, },
"Network": { "Network": {
"Id": "af1c8024-ha40-4ce2-9418-34653101fc3c", "id": "af1c8024-ha40-4ce2-9418-34653101fc3c",
"Net": { "Net": {
"IP": "100.64.0.0", "IP": "100.64.0.0",
"Mask": "//8AAA==" "Mask": "//8AAA=="
}, },
"Dns": null "Dns": "",
"Serial": 0
}, },
"Peers": {}, "Peers": {},
"Users": { "Users": {
"edafee4e-63fb-11ec-90d6-0242ac120003": { "edafee4e-63fb-11ec-90d6-0242ac120003": {
"Id": "edafee4e-63fb-11ec-90d6-0242ac120003", "Id": "edafee4e-63fb-11ec-90d6-0242ac120003",
"AccountID": "",
"Role": "admin", "Role": "admin",
"PATs": {} "IsServiceUser": false,
"ServiceUserName": "",
"AutoGroups": null,
"PATs": {},
"Blocked": false,
"LastLogin": "0001-01-01T00:00:00Z"
}, },
"f4f6d672-63fb-11ec-90d6-0242ac120003": { "f4f6d672-63fb-11ec-90d6-0242ac120003": {
"Id": "f4f6d672-63fb-11ec-90d6-0242ac120003", "Id": "f4f6d672-63fb-11ec-90d6-0242ac120003",
"AccountID": "",
"Role": "user", "Role": "user",
"IsServiceUser": false,
"ServiceUserName": "",
"AutoGroups": null,
"PATs": { "PATs": {
"9dj38s35-63fb-11ec-90d6-0242ac120003": { "9dj38s35-63fb-11ec-90d6-0242ac120003": {
"ID":"9dj38s35-63fb-11ec-90d6-0242ac120003", "ID": "9dj38s35-63fb-11ec-90d6-0242ac120003",
"Description":"some Description", "UserID": "",
"HashedToken":"SoMeHaShEdToKeN", "Name": "",
"ExpirationDate":"2023-02-27T00:00:00Z", "HashedToken": "SoMeHaShEdToKeN",
"CreatedBy":"user", "ExpirationDate": "2023-02-27T00:00:00Z",
"CreatedAt":"2023-01-01T00:00:00Z", "CreatedBy": "user",
"LastUsed":"2023-02-01T00:00:00Z" "CreatedAt": "2023-01-01T00:00:00Z",
} "LastUsed": "2023-02-01T00:00:00Z"
} }
} },
} "Blocked": false,
"LastLogin": "0001-01-01T00:00:00Z"
}
},
"Groups": null,
"Rules": null,
"Policies": [],
"Routes": null,
"NameServerGroups": null,
"DNSSettings": null,
"Settings": {
"PeerLoginExpirationEnabled": false,
"PeerLoginExpiration": 86400000000000,
"GroupsPropagationEnabled": false,
"JWTGroupsEnabled": false,
"JWTGroupsClaimName": ""
} }
} }
},
"InstallationID": ""
} }

View File

@ -44,14 +44,17 @@ type UserRole string
// User represents a user of the system // User represents a user of the system
type User struct { type User struct {
Id string Id string `gorm:"primaryKey"`
// AccountID is a reference to the Account that this object belongs to
AccountID string `json:"-" gorm:"index"`
Role UserRole Role UserRole
IsServiceUser bool IsServiceUser bool
// ServiceUserName is only set if IsServiceUser is true // ServiceUserName is only set if IsServiceUser is true
ServiceUserName string ServiceUserName string
// AutoGroups is a list of Group IDs to auto-assign to peers registered by this user // AutoGroups is a list of Group IDs to auto-assign to peers registered by this user
AutoGroups []string AutoGroups []string `gorm:"serializer:json"`
PATs map[string]*PersonalAccessToken PATs map[string]*PersonalAccessToken `gorm:"-"`
PATsG []PersonalAccessToken `json:"-" gorm:"foreignKey:UserID;references:id"`
// Blocked indicates whether the user is blocked. Blocked users can't use the system. // Blocked indicates whether the user is blocked. Blocked users can't use the system.
Blocked bool Blocked bool
// LastLogin is the last time the user logged in to IdP // LastLogin is the last time the user logged in to IdP
@ -124,6 +127,7 @@ func (u *User) Copy() *User {
} }
return &User{ return &User{
Id: u.Id, Id: u.Id,
AccountID: u.AccountID,
Role: u.Role, Role: u.Role,
AutoGroups: autoGroups, AutoGroups: autoGroups,
IsServiceUser: u.IsServiceUser, IsServiceUser: u.IsServiceUser,

View File

@ -251,6 +251,7 @@ func TestUser_Copy(t *testing.T) {
// this is an imaginary case which will never be in DB this way // this is an imaginary case which will never be in DB this way
user := User{ user := User{
Id: "userId", Id: "userId",
AccountID: "accountId",
Role: "role", Role: "role",
IsServiceUser: true, IsServiceUser: true,
ServiceUserName: "servicename", ServiceUserName: "servicename",
@ -291,6 +292,11 @@ func validateStruct(s interface{}) (err error) {
field := structVal.Field(i) field := structVal.Field(i)
fieldName := structType.Field(i).Name fieldName := structType.Field(i).Name
// skip gorm internal fields
if json, ok := structType.Field(i).Tag.Lookup("json"); ok && json == "-" {
continue
}
isSet := field.IsValid() && (!field.IsZero() || field.Type().String() == "bool") isSet := field.IsValid() && (!field.IsZero() || field.Type().String() == "bool")
if !isSet { if !isSet {

View File

@ -65,17 +65,19 @@ func ToPrefixType(prefix string) NetworkType {
// Route represents a route // Route represents a route
type Route struct { type Route struct {
ID string ID string `gorm:"primaryKey"`
Network netip.Prefix // AccountID is a reference to the Account that this object belongs to
AccountID string `gorm:"index"`
Network netip.Prefix `gorm:"serializer:gob"`
NetID string NetID string
Description string Description string
Peer string Peer string
PeerGroups []string PeerGroups []string `gorm:"serializer:gob"`
NetworkType NetworkType NetworkType NetworkType
Masquerade bool Masquerade bool
Metric int Metric int
Enabled bool Enabled bool
Groups []string Groups []string `gorm:"serializer:json"`
} }
// EventMeta returns activity event meta related to the route // EventMeta returns activity event meta related to the route