Mirror of https://github.com/netbirdio/netbird.git (synced 2024-11-21 23:53:14 +01:00)

[management] Remove file store (#2689)

parent 8934453b30
commit 158936fb15
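Note: the hunks below consistently swap the JSON-backed test store (NewTestStoreFromJson plus a copied testdata/store.json) for a SQLite-backed store seeded from a vendored store.sqlite fixture. As a minimal, illustrative sketch only — assuming the management server import path github.com/netbirdio/netbird/management/server and the NewTestStoreFromSqlite signature visible in the hunks — a test would now obtain its store roughly like this (newTestStore is a hypothetical helper, not part of the commit):

package cmd

import (
	"context"
	"testing"

	mgmt "github.com/netbirdio/netbird/management/server"
)

// newTestStore seeds a management store from the vendored SQLite fixture
// instead of copying store.json into the data dir.
func newTestStore(t *testing.T) mgmt.Store {
	t.Helper()
	store, cleanUp, err := mgmt.NewTestStoreFromSqlite(context.Background(), "../testdata/store.sqlite", t.TempDir())
	if err != nil {
		t.Fatal(err)
	}
	t.Cleanup(cleanUp)
	return store
}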
@@ -3,7 +3,6 @@ package cmd
 import (
 	"context"
 	"net"
-	"path/filepath"
 	"testing"
 	"time"

@@ -34,18 +33,12 @@ func startTestingServices(t *testing.T) string {
 	if err != nil {
 		t.Fatal(err)
 	}
-	testDir := t.TempDir()
-	config.Datadir = testDir
-	err = util.CopyFileContents("../testdata/store.json", filepath.Join(testDir, "store.json"))
-	if err != nil {
-		t.Fatal(err)
-	}

 	_, signalLis := startSignal(t)
 	signalAddr := signalLis.Addr().String()
 	config.Signal.URI = signalAddr

-	_, mgmLis := startManagement(t, config)
+	_, mgmLis := startManagement(t, config, "../testdata/store.sqlite")
 	mgmAddr := mgmLis.Addr().String()
 	return mgmAddr
 }

@@ -70,7 +63,7 @@ func startSignal(t *testing.T) (*grpc.Server, net.Listener) {
 	return s, lis
 }

-func startManagement(t *testing.T, config *mgmt.Config) (*grpc.Server, net.Listener) {
+func startManagement(t *testing.T, config *mgmt.Config, testFile string) (*grpc.Server, net.Listener) {
 	t.Helper()

 	lis, err := net.Listen("tcp", ":0")

@@ -78,7 +71,7 @@ func startManagement(t *testing.T, config *mgmt.Config) (*grpc.Server, net.Liste
 		t.Fatal(err)
 	}
 	s := grpc.NewServer()
-	store, cleanUp, err := mgmt.NewTestStoreFromJson(context.Background(), config.Datadir)
+	store, cleanUp, err := mgmt.NewTestStoreFromSqlite(context.Background(), testFile, t.TempDir())
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -6,7 +6,6 @@ import (
 	"net"
 	"net/netip"
 	"os"
-	"path/filepath"
 	"runtime"
 	"strings"
 	"sync"
@@ -824,20 +823,6 @@ func TestEngine_UpdateNetworkMapWithDNSUpdate(t *testing.T) {
 func TestEngine_MultiplePeers(t *testing.T) {
 	// log.SetLevel(log.DebugLevel)

-	dir := t.TempDir()
-
-	err := util.CopyFileContents("../testdata/store.json", filepath.Join(dir, "store.json"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer func() {
-		err = os.Remove(filepath.Join(dir, "store.json")) //nolint
-		if err != nil {
-			t.Fatal(err)
-			return
-		}
-	}()
-
 	ctx, cancel := context.WithCancel(CtxInitState(context.Background()))
 	defer cancel()

@@ -847,7 +832,7 @@ func TestEngine_MultiplePeers(t *testing.T) {
 		return
 	}
 	defer sigServer.Stop()
-	mgmtServer, mgmtAddr, err := startManagement(t, dir)
+	mgmtServer, mgmtAddr, err := startManagement(t, t.TempDir(), "../testdata/store.sqlite")
 	if err != nil {
 		t.Fatal(err)
 		return

@@ -1070,7 +1055,7 @@ func startSignal(t *testing.T) (*grpc.Server, string, error) {
 	return s, lis.Addr().String(), nil
 }

-func startManagement(t *testing.T, dataDir string) (*grpc.Server, string, error) {
+func startManagement(t *testing.T, dataDir, testFile string) (*grpc.Server, string, error) {
 	t.Helper()

 	config := &server.Config{

@@ -1095,7 +1080,7 @@ func startManagement(t *testing.T, dataDir string) (*grpc.Server, string, error)
 	}
 	s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp))

-	store, cleanUp, err := server.NewTestStoreFromJson(context.Background(), config.Datadir)
+	store, cleanUp, err := server.NewTestStoreFromSqlite(context.Background(), testFile, config.Datadir)
 	if err != nil {
 		return nil, "", err
 	}

@@ -110,7 +110,7 @@ func startManagement(t *testing.T, signalAddr string, counter *int) (*grpc.Serve
 		return nil, "", err
 	}
 	s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp))
-	store, cleanUp, err := server.NewTestStoreFromJson(context.Background(), config.Datadir)
+	store, cleanUp, err := server.NewTestStoreFromSqlite(context.Background(), "", config.Datadir)
 	if err != nil {
 		return nil, "", err
 	}
client/testdata/store.json (vendored, 38 lines removed)
@@ -1,38 +0,0 @@
-{
-    "Accounts": {
-        "bf1c8084-ba50-4ce7-9439-34653001fc3b": {
-            "Id": "bf1c8084-ba50-4ce7-9439-34653001fc3b",
-            "SetupKeys": {
-                "A2C8E62B-38F5-4553-B31E-DD66C696CEBB": {
-                    "Key": "A2C8E62B-38F5-4553-B31E-DD66C696CEBB",
-                    "Name": "Default key",
-                    "Type": "reusable",
-                    "CreatedAt": "2021-08-19T20:46:20.005936822+02:00",
-                    "ExpiresAt": "2321-09-18T20:46:20.005936822+02:00",
-                    "Revoked": false,
-                    "UsedTimes": 0
-
-                }
-            },
-            "Network": {
-                "Id": "af1c8024-ha40-4ce2-9418-34653101fc3c",
-                "Net": {
-                    "IP": "100.64.0.0",
-                    "Mask": "//8AAA=="
-                },
-                "Dns": null
-            },
-            "Peers": {},
-            "Users": {
-                "edafee4e-63fb-11ec-90d6-0242ac120003": {
-                    "Id": "edafee4e-63fb-11ec-90d6-0242ac120003",
-                    "Role": "admin"
-                },
-                "f4f6d672-63fb-11ec-90d6-0242ac120003": {
-                    "Id": "f4f6d672-63fb-11ec-90d6-0242ac120003",
-                    "Role": "user"
-                }
-            }
-        }
-    }
-}

client/testdata/store.sqlite (vendored, new binary file)
Binary file not shown.
@@ -47,25 +47,18 @@ func startManagement(t *testing.T) (*grpc.Server, net.Listener) {
 	level, _ := log.ParseLevel("debug")
 	log.SetLevel(level)

-	testDir := t.TempDir()
-
 	config := &mgmt.Config{}
 	_, err := util.ReadJson("../server/testdata/management.json", config)
 	if err != nil {
 		t.Fatal(err)
 	}
-	config.Datadir = testDir
-	err = util.CopyFileContents("../server/testdata/store.json", filepath.Join(testDir, "store.json"))
-	if err != nil {
-		t.Fatal(err)
-	}

 	lis, err := net.Listen("tcp", ":0")
 	if err != nil {
 		t.Fatal(err)
 	}
 	s := grpc.NewServer()
-	store, cleanUp, err := mgmt.NewTestStoreFromJson(context.Background(), config.Datadir)
+	store, cleanUp, err := NewSqliteTestStore(t, context.Background(), "../server/testdata/store.sqlite")
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -521,3 +514,22 @@ func Test_GetPKCEAuthorizationFlow(t *testing.T) {
 	assert.Equal(t, expectedFlowInfo.ProviderConfig.ClientID, flowInfo.ProviderConfig.ClientID, "provider configured client ID should match")
 	assert.Equal(t, expectedFlowInfo.ProviderConfig.ClientSecret, flowInfo.ProviderConfig.ClientSecret, "provider configured client secret should match")
 }
+
+func NewSqliteTestStore(t *testing.T, ctx context.Context, testFile string) (mgmt.Store, func(), error) {
+	t.Helper()
+	dataDir := t.TempDir()
+	err := util.CopyFileContents(testFile, filepath.Join(dataDir, "store.db"))
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	store, err := mgmt.NewSqliteStore(ctx, dataDir, nil)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	return store, func() {
+		store.Close(ctx)
+		os.Remove(filepath.Join(dataDir, "store.db"))
+	}, nil
+}
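For context, the NewSqliteTestStore helper added above is what the startManagement hunk earlier in this file now calls. A brief usage sketch, mirroring that call and the fixture path already referenced in the diff (the trailing comments are illustrative, not from the commit):

	store, cleanUp, err := NewSqliteTestStore(t, context.Background(), "../server/testdata/store.sqlite")
	if err != nil {
		t.Fatal(err)
	}
	defer cleanUp() // closes the store and removes the copied store.db
	_ = store       // pre-seeded from the vendored SQLite fixture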
@@ -2366,7 +2366,7 @@ func createManager(t TB) (*DefaultAccountManager, error) {
 func createStore(t TB) (Store, error) {
 	t.Helper()
 	dataDir := t.TempDir()
-	store, cleanUp, err := NewTestStoreFromJson(context.Background(), dataDir)
+	store, cleanUp, err := NewTestStoreFromSqlite(context.Background(), "", dataDir)
 	if err != nil {
 		return nil, err
 	}

@@ -210,7 +210,7 @@ func createDNSManager(t *testing.T) (*DefaultAccountManager, error) {
 func createDNSStore(t *testing.T) (Store, error) {
 	t.Helper()
 	dataDir := t.TempDir()
-	store, cleanUp, err := NewTestStoreFromJson(context.Background(), dataDir)
+	store, cleanUp, err := NewTestStoreFromSqlite(context.Background(), "", dataDir)
 	if err != nil {
 		return nil, err
 	}
@@ -2,24 +2,18 @@ package server

import (
	"context"
	"errors"
	"net"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/netbirdio/netbird/dns"
	nbgroup "github.com/netbirdio/netbird/management/server/group"
	nbpeer "github.com/netbirdio/netbird/management/server/peer"
	"github.com/netbirdio/netbird/management/server/posture"
	"github.com/netbirdio/netbird/management/server/status"
	"github.com/netbirdio/netbird/management/server/telemetry"
	"github.com/netbirdio/netbird/route"
	"github.com/rs/xid"
	log "github.com/sirupsen/logrus"

	nbgroup "github.com/netbirdio/netbird/management/server/group"
	nbpeer "github.com/netbirdio/netbird/management/server/peer"
	"github.com/netbirdio/netbird/management/server/telemetry"
	"github.com/netbirdio/netbird/util"
)

@ -42,167 +36,9 @@ type FileStore struct {
|
||||
mux sync.Mutex `json:"-"`
|
||||
storeFile string `json:"-"`
|
||||
|
||||
// sync.Mutex indexed by resource ID
|
||||
resourceLocks sync.Map `json:"-"`
|
||||
globalAccountLock sync.Mutex `json:"-"`
|
||||
|
||||
metrics telemetry.AppMetrics `json:"-"`
|
||||
}
|
||||
|
||||
func (s *FileStore) ExecuteInTransaction(ctx context.Context, f func(store Store) error) error {
|
||||
return f(s)
|
||||
}
|
||||
|
||||
func (s *FileStore) IncrementSetupKeyUsage(ctx context.Context, setupKeyID string) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
accountID, ok := s.SetupKeyID2AccountID[strings.ToUpper(setupKeyID)]
|
||||
if !ok {
|
||||
return status.NewSetupKeyNotFoundError()
|
||||
}
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
account.SetupKeys[setupKeyID].UsedTimes++
|
||||
|
||||
return s.SaveAccount(ctx, account)
|
||||
}
|
||||
|
||||
func (s *FileStore) AddPeerToAllGroup(ctx context.Context, accountID string, peerID string) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
allGroup, err := account.GetGroupAll()
|
||||
if err != nil || allGroup == nil {
|
||||
return errors.New("all group not found")
|
||||
}
|
||||
|
||||
allGroup.Peers = append(allGroup.Peers, peerID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *FileStore) AddPeerToGroup(ctx context.Context, accountId string, peerId string, groupID string) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, err := s.getAccount(accountId)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
account.Groups[groupID].Peers = append(account.Groups[groupID].Peers, peerId)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *FileStore) AddPeerToAccount(ctx context.Context, peer *nbpeer.Peer) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, ok := s.Accounts[peer.AccountID]
|
||||
if !ok {
|
||||
return status.NewAccountNotFoundError(peer.AccountID)
|
||||
}
|
||||
|
||||
account.Peers[peer.ID] = peer
|
||||
return s.SaveAccount(ctx, account)
|
||||
}
|
||||
|
||||
func (s *FileStore) IncrementNetworkSerial(ctx context.Context, accountId string) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, ok := s.Accounts[accountId]
|
||||
if !ok {
|
||||
return status.NewAccountNotFoundError(accountId)
|
||||
}
|
||||
|
||||
account.Network.Serial++
|
||||
|
||||
return s.SaveAccount(ctx, account)
|
||||
}
|
||||
|
||||
func (s *FileStore) GetSetupKeyBySecret(ctx context.Context, lockStrength LockingStrength, key string) (*SetupKey, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
accountID, ok := s.SetupKeyID2AccountID[strings.ToUpper(key)]
|
||||
if !ok {
|
||||
return nil, status.NewSetupKeyNotFoundError()
|
||||
}
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
setupKey, ok := account.SetupKeys[key]
|
||||
if !ok {
|
||||
return nil, status.Errorf(status.NotFound, "setup key not found")
|
||||
}
|
||||
|
||||
return setupKey, nil
|
||||
}
|
||||
|
||||
func (s *FileStore) GetTakenIPs(ctx context.Context, lockStrength LockingStrength, accountID string) ([]net.IP, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var takenIps []net.IP
|
||||
for _, existingPeer := range account.Peers {
|
||||
takenIps = append(takenIps, existingPeer.IP)
|
||||
}
|
||||
|
||||
return takenIps, nil
|
||||
}
|
||||
|
||||
func (s *FileStore) GetPeerLabelsInAccount(ctx context.Context, lockStrength LockingStrength, accountID string) ([]string, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
existingLabels := []string{}
|
||||
for _, peer := range account.Peers {
|
||||
if peer.DNSLabel != "" {
|
||||
existingLabels = append(existingLabels, peer.DNSLabel)
|
||||
}
|
||||
}
|
||||
return existingLabels, nil
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountNetwork(ctx context.Context, lockStrength LockingStrength, accountID string) (*Network, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return account.Network, nil
|
||||
}
|
||||
|
||||
type StoredAccount struct{}
|
||||
|
||||
// NewFileStore restores a store from the file located in the datadir
|
||||
func NewFileStore(ctx context.Context, dataDir string, metrics telemetry.AppMetrics) (*FileStore, error) {
|
||||
fs, err := restore(ctx, filepath.Join(dataDir, storeFileName))
|
||||
@ -213,25 +49,6 @@ func NewFileStore(ctx context.Context, dataDir string, metrics telemetry.AppMetr
|
||||
return fs, nil
|
||||
}
|
||||
|
||||
// NewFilestoreFromSqliteStore restores a store from Sqlite and stores to Filestore json in the file located in datadir
|
||||
func NewFilestoreFromSqliteStore(ctx context.Context, sqlStore *SqlStore, dataDir string, metrics telemetry.AppMetrics) (*FileStore, error) {
|
||||
store, err := NewFileStore(ctx, dataDir, metrics)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = store.SaveInstallationID(ctx, sqlStore.GetInstallationID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, account := range sqlStore.GetAllAccounts(ctx) {
|
||||
store.Accounts[account.Id] = account
|
||||
}
|
||||
|
||||
return store, store.persist(ctx, store.storeFile)
|
||||
}
|
||||
|
||||
// restore the state of the store from the file.
|
||||
// Creates a new empty store file if doesn't exist
|
||||
func restore(ctx context.Context, file string) (*FileStore, error) {
|
||||
@ -240,7 +57,6 @@ func restore(ctx context.Context, file string) (*FileStore, error) {
|
||||
s := &FileStore{
|
||||
Accounts: make(map[string]*Account),
|
||||
mux: sync.Mutex{},
|
||||
globalAccountLock: sync.Mutex{},
|
||||
SetupKeyID2AccountID: make(map[string]string),
|
||||
PeerKeyID2AccountID: make(map[string]string),
|
||||
UserID2AccountID: make(map[string]string),
|
||||
@ -416,252 +232,6 @@ func (s *FileStore) persist(ctx context.Context, file string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// AcquireGlobalLock acquires global lock across all the accounts and returns a function that releases the lock
|
||||
func (s *FileStore) AcquireGlobalLock(ctx context.Context) (unlock func()) {
|
||||
log.WithContext(ctx).Debugf("acquiring global lock")
|
||||
start := time.Now()
|
||||
s.globalAccountLock.Lock()
|
||||
|
||||
unlock = func() {
|
||||
s.globalAccountLock.Unlock()
|
||||
log.WithContext(ctx).Debugf("released global lock in %v", time.Since(start))
|
||||
}
|
||||
|
||||
took := time.Since(start)
|
||||
log.WithContext(ctx).Debugf("took %v to acquire global lock", took)
|
||||
if s.metrics != nil {
|
||||
s.metrics.StoreMetrics().CountGlobalLockAcquisitionDuration(took)
|
||||
}
|
||||
|
||||
return unlock
|
||||
}
|
||||
|
||||
// AcquireWriteLockByUID acquires an ID lock for writing to a resource and returns a function that releases the lock
|
||||
func (s *FileStore) AcquireWriteLockByUID(ctx context.Context, uniqueID string) (unlock func()) {
|
||||
log.WithContext(ctx).Debugf("acquiring lock for ID %s", uniqueID)
|
||||
start := time.Now()
|
||||
value, _ := s.resourceLocks.LoadOrStore(uniqueID, &sync.Mutex{})
|
||||
mtx := value.(*sync.Mutex)
|
||||
mtx.Lock()
|
||||
|
||||
unlock = func() {
|
||||
mtx.Unlock()
|
||||
log.WithContext(ctx).Debugf("released lock for ID %s in %v", uniqueID, time.Since(start))
|
||||
}
|
||||
|
||||
return unlock
|
||||
}
|
||||
|
||||
// AcquireReadLockByUID acquires an ID lock for reading a resource and returns a function that releases the lock
|
||||
// This method is still returns a write lock as file store can't handle read locks
|
||||
func (s *FileStore) AcquireReadLockByUID(ctx context.Context, uniqueID string) (unlock func()) {
|
||||
return s.AcquireWriteLockByUID(ctx, uniqueID)
|
||||
}
|
||||
|
||||
func (s *FileStore) SaveAccount(ctx context.Context, account *Account) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
if account.Id == "" {
|
||||
return status.Errorf(status.InvalidArgument, "account id should not be empty")
|
||||
}
|
||||
|
||||
accountCopy := account.Copy()
|
||||
|
||||
s.Accounts[accountCopy.Id] = accountCopy
|
||||
|
||||
// todo check that account.Id and keyId are not exist already
|
||||
// because if keyId exists for other accounts this can be bad
|
||||
for keyID := range accountCopy.SetupKeys {
|
||||
s.SetupKeyID2AccountID[strings.ToUpper(keyID)] = accountCopy.Id
|
||||
}
|
||||
|
||||
// enforce peer to account index and delete peer to route indexes for rebuild
|
||||
for _, peer := range accountCopy.Peers {
|
||||
s.PeerKeyID2AccountID[peer.Key] = accountCopy.Id
|
||||
s.PeerID2AccountID[peer.ID] = accountCopy.Id
|
||||
}
|
||||
|
||||
for _, user := range accountCopy.Users {
|
||||
s.UserID2AccountID[user.Id] = accountCopy.Id
|
||||
for _, pat := range user.PATs {
|
||||
s.TokenID2UserID[pat.ID] = user.Id
|
||||
s.HashedPAT2TokenID[pat.HashedToken] = pat.ID
|
||||
}
|
||||
}
|
||||
|
||||
if accountCopy.DomainCategory == PrivateCategory && accountCopy.IsDomainPrimaryAccount {
|
||||
s.PrivateDomain2AccountID[accountCopy.Domain] = accountCopy.Id
|
||||
}
|
||||
|
||||
return s.persist(ctx, s.storeFile)
|
||||
}
|
||||
|
||||
func (s *FileStore) DeleteAccount(ctx context.Context, account *Account) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
if account.Id == "" {
|
||||
return status.Errorf(status.InvalidArgument, "account id should not be empty")
|
||||
}
|
||||
|
||||
for keyID := range account.SetupKeys {
|
||||
delete(s.SetupKeyID2AccountID, strings.ToUpper(keyID))
|
||||
}
|
||||
|
||||
// enforce peer to account index and delete peer to route indexes for rebuild
|
||||
for _, peer := range account.Peers {
|
||||
delete(s.PeerKeyID2AccountID, peer.Key)
|
||||
delete(s.PeerID2AccountID, peer.ID)
|
||||
}
|
||||
|
||||
for _, user := range account.Users {
|
||||
for _, pat := range user.PATs {
|
||||
delete(s.TokenID2UserID, pat.ID)
|
||||
delete(s.HashedPAT2TokenID, pat.HashedToken)
|
||||
}
|
||||
delete(s.UserID2AccountID, user.Id)
|
||||
}
|
||||
|
||||
if account.DomainCategory == PrivateCategory && account.IsDomainPrimaryAccount {
|
||||
delete(s.PrivateDomain2AccountID, account.Domain)
|
||||
}
|
||||
|
||||
delete(s.Accounts, account.Id)
|
||||
|
||||
return s.persist(ctx, s.storeFile)
|
||||
}
|
||||
|
||||
// DeleteHashedPAT2TokenIDIndex removes an entry from the indexing map HashedPAT2TokenID
|
||||
func (s *FileStore) DeleteHashedPAT2TokenIDIndex(hashedToken string) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
delete(s.HashedPAT2TokenID, hashedToken)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteTokenID2UserIDIndex removes an entry from the indexing map TokenID2UserID
|
||||
func (s *FileStore) DeleteTokenID2UserIDIndex(tokenID string) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
delete(s.TokenID2UserID, tokenID)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetAccountByPrivateDomain returns account by private domain
|
||||
func (s *FileStore) GetAccountByPrivateDomain(_ context.Context, domain string) (*Account, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
accountID, ok := s.PrivateDomain2AccountID[strings.ToLower(domain)]
|
||||
if !ok {
|
||||
return nil, status.Errorf(status.NotFound, "account not found: provided domain is not registered or is not private")
|
||||
}
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return account.Copy(), nil
|
||||
}
|
||||
|
||||
// GetAccountBySetupKey returns account by setup key id
|
||||
func (s *FileStore) GetAccountBySetupKey(_ context.Context, setupKey string) (*Account, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
accountID, ok := s.SetupKeyID2AccountID[strings.ToUpper(setupKey)]
|
||||
if !ok {
|
||||
return nil, status.NewSetupKeyNotFoundError()
|
||||
}
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return account.Copy(), nil
|
||||
}
|
||||
|
||||
// GetTokenIDByHashedToken returns the id of a personal access token by its hashed secret
|
||||
func (s *FileStore) GetTokenIDByHashedToken(_ context.Context, token string) (string, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
tokenID, ok := s.HashedPAT2TokenID[token]
|
||||
if !ok {
|
||||
return "", status.Errorf(status.NotFound, "tokenID not found: provided token doesn't exists")
|
||||
}
|
||||
|
||||
return tokenID, nil
|
||||
}
|
||||
|
||||
// GetUserByTokenID returns a User object a tokenID belongs to
|
||||
func (s *FileStore) GetUserByTokenID(_ context.Context, tokenID string) (*User, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
userID, ok := s.TokenID2UserID[tokenID]
|
||||
if !ok {
|
||||
return nil, status.Errorf(status.NotFound, "user not found: provided tokenID doesn't exists")
|
||||
}
|
||||
|
||||
accountID, ok := s.UserID2AccountID[userID]
|
||||
if !ok {
|
||||
return nil, status.Errorf(status.NotFound, "accountID not found: provided userID doesn't exists")
|
||||
}
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return account.Users[userID].Copy(), nil
|
||||
}
|
||||
|
||||
func (s *FileStore) GetUserByUserID(_ context.Context, _ LockingStrength, userID string) (*User, error) {
|
||||
accountID, ok := s.UserID2AccountID[userID]
|
||||
if !ok {
|
||||
return nil, status.Errorf(status.NotFound, "accountID not found: provided userID doesn't exists")
|
||||
}
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
user := account.Users[userID].Copy()
|
||||
pat := make([]PersonalAccessToken, 0, len(user.PATs))
|
||||
for _, token := range user.PATs {
|
||||
if token != nil {
|
||||
pat = append(pat, *token)
|
||||
}
|
||||
}
|
||||
user.PATsG = pat
|
||||
|
||||
return user, nil
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountGroups(_ context.Context, accountID string) ([]*nbgroup.Group, error) {
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
groupsSlice := make([]*nbgroup.Group, 0, len(account.Groups))
|
||||
|
||||
for _, group := range account.Groups {
|
||||
groupsSlice = append(groupsSlice, group)
|
||||
}
|
||||
|
||||
return groupsSlice, nil
|
||||
}
|
||||
|
||||
// GetAllAccounts returns all accounts
|
||||
func (s *FileStore) GetAllAccounts(_ context.Context) (all []*Account) {
|
||||
s.mux.Lock()
|
||||
@ -673,278 +243,6 @@ func (s *FileStore) GetAllAccounts(_ context.Context) (all []*Account) {
|
||||
return all
|
||||
}
|
||||
|
||||
// getAccount returns a reference to the Account. Should not return a copy.
|
||||
func (s *FileStore) getAccount(accountID string) (*Account, error) {
|
||||
account, ok := s.Accounts[accountID]
|
||||
if !ok {
|
||||
return nil, status.NewAccountNotFoundError(accountID)
|
||||
}
|
||||
|
||||
return account, nil
|
||||
}
|
||||
|
||||
// GetAccount returns an account for ID
|
||||
func (s *FileStore) GetAccount(_ context.Context, accountID string) (*Account, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return account.Copy(), nil
|
||||
}
|
||||
|
||||
// GetAccountByUser returns a user account
|
||||
func (s *FileStore) GetAccountByUser(_ context.Context, userID string) (*Account, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
accountID, ok := s.UserID2AccountID[userID]
|
||||
if !ok {
|
||||
return nil, status.NewUserNotFoundError(userID)
|
||||
}
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return account.Copy(), nil
|
||||
}
|
||||
|
||||
// GetAccountByPeerID returns an account for a given peer ID
|
||||
func (s *FileStore) GetAccountByPeerID(ctx context.Context, peerID string) (*Account, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
accountID, ok := s.PeerID2AccountID[peerID]
|
||||
if !ok {
|
||||
return nil, status.Errorf(status.NotFound, "provided peer ID doesn't exists %s", peerID)
|
||||
}
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// this protection is needed because when we delete a peer, we don't really remove index peerID -> accountID.
|
||||
// check Account.Peers for a match
|
||||
if _, ok := account.Peers[peerID]; !ok {
|
||||
delete(s.PeerID2AccountID, peerID)
|
||||
log.WithContext(ctx).Warnf("removed stale peerID %s to accountID %s index", peerID, accountID)
|
||||
return nil, status.NewPeerNotFoundError(peerID)
|
||||
}
|
||||
|
||||
return account.Copy(), nil
|
||||
}
|
||||
|
||||
// GetAccountByPeerPubKey returns an account for a given peer WireGuard public key
|
||||
func (s *FileStore) GetAccountByPeerPubKey(ctx context.Context, peerKey string) (*Account, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
accountID, ok := s.PeerKeyID2AccountID[peerKey]
|
||||
if !ok {
|
||||
return nil, status.NewPeerNotFoundError(peerKey)
|
||||
}
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// this protection is needed because when we delete a peer, we don't really remove index peerKey -> accountID.
|
||||
// check Account.Peers for a match
|
||||
stale := true
|
||||
for _, peer := range account.Peers {
|
||||
if peer.Key == peerKey {
|
||||
stale = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if stale {
|
||||
delete(s.PeerKeyID2AccountID, peerKey)
|
||||
log.WithContext(ctx).Warnf("removed stale peerKey %s to accountID %s index", peerKey, accountID)
|
||||
return nil, status.NewPeerNotFoundError(peerKey)
|
||||
}
|
||||
|
||||
return account.Copy(), nil
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountIDByPeerPubKey(_ context.Context, peerKey string) (string, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
accountID, ok := s.PeerKeyID2AccountID[peerKey]
|
||||
if !ok {
|
||||
return "", status.NewPeerNotFoundError(peerKey)
|
||||
}
|
||||
|
||||
return accountID, nil
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountIDByUserID(userID string) (string, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
accountID, ok := s.UserID2AccountID[userID]
|
||||
if !ok {
|
||||
return "", status.NewUserNotFoundError(userID)
|
||||
}
|
||||
|
||||
return accountID, nil
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountIDBySetupKey(_ context.Context, setupKey string) (string, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
accountID, ok := s.SetupKeyID2AccountID[strings.ToUpper(setupKey)]
|
||||
if !ok {
|
||||
return "", status.NewSetupKeyNotFoundError()
|
||||
}
|
||||
|
||||
return accountID, nil
|
||||
}
|
||||
|
||||
func (s *FileStore) GetPeerByPeerPubKey(_ context.Context, _ LockingStrength, peerKey string) (*nbpeer.Peer, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
accountID, ok := s.PeerKeyID2AccountID[peerKey]
|
||||
if !ok {
|
||||
return nil, status.NewPeerNotFoundError(peerKey)
|
||||
}
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, peer := range account.Peers {
|
||||
if peer.Key == peerKey {
|
||||
return peer.Copy(), nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, status.NewPeerNotFoundError(peerKey)
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountSettings(_ context.Context, _ LockingStrength, accountID string) (*Settings, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return account.Settings.Copy(), nil
|
||||
}
|
||||
|
||||
// GetInstallationID returns the installation ID from the store
|
||||
func (s *FileStore) GetInstallationID() string {
|
||||
return s.InstallationID
|
||||
}
|
||||
|
||||
// SaveInstallationID saves the installation ID
|
||||
func (s *FileStore) SaveInstallationID(ctx context.Context, ID string) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
s.InstallationID = ID
|
||||
|
||||
return s.persist(ctx, s.storeFile)
|
||||
}
|
||||
|
||||
// SavePeer saves the peer in the account
|
||||
func (s *FileStore) SavePeer(_ context.Context, accountID string, peer *nbpeer.Peer) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
newPeer := peer.Copy()
|
||||
|
||||
account.Peers[peer.ID] = newPeer
|
||||
|
||||
s.PeerKeyID2AccountID[peer.Key] = accountID
|
||||
s.PeerID2AccountID[peer.ID] = accountID
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SavePeerStatus stores the PeerStatus in memory. It doesn't attempt to persist data to speed up things.
|
||||
// PeerStatus will be saved eventually when some other changes occur.
|
||||
func (s *FileStore) SavePeerStatus(accountID, peerID string, peerStatus nbpeer.PeerStatus) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
peer := account.Peers[peerID]
|
||||
if peer == nil {
|
||||
return status.Errorf(status.NotFound, "peer %s not found", peerID)
|
||||
}
|
||||
|
||||
peer.Status = &peerStatus
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SavePeerLocation stores the PeerStatus in memory. It doesn't attempt to persist data to speed up things.
|
||||
// Peer.Location will be saved eventually when some other changes occur.
|
||||
func (s *FileStore) SavePeerLocation(accountID string, peerWithLocation *nbpeer.Peer) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
peer := account.Peers[peerWithLocation.ID]
|
||||
if peer == nil {
|
||||
return status.Errorf(status.NotFound, "peer %s not found", peerWithLocation.ID)
|
||||
}
|
||||
|
||||
peer.Location = peerWithLocation.Location
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SaveUserLastLogin stores the last login time for a user in memory. It doesn't attempt to persist data to speed up things.
|
||||
func (s *FileStore) SaveUserLastLogin(_ context.Context, accountID, userID string, lastLogin time.Time) error {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
peer := account.Users[userID]
|
||||
if peer == nil {
|
||||
return status.Errorf(status.NotFound, "user %s not found", userID)
|
||||
}
|
||||
|
||||
peer.LastLogin = lastLogin
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *FileStore) GetPostureCheckByChecksDefinition(_ string, _ *posture.ChecksDefinition) (*posture.Checks, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetPostureCheckByChecksDefinition is not implemented")
|
||||
}
|
||||
|
||||
// Close the FileStore persisting data to disk
|
||||
func (s *FileStore) Close(ctx context.Context) error {
|
||||
s.mux.Lock()
|
||||
@ -959,86 +257,3 @@ func (s *FileStore) Close(ctx context.Context) error {
|
||||
func (s *FileStore) GetStoreEngine() StoreEngine {
|
||||
return FileStoreEngine
|
||||
}
|
||||
|
||||
func (s *FileStore) SaveUsers(_ string, _ map[string]*User) error {
|
||||
return status.Errorf(status.Internal, "SaveUsers is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) SaveGroups(_ string, _ map[string]*nbgroup.Group) error {
|
||||
return status.Errorf(status.Internal, "SaveGroups is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountIDByPrivateDomain(_ context.Context, _ LockingStrength, _ string) (string, error) {
|
||||
return "", status.Errorf(status.Internal, "GetAccountIDByPrivateDomain is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountDomainAndCategory(_ context.Context, _ LockingStrength, accountID string) (string, string, error) {
|
||||
s.mux.Lock()
|
||||
defer s.mux.Unlock()
|
||||
|
||||
account, err := s.getAccount(accountID)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
|
||||
return account.Domain, account.DomainCategory, nil
|
||||
}
|
||||
|
||||
// AccountExists checks whether an account exists by the given ID.
|
||||
func (s *FileStore) AccountExists(_ context.Context, _ LockingStrength, id string) (bool, error) {
|
||||
_, exists := s.Accounts[id]
|
||||
return exists, nil
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountDNSSettings(_ context.Context, _ LockingStrength, _ string) (*DNSSettings, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetAccountDNSSettings is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) GetGroupByID(_ context.Context, _ LockingStrength, _, _ string) (*nbgroup.Group, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetGroupByID is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) GetGroupByName(_ context.Context, _ LockingStrength, _, _ string) (*nbgroup.Group, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetGroupByName is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountPolicies(_ context.Context, _ LockingStrength, _ string) ([]*Policy, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetPolicyByID is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) GetPolicyByID(_ context.Context, _ LockingStrength, _ string, _ string) (*Policy, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetPolicyByID is not implemented")
|
||||
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountPostureChecks(_ context.Context, _ LockingStrength, _ string) ([]*posture.Checks, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetAccountPostureChecks is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) GetPostureChecksByID(_ context.Context, _ LockingStrength, _ string, _ string) (*posture.Checks, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetPostureChecksByID is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountRoutes(_ context.Context, _ LockingStrength, _ string) ([]*route.Route, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetAccountRoutes is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) GetRouteByID(_ context.Context, _ LockingStrength, _ string, _ string) (*route.Route, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetRouteByID is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountSetupKeys(_ context.Context, _ LockingStrength, _ string) ([]*SetupKey, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetAccountSetupKeys is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) GetSetupKeyByID(_ context.Context, _ LockingStrength, _ string, _ string) (*SetupKey, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetSetupKeyByID is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) GetAccountNameServerGroups(_ context.Context, _ LockingStrength, _ string) ([]*dns.NameServerGroup, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetAccountNameServerGroups is not implemented")
|
||||
}
|
||||
|
||||
func (s *FileStore) GetNameServerGroupByID(_ context.Context, _ LockingStrength, _ string, _ string) (*dns.NameServerGroup, error) {
|
||||
return nil, status.Errorf(status.Internal, "GetNameServerGroupByID is not implemented")
|
||||
}
|
||||
|
@ -1,655 +0,0 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/sha256"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/netbirdio/netbird/management/server/group"
|
||||
nbpeer "github.com/netbirdio/netbird/management/server/peer"
|
||||
"github.com/netbirdio/netbird/util"
|
||||
)
|
||||
|
||||
type accounts struct {
|
||||
Accounts map[string]*Account
|
||||
}
|
||||
|
||||
func TestStalePeerIndices(t *testing.T) {
|
||||
storeDir := t.TempDir()
|
||||
|
||||
err := util.CopyFileContents("testdata/store.json", filepath.Join(storeDir, "store.json"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
account, err := store.GetAccount(context.Background(), "bf1c8084-ba50-4ce7-9439-34653001fc3b")
|
||||
require.NoError(t, err)
|
||||
|
||||
peerID := "some_peer"
|
||||
peerKey := "some_peer_key"
|
||||
account.Peers[peerID] = &nbpeer.Peer{
|
||||
ID: peerID,
|
||||
Key: peerKey,
|
||||
}
|
||||
|
||||
err = store.SaveAccount(context.Background(), account)
|
||||
require.NoError(t, err)
|
||||
|
||||
account.DeletePeer(peerID)
|
||||
|
||||
err = store.SaveAccount(context.Background(), account)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = store.GetAccountByPeerID(context.Background(), peerID)
|
||||
require.Error(t, err, "expecting to get an error when found stale index")
|
||||
|
||||
_, err = store.GetAccountByPeerPubKey(context.Background(), peerKey)
|
||||
require.Error(t, err, "expecting to get an error when found stale index")
|
||||
}
|
||||
|
||||
func TestNewStore(t *testing.T) {
|
||||
store := newStore(t)
|
||||
defer store.Close(context.Background())
|
||||
|
||||
if store.Accounts == nil || len(store.Accounts) != 0 {
|
||||
t.Errorf("expected to create a new empty Accounts map when creating a new FileStore")
|
||||
}
|
||||
|
||||
if store.SetupKeyID2AccountID == nil || len(store.SetupKeyID2AccountID) != 0 {
|
||||
t.Errorf("expected to create a new empty SetupKeyID2AccountID map when creating a new FileStore")
|
||||
}
|
||||
|
||||
if store.PeerKeyID2AccountID == nil || len(store.PeerKeyID2AccountID) != 0 {
|
||||
t.Errorf("expected to create a new empty PeerKeyID2AccountID map when creating a new FileStore")
|
||||
}
|
||||
|
||||
if store.UserID2AccountID == nil || len(store.UserID2AccountID) != 0 {
|
||||
t.Errorf("expected to create a new empty UserID2AccountID map when creating a new FileStore")
|
||||
}
|
||||
|
||||
if store.HashedPAT2TokenID == nil || len(store.HashedPAT2TokenID) != 0 {
|
||||
t.Errorf("expected to create a new empty HashedPAT2TokenID map when creating a new FileStore")
|
||||
}
|
||||
|
||||
if store.TokenID2UserID == nil || len(store.TokenID2UserID) != 0 {
|
||||
t.Errorf("expected to create a new empty TokenID2UserID map when creating a new FileStore")
|
||||
}
|
||||
}
|
||||
|
||||
func TestSaveAccount(t *testing.T) {
|
||||
store := newStore(t)
|
||||
defer store.Close(context.Background())
|
||||
|
||||
account := newAccountWithId(context.Background(), "account_id", "testuser", "")
|
||||
setupKey := GenerateDefaultSetupKey()
|
||||
account.SetupKeys[setupKey.Key] = setupKey
|
||||
account.Peers["testpeer"] = &nbpeer.Peer{
|
||||
Key: "peerkey",
|
||||
SetupKey: "peerkeysetupkey",
|
||||
IP: net.IP{127, 0, 0, 1},
|
||||
Meta: nbpeer.PeerSystemMeta{},
|
||||
Name: "peer name",
|
||||
Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now().UTC()},
|
||||
}
|
||||
|
||||
// SaveAccount should trigger persist
|
||||
err := store.SaveAccount(context.Background(), account)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if store.Accounts[account.Id] == nil {
|
||||
t.Errorf("expecting Account to be stored after SaveAccount()")
|
||||
}
|
||||
|
||||
if store.PeerKeyID2AccountID["peerkey"] == "" {
|
||||
t.Errorf("expecting PeerKeyID2AccountID index updated after SaveAccount()")
|
||||
}
|
||||
|
||||
if store.UserID2AccountID["testuser"] == "" {
|
||||
t.Errorf("expecting UserID2AccountID index updated after SaveAccount()")
|
||||
}
|
||||
|
||||
if store.SetupKeyID2AccountID[setupKey.Key] == "" {
|
||||
t.Errorf("expecting SetupKeyID2AccountID index updated after SaveAccount()")
|
||||
}
|
||||
}
|
||||
|
||||
func TestDeleteAccount(t *testing.T) {
|
||||
storeDir := t.TempDir()
|
||||
storeFile := filepath.Join(storeDir, "store.json")
|
||||
err := util.CopyFileContents("testdata/store.json", storeFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer store.Close(context.Background())
|
||||
|
||||
var account *Account
|
||||
for _, a := range store.Accounts {
|
||||
account = a
|
||||
break
|
||||
}
|
||||
|
||||
require.NotNil(t, account, "failed to restore a FileStore file and get at least one account")
|
||||
|
||||
err = store.DeleteAccount(context.Background(), account)
|
||||
require.NoError(t, err, "failed to delete account, error: %v", err)
|
||||
|
||||
_, ok := store.Accounts[account.Id]
|
||||
require.False(t, ok, "failed to delete account")
|
||||
|
||||
for id := range account.Users {
|
||||
_, ok := store.UserID2AccountID[id]
|
||||
assert.False(t, ok, "failed to delete UserID2AccountID index")
|
||||
for _, pat := range account.Users[id].PATs {
|
||||
_, ok := store.HashedPAT2TokenID[pat.HashedToken]
|
||||
assert.False(t, ok, "failed to delete HashedPAT2TokenID index")
|
||||
_, ok = store.TokenID2UserID[pat.ID]
|
||||
assert.False(t, ok, "failed to delete TokenID2UserID index")
|
||||
}
|
||||
}
|
||||
|
||||
for _, p := range account.Peers {
|
||||
_, ok := store.PeerKeyID2AccountID[p.Key]
|
||||
assert.False(t, ok, "failed to delete PeerKeyID2AccountID index")
|
||||
_, ok = store.PeerID2AccountID[p.ID]
|
||||
assert.False(t, ok, "failed to delete PeerID2AccountID index")
|
||||
}
|
||||
|
||||
for id := range account.SetupKeys {
|
||||
_, ok := store.SetupKeyID2AccountID[id]
|
||||
assert.False(t, ok, "failed to delete SetupKeyID2AccountID index")
|
||||
}
|
||||
|
||||
_, ok = store.PrivateDomain2AccountID[account.Domain]
|
||||
assert.False(t, ok, "failed to delete PrivateDomain2AccountID index")
|
||||
|
||||
}
|
||||
|
||||
func TestStore(t *testing.T) {
|
||||
store := newStore(t)
|
||||
defer store.Close(context.Background())
|
||||
|
||||
account := newAccountWithId(context.Background(), "account_id", "testuser", "")
|
||||
account.Peers["testpeer"] = &nbpeer.Peer{
|
||||
Key: "peerkey",
|
||||
SetupKey: "peerkeysetupkey",
|
||||
IP: net.IP{127, 0, 0, 1},
|
||||
Meta: nbpeer.PeerSystemMeta{},
|
||||
Name: "peer name",
|
||||
Status: &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now().UTC()},
|
||||
}
|
||||
account.Groups["all"] = &group.Group{
|
||||
ID: "all",
|
||||
Name: "all",
|
||||
Peers: []string{"testpeer"},
|
||||
}
|
||||
account.Policies = append(account.Policies, &Policy{
|
||||
ID: "all",
|
||||
Name: "all",
|
||||
Enabled: true,
|
||||
Rules: []*PolicyRule{
|
||||
{
|
||||
ID: "all",
|
||||
Name: "all",
|
||||
Sources: []string{"all"},
|
||||
Destinations: []string{"all"},
|
||||
},
|
||||
},
|
||||
})
|
||||
account.Policies = append(account.Policies, &Policy{
|
||||
ID: "dmz",
|
||||
Name: "dmz",
|
||||
Enabled: true,
|
||||
Rules: []*PolicyRule{
|
||||
{
|
||||
ID: "dmz",
|
||||
Name: "dmz",
|
||||
Enabled: true,
|
||||
Sources: []string{"all"},
|
||||
Destinations: []string{"all"},
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
// SaveAccount should trigger persist
|
||||
err := store.SaveAccount(context.Background(), account)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
restored, err := NewFileStore(context.Background(), store.storeFile, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
restoredAccount := restored.Accounts[account.Id]
|
||||
if restoredAccount == nil {
|
||||
t.Errorf("failed to restore a FileStore file - missing Account %s", account.Id)
|
||||
return
|
||||
}
|
||||
|
||||
if restoredAccount.Peers["testpeer"] == nil {
|
||||
t.Errorf("failed to restore a FileStore file - missing Peer testpeer")
|
||||
}
|
||||
|
||||
if restoredAccount.CreatedBy != "testuser" {
|
||||
t.Errorf("failed to restore a FileStore file - missing Account CreatedBy")
|
||||
}
|
||||
|
||||
if restoredAccount.Users["testuser"] == nil {
|
||||
t.Errorf("failed to restore a FileStore file - missing User testuser")
|
||||
}
|
||||
|
||||
if restoredAccount.Network == nil {
|
||||
t.Errorf("failed to restore a FileStore file - missing Network")
|
||||
}
|
||||
|
||||
if restoredAccount.Groups["all"] == nil {
|
||||
t.Errorf("failed to restore a FileStore file - missing Group all")
|
||||
}
|
||||
|
||||
if len(restoredAccount.Policies) != 2 {
|
||||
t.Errorf("failed to restore a FileStore file - missing Policies")
|
||||
return
|
||||
}
|
||||
|
||||
assert.Equal(t, account.Policies[0], restoredAccount.Policies[0], "failed to restore a FileStore file - missing Policy all")
|
||||
assert.Equal(t, account.Policies[1], restoredAccount.Policies[1], "failed to restore a FileStore file - missing Policy dmz")
|
||||
}
|
||||
|
||||
func TestRestore(t *testing.T) {
|
||||
storeDir := t.TempDir()
|
||||
|
||||
err := util.CopyFileContents("testdata/store.json", filepath.Join(storeDir, "store.json"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
account := store.Accounts["bf1c8084-ba50-4ce7-9439-34653001fc3b"]
|
||||
|
||||
require.NotNil(t, account, "failed to restore a FileStore file - missing account bf1c8084-ba50-4ce7-9439-34653001fc3b")
|
||||
|
||||
require.NotNil(t, account.Users["edafee4e-63fb-11ec-90d6-0242ac120003"], "failed to restore a FileStore file - missing Account User edafee4e-63fb-11ec-90d6-0242ac120003")
|
||||
|
||||
require.NotNil(t, account.Users["f4f6d672-63fb-11ec-90d6-0242ac120003"], "failed to restore a FileStore file - missing Account User f4f6d672-63fb-11ec-90d6-0242ac120003")
|
||||
|
||||
require.NotNil(t, account.Network, "failed to restore a FileStore file - missing Account Network")
|
||||
|
||||
require.NotNil(t, account.SetupKeys["A2C8E62B-38F5-4553-B31E-DD66C696CEBB"], "failed to restore a FileStore file - missing Account SetupKey A2C8E62B-38F5-4553-B31E-DD66C696CEBB")
|
||||
|
||||
require.NotNil(t, account.Users["f4f6d672-63fb-11ec-90d6-0242ac120003"].PATs["9dj38s35-63fb-11ec-90d6-0242ac120003"], "failed to restore a FileStore wrong PATs length")
|
||||
|
||||
require.Len(t, store.UserID2AccountID, 2, "failed to restore a FileStore wrong UserID2AccountID mapping length")
|
||||
|
||||
require.Len(t, store.SetupKeyID2AccountID, 1, "failed to restore a FileStore wrong SetupKeyID2AccountID mapping length")
|
||||
|
||||
require.Len(t, store.PrivateDomain2AccountID, 1, "failed to restore a FileStore wrong PrivateDomain2AccountID mapping length")
|
||||
|
||||
require.Len(t, store.HashedPAT2TokenID, 1, "failed to restore a FileStore wrong HashedPAT2TokenID mapping length")
|
||||
|
||||
require.Len(t, store.TokenID2UserID, 1, "failed to restore a FileStore wrong TokenID2UserID mapping length")
|
||||
}
|
||||
|
||||
func TestRestoreGroups_Migration(t *testing.T) {
|
||||
storeDir := t.TempDir()
|
||||
|
||||
err := util.CopyFileContents("testdata/store.json", filepath.Join(storeDir, "store.json"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// create default group
|
||||
account := store.Accounts["bf1c8084-ba50-4ce7-9439-34653001fc3b"]
|
||||
account.Groups = map[string]*group.Group{
|
||||
"cfefqs706sqkneg59g3g": {
|
||||
ID: "cfefqs706sqkneg59g3g",
|
||||
Name: "All",
|
||||
},
|
||||
}
|
||||
err = store.SaveAccount(context.Background(), account)
|
||||
require.NoError(t, err, "failed to save account")
|
||||
|
||||
// restore account with default group with empty Issue field
|
||||
if store, err = NewFileStore(context.Background(), storeDir, nil); err != nil {
|
||||
return
|
||||
}
|
||||
account = store.Accounts["bf1c8084-ba50-4ce7-9439-34653001fc3b"]
|
||||
|
||||
require.Contains(t, account.Groups, "cfefqs706sqkneg59g3g", "failed to restore a FileStore file - missing Account Groups")
|
||||
require.Equal(t, group.GroupIssuedAPI, account.Groups["cfefqs706sqkneg59g3g"].Issued, "default group should has API issued mark")
|
||||
}
|
||||
|
||||
func TestGetAccountByPrivateDomain(t *testing.T) {
|
||||
storeDir := t.TempDir()
|
||||
|
||||
err := util.CopyFileContents("testdata/store.json", filepath.Join(storeDir, "store.json"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
existingDomain := "test.com"
|
||||
|
||||
account, err := store.GetAccountByPrivateDomain(context.Background(), existingDomain)
|
||||
require.NoError(t, err, "should found account")
|
||||
require.Equal(t, existingDomain, account.Domain, "domains should match")
|
||||
|
||||
_, err = store.GetAccountByPrivateDomain(context.Background(), "missing-domain.com")
|
||||
require.Error(t, err, "should return error on domain lookup")
|
||||
}
|
||||
|
||||
func TestFileStore_GetAccount(t *testing.T) {
|
||||
storeDir := t.TempDir()
|
||||
storeFile := filepath.Join(storeDir, "store.json")
|
||||
err := util.CopyFileContents("testdata/store.json", storeFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
accounts := &accounts{}
|
||||
_, err = util.ReadJson(storeFile, accounts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expected := accounts.Accounts["bf1c8084-ba50-4ce7-9439-34653001fc3b"]
|
||||
if expected == nil {
|
||||
t.Fatalf("expected account doesn't exist")
|
||||
return
|
||||
}
|
||||
|
||||
account, err := store.GetAccount(context.Background(), expected.Id)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.Equal(t, expected.IsDomainPrimaryAccount, account.IsDomainPrimaryAccount)
|
||||
assert.Equal(t, expected.DomainCategory, account.DomainCategory)
|
||||
assert.Equal(t, expected.Domain, account.Domain)
|
||||
assert.Equal(t, expected.CreatedBy, account.CreatedBy)
|
||||
assert.Equal(t, expected.Network.Identifier, account.Network.Identifier)
|
||||
assert.Len(t, account.Peers, len(expected.Peers))
|
||||
assert.Len(t, account.Users, len(expected.Users))
|
||||
assert.Len(t, account.SetupKeys, len(expected.SetupKeys))
|
||||
assert.Len(t, account.Routes, len(expected.Routes))
|
||||
assert.Len(t, account.NameServerGroups, len(expected.NameServerGroups))
|
||||
}
|
||||
|
||||
func TestFileStore_GetTokenIDByHashedToken(t *testing.T) {
|
||||
storeDir := t.TempDir()
|
||||
storeFile := filepath.Join(storeDir, "store.json")
|
||||
err := util.CopyFileContents("testdata/store.json", storeFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
accounts := &accounts{}
|
||||
_, err = util.ReadJson(storeFile, accounts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
hashedToken := accounts.Accounts["bf1c8084-ba50-4ce7-9439-34653001fc3b"].Users["f4f6d672-63fb-11ec-90d6-0242ac120003"].PATs["9dj38s35-63fb-11ec-90d6-0242ac120003"].HashedToken
|
||||
tokenID, err := store.GetTokenIDByHashedToken(context.Background(), hashedToken)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
expectedTokenID := accounts.Accounts["bf1c8084-ba50-4ce7-9439-34653001fc3b"].Users["f4f6d672-63fb-11ec-90d6-0242ac120003"].PATs["9dj38s35-63fb-11ec-90d6-0242ac120003"].ID
|
||||
assert.Equal(t, expectedTokenID, tokenID)
|
||||
}
|
||||
|
||||
func TestFileStore_DeleteHashedPAT2TokenIDIndex(t *testing.T) {
|
||||
store := newStore(t)
|
||||
defer store.Close(context.Background())
|
||||
store.HashedPAT2TokenID["someHashedToken"] = "someTokenId"
|
||||
|
||||
err := store.DeleteHashedPAT2TokenIDIndex("someHashedToken")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.Empty(t, store.HashedPAT2TokenID["someHashedToken"])
|
||||
}
|
||||
|
||||
func TestFileStore_DeleteTokenID2UserIDIndex(t *testing.T) {
|
||||
store := newStore(t)
|
||||
store.TokenID2UserID["someTokenId"] = "someUserId"
|
||||
|
||||
err := store.DeleteTokenID2UserIDIndex("someTokenId")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.Empty(t, store.TokenID2UserID["someTokenId"])
|
||||
}
|
||||
|
||||
func TestFileStore_GetTokenIDByHashedToken_Failure(t *testing.T) {
|
||||
storeDir := t.TempDir()
|
||||
storeFile := filepath.Join(storeDir, "store.json")
|
||||
err := util.CopyFileContents("testdata/store.json", storeFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
accounts := &accounts{}
|
||||
_, err = util.ReadJson(storeFile, accounts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wrongToken := sha256.Sum256([]byte("someNotValidTokenThatFails1234"))
|
||||
_, err = store.GetTokenIDByHashedToken(context.Background(), string(wrongToken[:]))
|
||||
|
||||
assert.Error(t, err, "GetTokenIDByHashedToken should throw error if token invalid")
|
||||
}
|
||||
|
||||
func TestFileStore_GetUserByTokenID(t *testing.T) {
|
||||
storeDir := t.TempDir()
|
||||
storeFile := filepath.Join(storeDir, "store.json")
|
||||
err := util.CopyFileContents("testdata/store.json", storeFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
accounts := &accounts{}
|
||||
_, err = util.ReadJson(storeFile, accounts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
tokenID := accounts.Accounts["bf1c8084-ba50-4ce7-9439-34653001fc3b"].Users["f4f6d672-63fb-11ec-90d6-0242ac120003"].PATs["9dj38s35-63fb-11ec-90d6-0242ac120003"].ID
|
||||
user, err := store.GetUserByTokenID(context.Background(), tokenID)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.Equal(t, "f4f6d672-63fb-11ec-90d6-0242ac120003", user.Id)
|
||||
}
|
||||
|
||||
func TestFileStore_GetUserByTokenID_Failure(t *testing.T) {
|
||||
storeDir := t.TempDir()
|
||||
storeFile := filepath.Join(storeDir, "store.json")
|
||||
err := util.CopyFileContents("testdata/store.json", storeFile)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
accounts := &accounts{}
|
||||
_, err = util.ReadJson(storeFile, accounts)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
wrongTokenID := "someNonExistingTokenID"
|
||||
_, err = store.GetUserByTokenID(context.Background(), wrongTokenID)
|
||||
|
||||
assert.Error(t, err, "GetUserByTokenID should throw error if tokenID invalid")
|
||||
}
|
||||
|
||||
func TestFileStore_SavePeerStatus(t *testing.T) {
|
||||
storeDir := t.TempDir()
|
||||
|
||||
err := util.CopyFileContents("testdata/store.json", filepath.Join(storeDir, "store.json"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
account, err := store.getAccount("bf1c8084-ba50-4ce7-9439-34653001fc3b")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// save status of non-existing peer
|
||||
newStatus := nbpeer.PeerStatus{Connected: true, LastSeen: time.Now().UTC()}
|
||||
err = store.SavePeerStatus(account.Id, "non-existing-peer", newStatus)
|
||||
assert.Error(t, err)
|
||||
|
||||
// save new status of existing peer
|
||||
account.Peers["testpeer"] = &nbpeer.Peer{
|
||||
Key: "peerkey",
|
||||
ID: "testpeer",
|
||||
SetupKey: "peerkeysetupkey",
|
||||
IP: net.IP{127, 0, 0, 1},
|
||||
Meta: nbpeer.PeerSystemMeta{},
|
||||
Name: "peer name",
|
||||
Status: &nbpeer.PeerStatus{Connected: false, LastSeen: time.Now().UTC()},
|
||||
}
|
||||
|
||||
err = store.SaveAccount(context.Background(), account)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
err = store.SavePeerStatus(account.Id, "testpeer", newStatus)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
account, err = store.getAccount(account.Id)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
actual := account.Peers["testpeer"].Status
|
||||
assert.Equal(t, newStatus, *actual)
|
||||
}
|
||||
|
||||
func TestFileStore_SavePeerLocation(t *testing.T) {
|
||||
storeDir := t.TempDir()
|
||||
|
||||
err := util.CopyFileContents("testdata/store.json", filepath.Join(storeDir, "store.json"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
store, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
account, err := store.GetAccount(context.Background(), "bf1c8084-ba50-4ce7-9439-34653001fc3b")
|
||||
require.NoError(t, err)
|
||||
|
||||
peer := &nbpeer.Peer{
|
||||
AccountID: account.Id,
|
||||
ID: "testpeer",
|
||||
Location: nbpeer.Location{
|
||||
ConnectionIP: net.ParseIP("10.0.0.0"),
|
||||
CountryCode: "YY",
|
||||
CityName: "City",
|
||||
GeoNameID: 1,
|
||||
},
|
||||
Meta: nbpeer.PeerSystemMeta{},
|
||||
}
|
||||
// error is expected as peer is not in store yet
|
||||
err = store.SavePeerLocation(account.Id, peer)
|
||||
assert.Error(t, err)
|
||||
|
||||
account.Peers[peer.ID] = peer
|
||||
err = store.SaveAccount(context.Background(), account)
|
||||
require.NoError(t, err)
|
||||
|
||||
peer.Location.ConnectionIP = net.ParseIP("35.1.1.1")
|
||||
peer.Location.CountryCode = "DE"
|
||||
peer.Location.CityName = "Berlin"
|
||||
peer.Location.GeoNameID = 2950159
|
||||
|
||||
err = store.SavePeerLocation(account.Id, account.Peers[peer.ID])
|
||||
assert.NoError(t, err)
|
||||
|
||||
account, err = store.GetAccount(context.Background(), account.Id)
|
||||
require.NoError(t, err)
|
||||
|
||||
actual := account.Peers[peer.ID].Location
|
||||
assert.Equal(t, peer.Location, actual)
|
||||
}
|
||||
|
||||
func newStore(t *testing.T) *FileStore {
|
||||
t.Helper()
|
||||
store, err := NewFileStore(context.Background(), t.TempDir(), nil)
|
||||
if err != nil {
|
||||
t.Errorf("failed creating a new store")
|
||||
}
|
||||
|
||||
return store
|
||||
}
|
@ -6,7 +6,6 @@ import (
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
@ -89,14 +88,7 @@ func getServerKey(client mgmtProto.ManagementServiceClient) (*wgtypes.Key, error
|
||||
|
||||
func Test_SyncProtocol(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
err := util.CopyFileContents("testdata/store_with_expired_peers.json", filepath.Join(dir, "store.json"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
os.Remove(filepath.Join(dir, "store.json")) //nolint
|
||||
}()
|
||||
mgmtServer, _, mgmtAddr, err := startManagementForTest(t, &Config{
|
||||
mgmtServer, _, mgmtAddr, cleanup, err := startManagementForTest(t, "testdata/store_with_expired_peers.sqlite", &Config{
|
||||
Stuns: []*Host{{
|
||||
Proto: "udp",
|
||||
URI: "stun:stun.wiretrustee.com:3468",
|
||||
@ -117,6 +109,7 @@ func Test_SyncProtocol(t *testing.T) {
|
||||
Datadir: dir,
|
||||
HttpConfig: nil,
|
||||
})
|
||||
defer cleanup()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
@ -412,18 +405,18 @@ func TestServer_GetDeviceAuthorizationFlow(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func startManagementForTest(t TestingT, config *Config) (*grpc.Server, *DefaultAccountManager, string, error) {
|
||||
func startManagementForTest(t *testing.T, testFile string, config *Config) (*grpc.Server, *DefaultAccountManager, string, func(), error) {
|
||||
t.Helper()
|
||||
lis, err := net.Listen("tcp", "localhost:0")
|
||||
if err != nil {
|
||||
return nil, nil, "", err
|
||||
return nil, nil, "", nil, err
|
||||
}
|
||||
s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp))
|
||||
store, cleanUp, err := NewTestStoreFromJson(context.Background(), config.Datadir)
|
||||
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), testFile)
|
||||
if err != nil {
|
||||
return nil, nil, "", err
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Cleanup(cleanUp)
|
||||
|
||||
peersUpdateManager := NewPeersUpdateManager(nil)
|
||||
eventStore := &activity.InMemoryEventStore{}
|
||||
@ -437,7 +430,8 @@ func startManagementForTest(t TestingT, config *Config) (*grpc.Server, *DefaultA
|
||||
eventStore, nil, false, MocIntegratedValidator{}, metrics)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, "", err
|
||||
cleanup()
|
||||
return nil, nil, "", cleanup, err
|
||||
}
|
||||
|
||||
secretsManager := NewTimeBasedAuthSecretsManager(peersUpdateManager, config.TURNConfig, config.Relay)
|
||||
@ -445,7 +439,7 @@ func startManagementForTest(t TestingT, config *Config) (*grpc.Server, *DefaultA
|
||||
ephemeralMgr := NewEphemeralManager(store, accountManager)
|
||||
mgmtServer, err := NewServer(context.Background(), config, accountManager, peersUpdateManager, secretsManager, nil, ephemeralMgr)
|
||||
if err != nil {
|
||||
return nil, nil, "", err
|
||||
return nil, nil, "", cleanup, err
|
||||
}
|
||||
mgmtProto.RegisterManagementServiceServer(s, mgmtServer)
|
||||
|
||||
@ -455,7 +449,7 @@ func startManagementForTest(t TestingT, config *Config) (*grpc.Server, *DefaultA
|
||||
}
|
||||
}()
|
||||
|
||||
return s, accountManager, lis.Addr().String(), nil
|
||||
return s, accountManager, lis.Addr().String(), cleanup, nil
|
||||
}
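For orientation, a minimal usage sketch of the reworked helper (hypothetical test name and a pared-down Config are assumptions, not part of this change): the SQLite fixture path is passed explicitly, and the returned cleanup func must be invoked to drop the copied store.db.

func testManagementSketch(t *testing.T) {
	t.Helper()
	mgmtServer, am, addr, cleanup, err := startManagementForTest(t, "testdata/store.sqlite", &Config{Datadir: t.TempDir()})
	if err != nil {
		t.Fatal(err)
	}
	defer cleanup()         // closes the store and removes the copied store.db
	defer mgmtServer.Stop()
	_ = am                  // *DefaultAccountManager, usable for direct assertions
	_ = addr                // gRPC address, e.g. for createRawClient(addr)
}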
|
||||
|
||||
func createRawClient(addr string) (mgmtProto.ManagementServiceClient, *grpc.ClientConn, error) {
|
||||
@ -475,6 +469,7 @@ func createRawClient(addr string) (mgmtProto.ManagementServiceClient, *grpc.Clie
|
||||
|
||||
return mgmtProto.NewManagementServiceClient(conn), conn, nil
|
||||
}
|
||||
|
||||
func Test_SyncStatusRace(t *testing.T) {
|
||||
if os.Getenv("CI") == "true" && os.Getenv("NETBIRD_STORE_ENGINE") == "postgres" {
|
||||
t.Skip("Skipping on CI and Postgres store")
|
||||
@ -488,15 +483,8 @@ func Test_SyncStatusRace(t *testing.T) {
|
||||
func testSyncStatusRace(t *testing.T) {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
err := util.CopyFileContents("testdata/store_with_expired_peers.json", filepath.Join(dir, "store.json"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
os.Remove(filepath.Join(dir, "store.json")) //nolint
|
||||
}()
|
||||
|
||||
mgmtServer, am, mgmtAddr, err := startManagementForTest(t, &Config{
|
||||
mgmtServer, am, mgmtAddr, cleanup, err := startManagementForTest(t, "testdata/store_with_expired_peers.sqlite", &Config{
|
||||
Stuns: []*Host{{
|
||||
Proto: "udp",
|
||||
URI: "stun:stun.wiretrustee.com:3468",
|
||||
@ -517,6 +505,7 @@ func testSyncStatusRace(t *testing.T) {
|
||||
Datadir: dir,
|
||||
HttpConfig: nil,
|
||||
})
|
||||
defer cleanup()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
@ -665,15 +654,8 @@ func Test_LoginPerformance(t *testing.T) {
|
||||
t.Run(bc.name, func(t *testing.T) {
|
||||
t.Helper()
|
||||
dir := t.TempDir()
|
||||
err := util.CopyFileContents("testdata/store_with_expired_peers.json", filepath.Join(dir, "store.json"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
os.Remove(filepath.Join(dir, "store.json")) //nolint
|
||||
}()
|
||||
|
||||
mgmtServer, am, _, err := startManagementForTest(t, &Config{
|
||||
mgmtServer, am, _, cleanup, err := startManagementForTest(t, "testdata/store_with_expired_peers.sqlite", &Config{
|
||||
Stuns: []*Host{{
|
||||
Proto: "udp",
|
||||
URI: "stun:stun.wiretrustee.com:3468",
|
||||
@ -694,6 +676,7 @@ func Test_LoginPerformance(t *testing.T) {
|
||||
Datadir: dir,
|
||||
HttpConfig: nil,
|
||||
})
|
||||
defer cleanup()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
return
|
||||
|
@ -5,7 +5,6 @@ import (
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
sync2 "sync"
|
||||
"time"
|
||||
@ -52,8 +51,6 @@ var _ = Describe("Management service", func() {
|
||||
dataDir, err = os.MkdirTemp("", "wiretrustee_mgmt_test_tmp_*")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
|
||||
err = util.CopyFileContents("testdata/store.json", filepath.Join(dataDir, "store.json"))
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
var listener net.Listener
|
||||
|
||||
config := &server.Config{}
|
||||
@ -61,7 +58,7 @@ var _ = Describe("Management service", func() {
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
config.Datadir = dataDir
|
||||
|
||||
s, listener = startServer(config)
|
||||
s, listener = startServer(config, dataDir, "testdata/store.sqlite")
|
||||
addr = listener.Addr().String()
|
||||
client, conn = createRawClient(addr)
|
||||
|
||||
@ -530,12 +527,12 @@ func createRawClient(addr string) (mgmtProto.ManagementServiceClient, *grpc.Clie
|
||||
return mgmtProto.NewManagementServiceClient(conn), conn
|
||||
}
|
||||
|
||||
func startServer(config *server.Config) (*grpc.Server, net.Listener) {
|
||||
func startServer(config *server.Config, dataDir string, testFile string) (*grpc.Server, net.Listener) {
|
||||
lis, err := net.Listen("tcp", ":0")
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
s := grpc.NewServer()
|
||||
|
||||
store, _, err := server.NewTestStoreFromJson(context.Background(), config.Datadir)
|
||||
store, _, err := server.NewTestStoreFromSqlite(context.Background(), testFile, dataDir)
|
||||
if err != nil {
|
||||
log.Fatalf("failed creating a store: %s: %v", config.Datadir, err)
|
||||
}
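One observation, with a hedged alternative (assumption, not part of this change): startServer discards the cleanup func returned by NewTestStoreFromSqlite, so the copied store.db is only removed together with dataDir. A variant that keeps it for the suite teardown:

	store, cleanUpStore, err := server.NewTestStoreFromSqlite(context.Background(), testFile, dataDir)
	if err != nil {
		log.Fatalf("failed creating a store: %s: %v", dataDir, err)
	}
	_ = cleanUpStore // invoke from the suite's teardown before removing dataDir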
|
||||
|
@ -773,7 +773,7 @@ func createNSManager(t *testing.T) (*DefaultAccountManager, error) {
|
||||
func createNSStore(t *testing.T) (Store, error) {
|
||||
t.Helper()
|
||||
dataDir := t.TempDir()
|
||||
store, cleanUp, err := NewTestStoreFromJson(context.Background(), dataDir)
|
||||
store, cleanUp, err := NewTestStoreFromSqlite(context.Background(), "", dataDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -1004,7 +1004,11 @@ func Test_RegisterPeerByUser(t *testing.T) {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
|
||||
store := newSqliteStoreFromFile(t, "testdata/extended-store.json")
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/extended-store.sqlite")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
eventStore := &activity.InMemoryEventStore{}
|
||||
|
||||
@ -1065,7 +1069,11 @@ func Test_RegisterPeerBySetupKey(t *testing.T) {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
|
||||
store := newSqliteStoreFromFile(t, "testdata/extended-store.json")
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/extended-store.sqlite")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
eventStore := &activity.InMemoryEventStore{}
|
||||
|
||||
@ -1127,7 +1135,11 @@ func Test_RegisterPeerRollbackOnFailure(t *testing.T) {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
|
||||
store := newSqliteStoreFromFile(t, "testdata/extended-store.json")
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/extended-store.sqlite")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
eventStore := &activity.InMemoryEventStore{}
|
||||
|
||||
|
@ -1257,7 +1257,7 @@ func createRouterManager(t *testing.T) (*DefaultAccountManager, error) {
|
||||
func createRouterStore(t *testing.T) (Store, error) {
|
||||
t.Helper()
|
||||
dataDir := t.TempDir()
|
||||
store, cleanUp, err := NewTestStoreFromJson(context.Background(), dataDir)
|
||||
store, cleanUp, err := NewTestStoreFromSqlite(context.Background(), "", dataDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -1737,7 +1737,7 @@ func TestAccount_getPeersRoutesFirewall(t *testing.T) {
|
||||
}
|
||||
assert.ElementsMatch(t, routesFirewallRules, expectedRoutesFirewallRules)
|
||||
|
||||
//peerD is also the routing peer for route1, should contain same routes firewall rules as peerA
|
||||
// peerD is also the routing peer for route1, should contain same routes firewall rules as peerA
|
||||
routesFirewallRules = account.getPeerRoutesFirewallRules(context.Background(), "peerD", validatedPeers)
|
||||
assert.Len(t, routesFirewallRules, 2)
|
||||
assert.ElementsMatch(t, routesFirewallRules, expectedRoutesFirewallRules)
|
||||
|
@ -915,6 +915,28 @@ func NewPostgresqlStoreFromFileStore(ctx context.Context, fileStore *FileStore,
|
||||
return store, nil
|
||||
}
|
||||
|
||||
// NewPostgresqlStoreFromSqlStore restores the data of an existing SqlStore (typically SQLite) into a new Postgres store and returns the Postgres store.
|
||||
func NewPostgresqlStoreFromSqlStore(ctx context.Context, sqliteStore *SqlStore, dsn string, metrics telemetry.AppMetrics) (*SqlStore, error) {
|
||||
store, err := NewPostgresqlStore(ctx, dsn, metrics)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = store.SaveInstallationID(ctx, sqliteStore.GetInstallationID())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, account := range sqliteStore.GetAllAccounts(ctx) {
|
||||
err := store.SaveAccount(ctx, account)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return store, nil
|
||||
}
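A short hedged sketch of how this constructor is meant to be fed (hypothetical helper and DSN handling, not part of this change): the source is an already populated SqlStore, typically one opened from a SQLite fixture, and its installation ID plus every account are mirrored into Postgres.

func newSeededPostgresStore(ctx context.Context, dataDir, fixture, dsn string) (*SqlStore, func(), error) {
	// Open the SQLite fixture first; it becomes the source of truth for the copy.
	src, cleanUp, err := NewSqliteTestStore(ctx, dataDir, fixture)
	if err != nil {
		return nil, nil, err
	}
	pg, err := NewPostgresqlStoreFromSqlStore(ctx, src, dsn, nil)
	if err != nil {
		cleanUp()
		return nil, nil, err
	}
	return pg, cleanUp, nil
}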
|
||||
|
||||
func (s *SqlStore) GetSetupKeyBySecret(ctx context.Context, lockStrength LockingStrength, key string) (*SetupKey, error) {
|
||||
var setupKey SetupKey
|
||||
result := s.db.WithContext(ctx).Clauses(clause.Locking{Strength: string(lockStrength)}).
|
||||
|
@ -7,7 +7,6 @@ import (
|
||||
"net"
|
||||
"net/netip"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"testing"
|
||||
"time"
|
||||
@ -25,7 +24,6 @@ import (
|
||||
"github.com/netbirdio/netbird/management/server/status"
|
||||
|
||||
nbpeer "github.com/netbirdio/netbird/management/server/peer"
|
||||
"github.com/netbirdio/netbird/util"
|
||||
)
|
||||
|
||||
func TestSqlite_NewStore(t *testing.T) {
|
||||
@ -347,7 +345,11 @@ func TestSqlite_GetAccount(t *testing.T) {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
|
||||
store := newSqliteStoreFromFile(t, "testdata/store.json")
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/store.sqlite")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
id := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
|
||||
|
||||
@ -367,7 +369,11 @@ func TestSqlite_SavePeer(t *testing.T) {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
|
||||
store := newSqliteStoreFromFile(t, "testdata/store.json")
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/store.sqlite")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
account, err := store.GetAccount(context.Background(), "bf1c8084-ba50-4ce7-9439-34653001fc3b")
|
||||
require.NoError(t, err)
|
||||
@ -415,7 +421,11 @@ func TestSqlite_SavePeerStatus(t *testing.T) {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
|
||||
store := newSqliteStoreFromFile(t, "testdata/store.json")
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/store.sqlite")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanup()
|
||||
|
||||
account, err := store.GetAccount(context.Background(), "bf1c8084-ba50-4ce7-9439-34653001fc3b")
|
||||
require.NoError(t, err)
|
||||
@ -468,8 +478,11 @@ func TestSqlite_SavePeerLocation(t *testing.T) {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
|
||||
store := newSqliteStoreFromFile(t, "testdata/store.json")
|
||||
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/store.sqlite")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanup()
|
||||
account, err := store.GetAccount(context.Background(), "bf1c8084-ba50-4ce7-9439-34653001fc3b")
|
||||
require.NoError(t, err)
|
||||
|
||||
@ -519,8 +532,11 @@ func TestSqlite_TestGetAccountByPrivateDomain(t *testing.T) {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
|
||||
store := newSqliteStoreFromFile(t, "testdata/store.json")
|
||||
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/store.sqlite")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanup()
|
||||
existingDomain := "test.com"
|
||||
|
||||
account, err := store.GetAccountByPrivateDomain(context.Background(), existingDomain)
|
||||
@ -539,8 +555,11 @@ func TestSqlite_GetTokenIDByHashedToken(t *testing.T) {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
|
||||
store := newSqliteStoreFromFile(t, "testdata/store.json")
|
||||
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/store.sqlite")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanup()
|
||||
hashed := "SoMeHaShEdToKeN"
|
||||
id := "9dj38s35-63fb-11ec-90d6-0242ac120003"
|
||||
|
||||
@ -560,8 +579,11 @@ func TestSqlite_GetUserByTokenID(t *testing.T) {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
|
||||
store := newSqliteStoreFromFile(t, "testdata/store.json")
|
||||
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/store.sqlite")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer cleanup()
|
||||
id := "9dj38s35-63fb-11ec-90d6-0242ac120003"
|
||||
|
||||
user, err := store.GetUserByTokenID(context.Background(), id)
|
||||
@ -668,24 +690,9 @@ func newSqliteStore(t *testing.T) *SqlStore {
|
||||
t.Helper()
|
||||
|
||||
store, err := NewSqliteStore(context.Background(), t.TempDir(), nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, store)
|
||||
|
||||
return store
|
||||
}
|
||||
|
||||
func newSqliteStoreFromFile(t *testing.T, filename string) *SqlStore {
|
||||
t.Helper()
|
||||
|
||||
storeDir := t.TempDir()
|
||||
|
||||
err := util.CopyFileContents(filename, filepath.Join(storeDir, "store.json"))
|
||||
require.NoError(t, err)
|
||||
|
||||
fStore, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
store, err := NewSqliteStoreFromFileStore(context.Background(), fStore, storeDir, nil)
|
||||
t.Cleanup(func() {
|
||||
store.Close(context.Background())
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, store)
|
||||
|
||||
@ -733,32 +740,31 @@ func newPostgresqlStore(t *testing.T) *SqlStore {
|
||||
return store
|
||||
}
|
||||
|
||||
func newPostgresqlStoreFromFile(t *testing.T, filename string) *SqlStore {
|
||||
func newPostgresqlStoreFromSqlite(t *testing.T, filename string) *SqlStore {
|
||||
t.Helper()
|
||||
|
||||
storeDir := t.TempDir()
|
||||
err := util.CopyFileContents(filename, filepath.Join(storeDir, "store.json"))
|
||||
require.NoError(t, err)
|
||||
store, cleanUpQ, err := NewSqliteTestStore(context.Background(), t.TempDir(), filename)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Cleanup(cleanUpQ)
|
||||
|
||||
fStore, err := NewFileStore(context.Background(), storeDir, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
cleanUp, err := testutil.CreatePGDB()
|
||||
cleanUpP, err := testutil.CreatePGDB()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Cleanup(cleanUp)
|
||||
t.Cleanup(cleanUpP)
|
||||
|
||||
postgresDsn, ok := os.LookupEnv(postgresDsnEnv)
|
||||
if !ok {
|
||||
t.Fatalf("could not initialize postgresql store: %s is not set", postgresDsnEnv)
|
||||
}
|
||||
|
||||
store, err := NewPostgresqlStoreFromFileStore(context.Background(), fStore, postgresDsn, nil)
|
||||
pstore, err := NewPostgresqlStoreFromSqlStore(context.Background(), store, postgresDsn, nil)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, pstore)
|
||||
|
||||
return store
|
||||
return pstore
|
||||
}
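The Postgres-backed tests below only make sense when a DSN is exported; a hedged guard helper (hypothetical, not part of this change) that states the precondition explicitly:

func skipWithoutPostgresDSN(t *testing.T) {
	t.Helper()
	// postgresDsnEnv is the env var name already consulted by the store package.
	if _, ok := os.LookupEnv(postgresDsnEnv); !ok {
		t.Skipf("%s is not set, skipping Postgres-backed test", postgresDsnEnv)
	}
}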
|
||||
|
||||
func TestPostgresql_NewStore(t *testing.T) {
|
||||
@ -924,7 +930,7 @@ func TestPostgresql_SavePeerStatus(t *testing.T) {
|
||||
t.Skipf("The PostgreSQL store is not properly supported by %s yet", runtime.GOOS)
|
||||
}
|
||||
|
||||
store := newPostgresqlStoreFromFile(t, "testdata/store.json")
|
||||
store := newPostgresqlStoreFromSqlite(t, "testdata/store.sqlite")
|
||||
|
||||
account, err := store.GetAccount(context.Background(), "bf1c8084-ba50-4ce7-9439-34653001fc3b")
|
||||
require.NoError(t, err)
|
||||
@ -963,7 +969,7 @@ func TestPostgresql_TestGetAccountByPrivateDomain(t *testing.T) {
|
||||
t.Skipf("The PostgreSQL store is not properly supported by %s yet", runtime.GOOS)
|
||||
}
|
||||
|
||||
store := newPostgresqlStoreFromFile(t, "testdata/store.json")
|
||||
store := newPostgresqlStoreFromSqlite(t, "testdata/store.sqlite")
|
||||
|
||||
existingDomain := "test.com"
|
||||
|
||||
@ -980,7 +986,7 @@ func TestPostgresql_GetTokenIDByHashedToken(t *testing.T) {
|
||||
t.Skipf("The PostgreSQL store is not properly supported by %s yet", runtime.GOOS)
|
||||
}
|
||||
|
||||
store := newPostgresqlStoreFromFile(t, "testdata/store.json")
|
||||
store := newPostgresqlStoreFromSqlite(t, "testdata/store.sqlite")
|
||||
|
||||
hashed := "SoMeHaShEdToKeN"
|
||||
id := "9dj38s35-63fb-11ec-90d6-0242ac120003"
|
||||
@ -995,7 +1001,7 @@ func TestPostgresql_GetUserByTokenID(t *testing.T) {
|
||||
t.Skipf("The PostgreSQL store is not properly supported by %s yet", runtime.GOOS)
|
||||
}
|
||||
|
||||
store := newPostgresqlStoreFromFile(t, "testdata/store.json")
|
||||
store := newPostgresqlStoreFromSqlite(t, "testdata/store.sqlite")
|
||||
|
||||
id := "9dj38s35-63fb-11ec-90d6-0242ac120003"
|
||||
|
||||
@ -1009,12 +1015,15 @@ func TestSqlite_GetTakenIPs(t *testing.T) {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
|
||||
store := newSqliteStoreFromFile(t, "testdata/extended-store.json")
|
||||
defer store.Close(context.Background())
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/extended-store.sqlite")
|
||||
defer cleanup()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
|
||||
|
||||
_, err := store.GetAccount(context.Background(), existingAccountID)
|
||||
_, err = store.GetAccount(context.Background(), existingAccountID)
|
||||
require.NoError(t, err)
|
||||
|
||||
takenIPs, err := store.GetTakenIPs(context.Background(), LockingStrengthShare, existingAccountID)
|
||||
@ -1054,12 +1063,15 @@ func TestSqlite_GetPeerLabelsInAccount(t *testing.T) {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
|
||||
store := newSqliteStoreFromFile(t, "testdata/extended-store.json")
|
||||
defer store.Close(context.Background())
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/extended-store.sqlite")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
|
||||
|
||||
_, err := store.GetAccount(context.Background(), existingAccountID)
|
||||
_, err = store.GetAccount(context.Background(), existingAccountID)
|
||||
require.NoError(t, err)
|
||||
|
||||
labels, err := store.GetPeerLabelsInAccount(context.Background(), LockingStrengthShare, existingAccountID)
|
||||
@ -1096,12 +1108,15 @@ func TestSqlite_GetAccountNetwork(t *testing.T) {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
|
||||
store := newSqliteStoreFromFile(t, "testdata/extended-store.json")
|
||||
defer store.Close(context.Background())
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/extended-store.sqlite")
|
||||
t.Cleanup(cleanup)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
|
||||
|
||||
_, err := store.GetAccount(context.Background(), existingAccountID)
|
||||
_, err = store.GetAccount(context.Background(), existingAccountID)
|
||||
require.NoError(t, err)
|
||||
|
||||
network, err := store.GetAccountNetwork(context.Background(), LockingStrengthShare, existingAccountID)
|
||||
@ -1118,12 +1133,15 @@ func TestSqlite_GetSetupKeyBySecret(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
store := newSqliteStoreFromFile(t, "testdata/extended-store.json")
|
||||
defer store.Close(context.Background())
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/extended-store.sqlite")
|
||||
t.Cleanup(cleanup)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
|
||||
|
||||
_, err := store.GetAccount(context.Background(), existingAccountID)
|
||||
_, err = store.GetAccount(context.Background(), existingAccountID)
|
||||
require.NoError(t, err)
|
||||
|
||||
setupKey, err := store.GetSetupKeyBySecret(context.Background(), LockingStrengthShare, "A2C8E62B-38F5-4553-B31E-DD66C696CEBB")
|
||||
@ -1137,12 +1155,16 @@ func TestSqlite_incrementSetupKeyUsage(t *testing.T) {
|
||||
if runtime.GOOS == "windows" {
|
||||
t.Skip("The SQLite store is not properly supported by Windows yet")
|
||||
}
|
||||
store := newSqliteStoreFromFile(t, "testdata/extended-store.json")
|
||||
defer store.Close(context.Background())
|
||||
|
||||
store, cleanup, err := NewSqliteTestStore(context.Background(), t.TempDir(), "testdata/extended-store.sqlite")
|
||||
t.Cleanup(cleanup)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
existingAccountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
|
||||
|
||||
_, err := store.GetAccount(context.Background(), existingAccountID)
|
||||
_, err = store.GetAccount(context.Background(), existingAccountID)
|
||||
require.NoError(t, err)
|
||||
|
||||
setupKey, err := store.GetSetupKeyBySecret(context.Background(), LockingStrengthShare, "A2C8E62B-38F5-4553-B31E-DD66C696CEBB")
|
||||
|
@ -12,10 +12,11 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/netbirdio/netbird/dns"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"github.com/netbirdio/netbird/dns"
|
||||
|
||||
nbgroup "github.com/netbirdio/netbird/management/server/group"
|
||||
|
||||
"github.com/netbirdio/netbird/management/server/telemetry"
|
||||
@ -236,23 +237,29 @@ func getMigrations(ctx context.Context) []migrationFunc {
|
||||
}
|
||||
}
|
||||
|
||||
// NewTestStoreFromJson is only used in tests
|
||||
func NewTestStoreFromJson(ctx context.Context, dataDir string) (Store, func(), error) {
|
||||
fstore, err := NewFileStore(ctx, dataDir, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// NewTestStoreFromSqlite is only used in tests
|
||||
func NewTestStoreFromSqlite(ctx context.Context, filename string, dataDir string) (Store, func(), error) {
|
||||
// if store engine is not set in the config we first try to evaluate NETBIRD_STORE_ENGINE
|
||||
kind := getStoreEngineFromEnv()
|
||||
if kind == "" {
|
||||
kind = SqliteStoreEngine
|
||||
}
|
||||
|
||||
var (
|
||||
store Store
|
||||
cleanUp func()
|
||||
)
|
||||
var store *SqlStore
|
||||
var err error
|
||||
var cleanUp func()
|
||||
|
||||
if filename == "" {
|
||||
store, err = NewSqliteStore(ctx, dataDir, nil)
|
||||
cleanUp = func() {
|
||||
store.Close(ctx)
|
||||
}
|
||||
} else {
|
||||
store, cleanUp, err = NewSqliteTestStore(ctx, dataDir, filename)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if kind == PostgresStoreEngine {
|
||||
cleanUp, err = testutil.CreatePGDB()
|
||||
@ -265,21 +272,32 @@ func NewTestStoreFromJson(ctx context.Context, dataDir string) (Store, func(), e
|
||||
return nil, nil, fmt.Errorf("%s is not set", postgresDsnEnv)
|
||||
}
|
||||
|
||||
store, err = NewPostgresqlStoreFromFileStore(ctx, fstore, dsn, nil)
|
||||
store, err = NewPostgresqlStoreFromSqlStore(ctx, store, dsn, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
} else {
|
||||
store, err = NewSqliteStoreFromFileStore(ctx, fstore, dataDir, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
cleanUp = func() { store.Close(ctx) }
|
||||
}
|
||||
|
||||
return store, cleanUp, nil
|
||||
}
|
||||
|
||||
func NewSqliteTestStore(ctx context.Context, dataDir string, testFile string) (*SqlStore, func(), error) {
|
||||
err := util.CopyFileContents(testFile, filepath.Join(dataDir, "store.db"))
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
store, err := NewSqliteStore(ctx, dataDir, nil)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return store, func() {
|
||||
store.Close(ctx)
|
||||
os.Remove(filepath.Join(dataDir, "store.db"))
|
||||
}, nil
|
||||
}
|
||||
|
||||
// MigrateFileStoreToSqlite migrates the file store to the SQLite store.
|
||||
func MigrateFileStoreToSqlite(ctx context.Context, dataDir string) error {
|
||||
fileStorePath := path.Join(dataDir, storeFileName)
|
||||
|
@ -14,12 +14,6 @@ type benchCase struct {
|
||||
size int
|
||||
}
|
||||
|
||||
var newFs = func(b *testing.B) Store {
|
||||
b.Helper()
|
||||
store, _ := NewFileStore(context.Background(), b.TempDir(), nil)
|
||||
return store
|
||||
}
|
||||
|
||||
var newSqlite = func(b *testing.B) Store {
|
||||
b.Helper()
|
||||
store, _ := NewSqliteStore(context.Background(), b.TempDir(), nil)
|
||||
@ -28,13 +22,9 @@ var newSqlite = func(b *testing.B) Store {
|
||||
|
||||
func BenchmarkTest_StoreWrite(b *testing.B) {
|
||||
cases := []benchCase{
|
||||
{name: "FileStore_Write", storeFn: newFs, size: 100},
|
||||
{name: "SqliteStore_Write", storeFn: newSqlite, size: 100},
|
||||
{name: "FileStore_Write", storeFn: newFs, size: 500},
|
||||
{name: "SqliteStore_Write", storeFn: newSqlite, size: 500},
|
||||
{name: "FileStore_Write", storeFn: newFs, size: 1000},
|
||||
{name: "SqliteStore_Write", storeFn: newSqlite, size: 1000},
|
||||
{name: "FileStore_Write", storeFn: newFs, size: 2000},
|
||||
{name: "SqliteStore_Write", storeFn: newSqlite, size: 2000},
|
||||
}
|
||||
|
||||
@ -61,11 +51,8 @@ func BenchmarkTest_StoreWrite(b *testing.B) {
|
||||
|
||||
func BenchmarkTest_StoreRead(b *testing.B) {
|
||||
cases := []benchCase{
|
||||
{name: "FileStore_Read", storeFn: newFs, size: 100},
|
||||
{name: "SqliteStore_Read", storeFn: newSqlite, size: 100},
|
||||
{name: "FileStore_Read", storeFn: newFs, size: 500},
|
||||
{name: "SqliteStore_Read", storeFn: newSqlite, size: 500},
|
||||
{name: "FileStore_Read", storeFn: newFs, size: 1000},
|
||||
{name: "SqliteStore_Read", storeFn: newSqlite, size: 1000},
|
||||
}
|
||||
|
||||
@ -89,3 +76,11 @@ func BenchmarkTest_StoreRead(b *testing.B) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newStore(t *testing.T) Store {
|
||||
t.Helper()
|
||||
|
||||
store := newSqliteStore(t)
|
||||
|
||||
return store
|
||||
}
|
||||
|
120 management/server/testdata/extended-store.json (vendored, deleted)
@ -1,120 +0,0 @@
|
||||
{
|
||||
"Accounts": {
|
||||
"bf1c8084-ba50-4ce7-9439-34653001fc3b": {
|
||||
"Id": "bf1c8084-ba50-4ce7-9439-34653001fc3b",
|
||||
"CreatedBy": "",
|
||||
"Domain": "test.com",
|
||||
"DomainCategory": "private",
|
||||
"IsDomainPrimaryAccount": true,
|
||||
"SetupKeys": {
|
||||
"A2C8E62B-38F5-4553-B31E-DD66C696CEBB": {
|
||||
"Id": "A2C8E62B-38F5-4553-B31E-DD66C696CEBB",
|
||||
"AccountID": "",
|
||||
"Key": "A2C8E62B-38F5-4553-B31E-DD66C696CEBB",
|
||||
"Name": "Default key",
|
||||
"Type": "reusable",
|
||||
"CreatedAt": "2021-08-19T20:46:20.005936822+02:00",
|
||||
"ExpiresAt": "2321-09-18T20:46:20.005936822+02:00",
|
||||
"UpdatedAt": "0001-01-01T00:00:00Z",
|
||||
"Revoked": false,
|
||||
"UsedTimes": 0,
|
||||
"LastUsed": "0001-01-01T00:00:00Z",
|
||||
"AutoGroups": ["cfefqs706sqkneg59g2g"],
|
||||
"UsageLimit": 0,
|
||||
"Ephemeral": false
|
||||
},
|
||||
"A2C8E62B-38F5-4553-B31E-DD66C696CEBC": {
|
||||
"Id": "A2C8E62B-38F5-4553-B31E-DD66C696CEBC",
|
||||
"AccountID": "",
|
||||
"Key": "A2C8E62B-38F5-4553-B31E-DD66C696CEBC",
|
||||
"Name": "Faulty key with non existing group",
|
||||
"Type": "reusable",
|
||||
"CreatedAt": "2021-08-19T20:46:20.005936822+02:00",
|
||||
"ExpiresAt": "2321-09-18T20:46:20.005936822+02:00",
|
||||
"UpdatedAt": "0001-01-01T00:00:00Z",
|
||||
"Revoked": false,
|
||||
"UsedTimes": 0,
|
||||
"LastUsed": "0001-01-01T00:00:00Z",
|
||||
"AutoGroups": ["abcd"],
|
||||
"UsageLimit": 0,
|
||||
"Ephemeral": false
|
||||
}
|
||||
},
|
||||
"Network": {
|
||||
"id": "af1c8024-ha40-4ce2-9418-34653101fc3c",
|
||||
"Net": {
|
||||
"IP": "100.64.0.0",
|
||||
"Mask": "//8AAA=="
|
||||
},
|
||||
"Dns": "",
|
||||
"Serial": 0
|
||||
},
|
||||
"Peers": {},
|
||||
"Users": {
|
||||
"edafee4e-63fb-11ec-90d6-0242ac120003": {
|
||||
"Id": "edafee4e-63fb-11ec-90d6-0242ac120003",
|
||||
"AccountID": "",
|
||||
"Role": "admin",
|
||||
"IsServiceUser": false,
|
||||
"ServiceUserName": "",
|
||||
"AutoGroups": ["cfefqs706sqkneg59g3g"],
|
||||
"PATs": {},
|
||||
"Blocked": false,
|
||||
"LastLogin": "0001-01-01T00:00:00Z"
|
||||
},
|
||||
"f4f6d672-63fb-11ec-90d6-0242ac120003": {
|
||||
"Id": "f4f6d672-63fb-11ec-90d6-0242ac120003",
|
||||
"AccountID": "",
|
||||
"Role": "user",
|
||||
"IsServiceUser": false,
|
||||
"ServiceUserName": "",
|
||||
"AutoGroups": null,
|
||||
"PATs": {
|
||||
"9dj38s35-63fb-11ec-90d6-0242ac120003": {
|
||||
"ID": "9dj38s35-63fb-11ec-90d6-0242ac120003",
|
||||
"UserID": "",
|
||||
"Name": "",
|
||||
"HashedToken": "SoMeHaShEdToKeN",
|
||||
"ExpirationDate": "2023-02-27T00:00:00Z",
|
||||
"CreatedBy": "user",
|
||||
"CreatedAt": "2023-01-01T00:00:00Z",
|
||||
"LastUsed": "2023-02-01T00:00:00Z"
|
||||
}
|
||||
},
|
||||
"Blocked": false,
|
||||
"LastLogin": "0001-01-01T00:00:00Z"
|
||||
}
|
||||
},
|
||||
"Groups": {
|
||||
"cfefqs706sqkneg59g4g": {
|
||||
"ID": "cfefqs706sqkneg59g4g",
|
||||
"Name": "All",
|
||||
"Peers": []
|
||||
},
|
||||
"cfefqs706sqkneg59g3g": {
|
||||
"ID": "cfefqs706sqkneg59g3g",
|
||||
"Name": "AwesomeGroup1",
|
||||
"Peers": []
|
||||
},
|
||||
"cfefqs706sqkneg59g2g": {
|
||||
"ID": "cfefqs706sqkneg59g2g",
|
||||
"Name": "AwesomeGroup2",
|
||||
"Peers": []
|
||||
}
|
||||
},
|
||||
"Rules": null,
|
||||
"Policies": [],
|
||||
"Routes": null,
|
||||
"NameServerGroups": null,
|
||||
"DNSSettings": null,
|
||||
"Settings": {
|
||||
"PeerLoginExpirationEnabled": false,
|
||||
"PeerLoginExpiration": 86400000000000,
|
||||
"GroupsPropagationEnabled": false,
|
||||
"JWTGroupsEnabled": false,
|
||||
"JWTGroupsClaimName": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"InstallationID": ""
|
||||
}
|
BIN management/server/testdata/extended-store.sqlite (vendored, new binary file, not shown)
88 management/server/testdata/store.json (vendored, deleted)
@ -1,88 +0,0 @@
|
||||
{
|
||||
"Accounts": {
|
||||
"bf1c8084-ba50-4ce7-9439-34653001fc3b": {
|
||||
"Id": "bf1c8084-ba50-4ce7-9439-34653001fc3b",
|
||||
"CreatedBy": "",
|
||||
"Domain": "test.com",
|
||||
"DomainCategory": "private",
|
||||
"IsDomainPrimaryAccount": true,
|
||||
"SetupKeys": {
|
||||
"A2C8E62B-38F5-4553-B31E-DD66C696CEBB": {
|
||||
"Id": "",
|
||||
"AccountID": "",
|
||||
"Key": "A2C8E62B-38F5-4553-B31E-DD66C696CEBB",
|
||||
"Name": "Default key",
|
||||
"Type": "reusable",
|
||||
"CreatedAt": "2021-08-19T20:46:20.005936822+02:00",
|
||||
"ExpiresAt": "2321-09-18T20:46:20.005936822+02:00",
|
||||
"UpdatedAt": "0001-01-01T00:00:00Z",
|
||||
"Revoked": false,
|
||||
"UsedTimes": 0,
|
||||
"LastUsed": "0001-01-01T00:00:00Z",
|
||||
"AutoGroups": null,
|
||||
"UsageLimit": 0,
|
||||
"Ephemeral": false
|
||||
}
|
||||
},
|
||||
"Network": {
|
||||
"id": "af1c8024-ha40-4ce2-9418-34653101fc3c",
|
||||
"Net": {
|
||||
"IP": "100.64.0.0",
|
||||
"Mask": "//8AAA=="
|
||||
},
|
||||
"Dns": "",
|
||||
"Serial": 0
|
||||
},
|
||||
"Peers": {},
|
||||
"Users": {
|
||||
"edafee4e-63fb-11ec-90d6-0242ac120003": {
|
||||
"Id": "edafee4e-63fb-11ec-90d6-0242ac120003",
|
||||
"AccountID": "",
|
||||
"Role": "admin",
|
||||
"IsServiceUser": false,
|
||||
"ServiceUserName": "",
|
||||
"AutoGroups": null,
|
||||
"PATs": {},
|
||||
"Blocked": false,
|
||||
"LastLogin": "0001-01-01T00:00:00Z"
|
||||
},
|
||||
"f4f6d672-63fb-11ec-90d6-0242ac120003": {
|
||||
"Id": "f4f6d672-63fb-11ec-90d6-0242ac120003",
|
||||
"AccountID": "",
|
||||
"Role": "user",
|
||||
"IsServiceUser": false,
|
||||
"ServiceUserName": "",
|
||||
"AutoGroups": null,
|
||||
"PATs": {
|
||||
"9dj38s35-63fb-11ec-90d6-0242ac120003": {
|
||||
"ID": "9dj38s35-63fb-11ec-90d6-0242ac120003",
|
||||
"UserID": "",
|
||||
"Name": "",
|
||||
"HashedToken": "SoMeHaShEdToKeN",
|
||||
"ExpirationDate": "2023-02-27T00:00:00Z",
|
||||
"CreatedBy": "user",
|
||||
"CreatedAt": "2023-01-01T00:00:00Z",
|
||||
"LastUsed": "2023-02-01T00:00:00Z"
|
||||
}
|
||||
},
|
||||
"Blocked": false,
|
||||
"LastLogin": "0001-01-01T00:00:00Z"
|
||||
}
|
||||
},
|
||||
"Groups": null,
|
||||
"Rules": null,
|
||||
"Policies": [],
|
||||
"Routes": null,
|
||||
"NameServerGroups": null,
|
||||
"DNSSettings": null,
|
||||
"Settings": {
|
||||
"PeerLoginExpirationEnabled": false,
|
||||
"PeerLoginExpiration": 86400000000000,
|
||||
"GroupsPropagationEnabled": false,
|
||||
"JWTGroupsEnabled": false,
|
||||
"JWTGroupsClaimName": ""
|
||||
}
|
||||
}
|
||||
},
|
||||
"InstallationID": ""
|
||||
}
|
BIN management/server/testdata/store.sqlite (vendored, new binary file, not shown)
116 management/server/testdata/store_policy_migrate.json (vendored, deleted)
@ -1,116 +0,0 @@
|
||||
{
|
||||
"Accounts": {
|
||||
"bf1c8084-ba50-4ce7-9439-34653001fc3b": {
|
||||
"Id": "bf1c8084-ba50-4ce7-9439-34653001fc3b",
|
||||
"Domain": "test.com",
|
||||
"DomainCategory": "private",
|
||||
"IsDomainPrimaryAccount": true,
|
||||
"SetupKeys": {
|
||||
"A2C8E62B-38F5-4553-B31E-DD66C696CEBB": {
|
||||
"Key": "A2C8E62B-38F5-4553-B31E-DD66C696CEBB",
|
||||
"Name": "Default key",
|
||||
"Type": "reusable",
|
||||
"CreatedAt": "2021-08-19T20:46:20.005936822+02:00",
|
||||
"ExpiresAt": "2321-09-18T20:46:20.005936822+02:00",
|
||||
"Revoked": false,
|
||||
"UsedTimes": 0
|
||||
}
|
||||
},
|
||||
"Network": {
|
||||
"Id": "af1c8024-ha40-4ce2-9418-34653101fc3c",
|
||||
"Net": {
|
||||
"IP": "100.64.0.0",
|
||||
"Mask": "//8AAA=="
|
||||
},
|
||||
"Dns": null
|
||||
},
|
||||
"Peers": {
|
||||
"cfefqs706sqkneg59g4g": {
|
||||
"ID": "cfefqs706sqkneg59g4g",
|
||||
"Key": "MI5mHfJhbggPfD3FqEIsXm8X5bSWeUI2LhO9MpEEtWA=",
|
||||
"SetupKey": "",
|
||||
"IP": "100.103.179.238",
|
||||
"Meta": {
|
||||
"Hostname": "Ubuntu-2204-jammy-amd64-base",
|
||||
"GoOS": "linux",
|
||||
"Kernel": "Linux",
|
||||
"Core": "22.04",
|
||||
"Platform": "x86_64",
|
||||
"OS": "Ubuntu",
|
||||
"WtVersion": "development",
|
||||
"UIVersion": ""
|
||||
},
|
||||
"Name": "crocodile",
|
||||
"DNSLabel": "crocodile",
|
||||
"Status": {
|
||||
"LastSeen": "2023-02-13T12:37:12.635454796Z",
|
||||
"Connected": true
|
||||
},
|
||||
"UserID": "edafee4e-63fb-11ec-90d6-0242ac120003",
|
||||
"SSHKey": "AAAAC3NzaC1lZDI1NTE5AAAAIJN1NM4bpB9K",
|
||||
"SSHEnabled": false
|
||||
},
|
||||
"cfeg6sf06sqkneg59g50": {
|
||||
"ID": "cfeg6sf06sqkneg59g50",
|
||||
"Key": "zMAOKUeIYIuun4n0xPR1b3IdYZPmsyjYmB2jWCuloC4=",
|
||||
"SetupKey": "",
|
||||
"IP": "100.103.26.180",
|
||||
"Meta": {
|
||||
"Hostname": "borg",
|
||||
"GoOS": "linux",
|
||||
"Kernel": "Linux",
|
||||
"Core": "22.04",
|
||||
"Platform": "x86_64",
|
||||
"OS": "Ubuntu",
|
||||
"WtVersion": "development",
|
||||
"UIVersion": ""
|
||||
},
|
||||
"Name": "dingo",
|
||||
"DNSLabel": "dingo",
|
||||
"Status": {
|
||||
"LastSeen": "2023-02-21T09:37:42.565899199Z",
|
||||
"Connected": false
|
||||
},
|
||||
"UserID": "f4f6d672-63fb-11ec-90d6-0242ac120003",
|
||||
"SSHKey": "AAAAC3NzaC1lZDI1NTE5AAAAILHW",
|
||||
"SSHEnabled": true
|
||||
}
|
||||
},
|
||||
"Groups": {
|
||||
"cfefqs706sqkneg59g3g": {
|
||||
"ID": "cfefqs706sqkneg59g3g",
|
||||
"Name": "All",
|
||||
"Peers": [
|
||||
"cfefqs706sqkneg59g4g",
|
||||
"cfeg6sf06sqkneg59g50"
|
||||
]
|
||||
}
|
||||
},
|
||||
"Rules": {
|
||||
"cfefqs706sqkneg59g40": {
|
||||
"ID": "cfefqs706sqkneg59g40",
|
||||
"Name": "Default",
|
||||
"Description": "This is a default rule that allows connections between all the resources",
|
||||
"Disabled": false,
|
||||
"Source": [
|
||||
"cfefqs706sqkneg59g3g"
|
||||
],
|
||||
"Destination": [
|
||||
"cfefqs706sqkneg59g3g"
|
||||
],
|
||||
"Flow": 0
|
||||
}
|
||||
},
|
||||
"Users": {
|
||||
"edafee4e-63fb-11ec-90d6-0242ac120003": {
|
||||
"Id": "edafee4e-63fb-11ec-90d6-0242ac120003",
|
||||
"Role": "admin"
|
||||
},
|
||||
"f4f6d672-63fb-11ec-90d6-0242ac120003": {
|
||||
"Id": "f4f6d672-63fb-11ec-90d6-0242ac120003",
|
||||
"Role": "user"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
BIN management/server/testdata/store_policy_migrate.sqlite (vendored, new binary file, not shown)
130 management/server/testdata/store_with_expired_peers.json (vendored, deleted)
@ -1,130 +0,0 @@
|
||||
{
|
||||
"Accounts": {
|
||||
"bf1c8084-ba50-4ce7-9439-34653001fc3b": {
|
||||
"Id": "bf1c8084-ba50-4ce7-9439-34653001fc3b",
|
||||
"Domain": "test.com",
|
||||
"DomainCategory": "private",
|
||||
"IsDomainPrimaryAccount": true,
|
||||
"Settings": {
|
||||
"PeerLoginExpirationEnabled": true,
|
||||
"PeerLoginExpiration": 3600000000000
|
||||
},
|
||||
"SetupKeys": {
|
||||
"A2C8E62B-38F5-4553-B31E-DD66C696CEBB": {
|
||||
"Key": "A2C8E62B-38F5-4553-B31E-DD66C696CEBB",
|
||||
"Name": "Default key",
|
||||
"Type": "reusable",
|
||||
"CreatedAt": "2021-08-19T20:46:20.005936822+02:00",
|
||||
"ExpiresAt": "2321-09-18T20:46:20.005936822+02:00",
|
||||
"Revoked": false,
|
||||
"UsedTimes": 0
|
||||
|
||||
}
|
||||
},
|
||||
"Network": {
|
||||
"Id": "af1c8024-ha40-4ce2-9418-34653101fc3c",
|
||||
"Net": {
|
||||
"IP": "100.64.0.0",
|
||||
"Mask": "//8AAA=="
|
||||
},
|
||||
"Dns": null
|
||||
},
|
||||
"Peers": {
|
||||
"cfvprsrlo1hqoo49ohog": {
|
||||
"ID": "cfvprsrlo1hqoo49ohog",
|
||||
"Key": "5rvhvriKJZ3S9oxYToVj5TzDM9u9y8cxg7htIMWlYAg=",
|
||||
"SetupKey": "72546A29-6BC8-4311-BCFC-9CDBF33F1A48",
|
||||
"IP": "100.64.114.31",
|
||||
"Meta": {
|
||||
"Hostname": "f2a34f6a4731",
|
||||
"GoOS": "linux",
|
||||
"Kernel": "Linux",
|
||||
"Core": "11",
|
||||
"Platform": "unknown",
|
||||
"OS": "Debian GNU/Linux",
|
||||
"WtVersion": "0.12.0",
|
||||
"UIVersion": ""
|
||||
},
|
||||
"Name": "f2a34f6a4731",
|
||||
"DNSLabel": "f2a34f6a4731",
|
||||
"Status": {
|
||||
"LastSeen": "2023-03-02T09:21:02.189035775+01:00",
|
||||
"Connected": false,
|
||||
"LoginExpired": false
|
||||
},
|
||||
"UserID": "",
|
||||
"SSHKey": "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILzUUSYG/LGnV8zarb2SGN+tib/PZ+M7cL4WtTzUrTpk",
|
||||
"SSHEnabled": false,
|
||||
"LoginExpirationEnabled": true,
|
||||
"LastLogin": "2023-03-01T19:48:19.817799698+01:00"
|
||||
},
|
||||
"cg05lnblo1hkg2j514p0": {
|
||||
"ID": "cg05lnblo1hkg2j514p0",
|
||||
"Key": "RlSy2vzoG2HyMBTUImXOiVhCBiiBa5qD5xzMxkiFDW4=",
|
||||
"SetupKey": "",
|
||||
"IP": "100.64.39.54",
|
||||
"Meta": {
|
||||
"Hostname": "expiredhost",
|
||||
"GoOS": "linux",
|
||||
"Kernel": "Linux",
|
||||
"Core": "22.04",
|
||||
"Platform": "x86_64",
|
||||
"OS": "Ubuntu",
|
||||
"WtVersion": "development",
|
||||
"UIVersion": ""
|
||||
},
|
||||
"Name": "expiredhost",
|
||||
"DNSLabel": "expiredhost",
|
||||
"Status": {
|
||||
"LastSeen": "2023-03-02T09:19:57.276717255+01:00",
|
||||
"Connected": false,
|
||||
"LoginExpired": true
|
||||
},
|
||||
"UserID": "edafee4e-63fb-11ec-90d6-0242ac120003",
|
||||
"SSHKey": "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMbK5ZXJsGOOWoBT4OmkPtgdPZe2Q7bDuS/zjn2CZxhK",
|
||||
"SSHEnabled": false,
|
||||
"LoginExpirationEnabled": true,
|
||||
"LastLogin": "2023-03-02T09:14:21.791679181+01:00"
|
||||
},
|
||||
"cg3161rlo1hs9cq94gdg": {
|
||||
"ID": "cg3161rlo1hs9cq94gdg",
|
||||
"Key": "mVABSKj28gv+JRsf7e0NEGKgSOGTfU/nPB2cpuG56HU=",
|
||||
"SetupKey": "",
|
||||
"IP": "100.64.117.96",
|
||||
"Meta": {
|
||||
"Hostname": "testhost",
|
||||
"GoOS": "linux",
|
||||
"Kernel": "Linux",
|
||||
"Core": "22.04",
|
||||
"Platform": "x86_64",
|
||||
"OS": "Ubuntu",
|
||||
"WtVersion": "development",
|
||||
"UIVersion": ""
|
||||
},
|
||||
"Name": "testhost",
|
||||
"DNSLabel": "testhost",
|
||||
"Status": {
|
||||
"LastSeen": "2023-03-06T18:21:27.252010027+01:00",
|
||||
"Connected": false,
|
||||
"LoginExpired": false
|
||||
},
|
||||
"UserID": "edafee4e-63fb-11ec-90d6-0242ac120003",
|
||||
"SSHKey": "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAINWvvUkFFcrj48CWTkNUb/do/n52i1L5dH4DhGu+4ZuM",
|
||||
"SSHEnabled": false,
|
||||
"LoginExpirationEnabled": false,
|
||||
"LastLogin": "2023-03-07T09:02:47.442857106+01:00"
|
||||
}
|
||||
},
|
||||
"Users": {
|
||||
"edafee4e-63fb-11ec-90d6-0242ac120003": {
|
||||
"Id": "edafee4e-63fb-11ec-90d6-0242ac120003",
|
||||
"Role": "admin"
|
||||
},
|
||||
"f4f6d672-63fb-11ec-90d6-0242ac120003": {
|
||||
"Id": "f4f6d672-63fb-11ec-90d6-0242ac120003",
|
||||
"Role": "user"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
BIN management/server/testdata/store_with_expired_peers.sqlite (vendored, new binary file, not shown)
154 management/server/testdata/storev1.json (vendored, deleted)
@ -1,154 +0,0 @@
|
||||
{
|
||||
"Accounts": {
|
||||
"auth0|61bf82ddeab084006aa1bccd": {
|
||||
"Id": "auth0|61bf82ddeab084006aa1bccd",
|
||||
"SetupKeys": {
|
||||
"1B2B50B0-B3E8-4B0C-A426-525EDB8481BD": {
|
||||
"Id": "831727121",
|
||||
"Key": "1B2B50B0-B3E8-4B0C-A426-525EDB8481BD",
|
||||
"Name": "One-off key",
|
||||
"Type": "one-off",
|
||||
"CreatedAt": "2021-12-24T16:09:45.926075752+01:00",
|
||||
"ExpiresAt": "2022-01-23T16:09:45.926075752+01:00",
|
||||
"Revoked": false,
|
||||
"UsedTimes": 1,
|
||||
"LastUsed": "2021-12-24T16:12:45.763424077+01:00"
|
||||
},
|
||||
"EB51E9EB-A11F-4F6E-8E49-C982891B405A": {
|
||||
"Id": "1769568301",
|
||||
"Key": "EB51E9EB-A11F-4F6E-8E49-C982891B405A",
|
||||
"Name": "Default key",
|
||||
"Type": "reusable",
|
||||
"CreatedAt": "2021-12-24T16:09:45.926073628+01:00",
|
||||
"ExpiresAt": "2022-01-23T16:09:45.926073628+01:00",
|
||||
"Revoked": false,
|
||||
"UsedTimes": 1,
|
||||
"LastUsed": "2021-12-24T16:13:06.236748538+01:00"
|
||||
}
|
||||
},
|
||||
"Network": {
|
||||
"Id": "a443c07a-5765-4a78-97fc-390d9c1d0e49",
|
||||
"Net": {
|
||||
"IP": "100.64.0.0",
|
||||
"Mask": "/8AAAA=="
|
||||
},
|
||||
"Dns": ""
|
||||
},
|
||||
"Peers": {
|
||||
"oMNaI8qWi0CyclSuwGR++SurxJyM3pQEiPEHwX8IREo=": {
|
||||
"Key": "oMNaI8qWi0CyclSuwGR++SurxJyM3pQEiPEHwX8IREo=",
|
||||
"SetupKey": "EB51E9EB-A11F-4F6E-8E49-C982891B405A",
|
||||
"IP": "100.64.0.2",
|
||||
"Meta": {
|
||||
"Hostname": "braginini",
|
||||
"GoOS": "linux",
|
||||
"Kernel": "Linux",
|
||||
"Core": "21.04",
|
||||
"Platform": "x86_64",
|
||||
"OS": "Ubuntu",
|
||||
"WtVersion": ""
|
||||
},
|
||||
"Name": "braginini",
|
||||
"Status": {
|
||||
"LastSeen": "2021-12-24T16:13:11.244342541+01:00",
|
||||
"Connected": false
|
||||
}
|
||||
},
|
||||
"xlx9/9D8+ibnRiIIB8nHGMxGOzxV17r8ShPHgi4aYSM=": {
|
||||
"Key": "xlx9/9D8+ibnRiIIB8nHGMxGOzxV17r8ShPHgi4aYSM=",
|
||||
"SetupKey": "1B2B50B0-B3E8-4B0C-A426-525EDB8481BD",
|
||||
"IP": "100.64.0.1",
|
||||
"Meta": {
|
||||
"Hostname": "braginini",
|
||||
"GoOS": "linux",
|
||||
"Kernel": "Linux",
|
||||
"Core": "21.04",
|
||||
"Platform": "x86_64",
|
||||
"OS": "Ubuntu",
|
||||
"WtVersion": ""
|
||||
},
|
||||
"Name": "braginini",
|
||||
"Status": {
|
||||
"LastSeen": "2021-12-24T16:12:49.089339333+01:00",
|
||||
"Connected": false
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"google-oauth2|103201118415301331038": {
|
||||
"Id": "google-oauth2|103201118415301331038",
|
||||
"SetupKeys": {
|
||||
"5AFB60DB-61F2-4251-8E11-494847EE88E9": {
|
||||
"Id": "2485964613",
|
||||
"Key": "5AFB60DB-61F2-4251-8E11-494847EE88E9",
|
||||
"Name": "Default key",
|
||||
"Type": "reusable",
|
||||
"CreatedAt": "2021-12-24T16:10:02.238476+01:00",
|
||||
"ExpiresAt": "2022-01-23T16:10:02.238476+01:00",
|
||||
"Revoked": false,
|
||||
"UsedTimes": 1,
|
||||
"LastUsed": "2021-12-24T16:12:05.994307717+01:00"
|
||||
},
|
||||
"A72E4DC2-00DE-4542-8A24-62945438104E": {
|
||||
"Id": "3504804807",
|
||||
"Key": "A72E4DC2-00DE-4542-8A24-62945438104E",
|
||||
"Name": "One-off key",
|
||||
"Type": "one-off",
|
||||
"CreatedAt": "2021-12-24T16:10:02.238478209+01:00",
|
||||
"ExpiresAt": "2022-01-23T16:10:02.238478209+01:00",
|
||||
"Revoked": false,
|
||||
"UsedTimes": 1,
|
||||
"LastUsed": "2021-12-24T16:11:27.015741738+01:00"
|
||||
}
|
||||
},
|
||||
"Network": {
|
||||
"Id": "b6d0b152-364e-40c1-a8a1-fa7bcac2267f",
|
||||
"Net": {
|
||||
"IP": "100.64.0.0",
|
||||
"Mask": "/8AAAA=="
|
||||
},
|
||||
"Dns": ""
|
||||
},
|
||||
"Peers": {
|
||||
"6kjbmVq1hmucVzvBXo5OucY5OYv+jSsB1jUTLq291Dw=": {
|
||||
"Key": "6kjbmVq1hmucVzvBXo5OucY5OYv+jSsB1jUTLq291Dw=",
|
||||
"SetupKey": "5AFB60DB-61F2-4251-8E11-494847EE88E9",
|
||||
"IP": "100.64.0.2",
|
||||
"Meta": {
|
||||
"Hostname": "braginini",
|
||||
"GoOS": "linux",
|
||||
"Kernel": "Linux",
|
||||
"Core": "21.04",
|
||||
"Platform": "x86_64",
|
||||
"OS": "Ubuntu",
|
||||
"WtVersion": ""
|
||||
},
|
||||
"Name": "braginini",
|
||||
"Status": {
|
||||
"LastSeen": "2021-12-24T16:12:05.994305438+01:00",
|
||||
"Connected": false
|
||||
}
|
||||
},
|
||||
"Ok+5QMdt/UjoktNOvicGYj+IX2g98p+0N2PJ3vJ45RI=": {
|
||||
"Key": "Ok+5QMdt/UjoktNOvicGYj+IX2g98p+0N2PJ3vJ45RI=",
|
||||
"SetupKey": "A72E4DC2-00DE-4542-8A24-62945438104E",
|
||||
"IP": "100.64.0.1",
|
||||
"Meta": {
|
||||
"Hostname": "braginini",
|
||||
"GoOS": "linux",
|
||||
"Kernel": "Linux",
|
||||
"Core": "21.04",
|
||||
"Platform": "x86_64",
|
||||
"OS": "Ubuntu",
|
||||
"WtVersion": ""
|
||||
},
|
||||
"Name": "braginini",
|
||||
"Status": {
|
||||
"LastSeen": "2021-12-24T16:11:27.015739803+01:00",
|
||||
"Connected": false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
BIN management/server/testdata/storev1.sqlite (vendored, new binary file, not shown)
@ -59,8 +59,10 @@ func TestUser_CreatePAT_ForSameUser(t *testing.T) {
|
||||
|
||||
assert.Equal(t, pat.CreatedBy, mockUserID)
|
||||
|
||||
fileStore := am.Store.(*FileStore)
|
||||
tokenID := fileStore.HashedPAT2TokenID[pat.HashedToken]
|
||||
tokenID, err := am.Store.GetTokenIDByHashedToken(context.Background(), pat.HashedToken)
|
||||
if err != nil {
|
||||
t.Fatalf("Error when getting token ID by hashed token: %s", err)
|
||||
}
|
||||
|
||||
if tokenID == "" {
|
||||
t.Fatal("GetTokenIDByHashedToken failed after adding PAT")
|
||||
@ -68,11 +70,12 @@ func TestUser_CreatePAT_ForSameUser(t *testing.T) {
|
||||
|
||||
assert.Equal(t, pat.ID, tokenID)
|
||||
|
||||
userID := fileStore.TokenID2UserID[tokenID]
|
||||
if userID == "" {
|
||||
t.Fatal("GetUserByTokenId failed after adding PAT")
|
||||
user, err := am.Store.GetUserByTokenID(context.Background(), tokenID)
|
||||
if err != nil {
|
||||
t.Fatalf("Error when getting user by token ID: %s", err)
|
||||
}
|
||||
assert.Equal(t, mockUserID, userID)
|
||||
|
||||
assert.Equal(t, mockUserID, user.Id)
|
||||
}
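The PAT assertions now go through the Store interface instead of FileStore's in-memory maps; the same two-step lookup as a hedged convenience sketch (hypothetical helper, return type as used by the tests above, not part of this change):

func getUserByPATHash(ctx context.Context, store Store, hashedToken string) (*User, error) {
	// Hash -> token ID, then token ID -> owning user, both via the Store interface.
	tokenID, err := store.GetTokenIDByHashedToken(ctx, hashedToken)
	if err != nil {
		return nil, err
	}
	return store.GetUserByTokenID(ctx, tokenID)
}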
|
||||
|
||||
func TestUser_CreatePAT_ForDifferentUser(t *testing.T) {
|
||||
@ -189,9 +192,12 @@ func TestUser_DeletePAT(t *testing.T) {
|
||||
t.Fatalf("Error when adding PAT to user: %s", err)
|
||||
}
|
||||
|
||||
assert.Nil(t, store.Accounts[mockAccountID].Users[mockUserID].PATs[mockTokenID1])
|
||||
assert.Empty(t, store.HashedPAT2TokenID[mockToken1])
|
||||
assert.Empty(t, store.TokenID2UserID[mockTokenID1])
|
||||
account, err = store.GetAccount(context.Background(), mockAccountID)
|
||||
if err != nil {
|
||||
t.Fatalf("Error when getting account: %s", err)
|
||||
}
|
||||
|
||||
assert.Nil(t, account.Users[mockUserID].PATs[mockTokenID1])
|
||||
}
|
||||
|
||||
func TestUser_GetPAT(t *testing.T) {
|
||||
@ -350,13 +356,16 @@ func TestUser_CreateServiceUser(t *testing.T) {
|
||||
t.Fatalf("Error when creating service user: %s", err)
|
||||
}
|
||||
|
||||
assert.Equal(t, 2, len(store.Accounts[mockAccountID].Users))
|
||||
assert.NotNil(t, store.Accounts[mockAccountID].Users[user.ID])
|
||||
assert.True(t, store.Accounts[mockAccountID].Users[user.ID].IsServiceUser)
|
||||
assert.Equal(t, mockServiceUserName, store.Accounts[mockAccountID].Users[user.ID].ServiceUserName)
|
||||
assert.Equal(t, UserRole(mockRole), store.Accounts[mockAccountID].Users[user.ID].Role)
|
||||
assert.Equal(t, []string{"group1", "group2"}, store.Accounts[mockAccountID].Users[user.ID].AutoGroups)
|
||||
assert.Equal(t, map[string]*PersonalAccessToken{}, store.Accounts[mockAccountID].Users[user.ID].PATs)
|
||||
account, err = store.GetAccount(context.Background(), mockAccountID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 2, len(account.Users))
|
||||
assert.NotNil(t, account.Users[user.ID])
|
||||
assert.True(t, account.Users[user.ID].IsServiceUser)
|
||||
assert.Equal(t, mockServiceUserName, account.Users[user.ID].ServiceUserName)
|
||||
assert.Equal(t, UserRole(mockRole), account.Users[user.ID].Role)
|
||||
assert.Equal(t, []string{"group1", "group2"}, account.Users[user.ID].AutoGroups)
|
||||
assert.Equal(t, map[string]*PersonalAccessToken{}, account.Users[user.ID].PATs)
|
||||
|
||||
assert.Zero(t, user.Email)
|
||||
assert.True(t, user.IsServiceUser)
|
||||
@ -394,12 +403,15 @@ func TestUser_CreateUser_ServiceUser(t *testing.T) {
|
||||
t.Fatalf("Error when creating user: %s", err)
|
||||
}
|
||||
|
||||
account, err = store.GetAccount(context.Background(), mockAccountID)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.True(t, user.IsServiceUser)
|
||||
assert.Equal(t, 2, len(store.Accounts[mockAccountID].Users))
|
||||
assert.True(t, store.Accounts[mockAccountID].Users[user.ID].IsServiceUser)
|
||||
assert.Equal(t, mockServiceUserName, store.Accounts[mockAccountID].Users[user.ID].ServiceUserName)
|
||||
assert.Equal(t, UserRole(mockRole), store.Accounts[mockAccountID].Users[user.ID].Role)
|
||||
assert.Equal(t, []string{"group1", "group2"}, store.Accounts[mockAccountID].Users[user.ID].AutoGroups)
|
||||
assert.Equal(t, 2, len(account.Users))
|
||||
assert.True(t, account.Users[user.ID].IsServiceUser)
|
||||
assert.Equal(t, mockServiceUserName, account.Users[user.ID].ServiceUserName)
|
||||
assert.Equal(t, UserRole(mockRole), account.Users[user.ID].Role)
|
||||
assert.Equal(t, []string{"group1", "group2"}, account.Users[user.ID].AutoGroups)
|
||||
|
||||
assert.Equal(t, mockServiceUserName, user.Name)
|
||||
assert.Equal(t, mockRole, user.Role)
|
||||
@ -550,12 +562,15 @@ func TestUser_DeleteUser_ServiceUser(t *testing.T) {
|
||||
err = am.DeleteUser(context.Background(), mockAccountID, mockUserID, mockServiceUserID)
|
||||
tt.assertErrFunc(t, err, tt.assertErrMessage)
|
||||
|
||||
account, err2 := store.GetAccount(context.Background(), mockAccountID)
|
||||
assert.NoError(t, err2)
|
||||
|
||||
if err != nil {
|
||||
assert.Equal(t, 2, len(store.Accounts[mockAccountID].Users))
|
||||
assert.NotNil(t, store.Accounts[mockAccountID].Users[mockServiceUserID])
|
||||
assert.Equal(t, 2, len(account.Users))
|
||||
assert.NotNil(t, account.Users[mockServiceUserID])
|
||||
} else {
|
||||
assert.Equal(t, 1, len(store.Accounts[mockAccountID].Users))
|
||||
assert.Nil(t, store.Accounts[mockAccountID].Users[mockServiceUserID])
|
||||
assert.Equal(t, 1, len(account.Users))
|
||||
assert.Nil(t, account.Users[mockServiceUserID])
|
||||
}
|
||||
})
|
||||
}
|
||||
|