package server

import (
	"context"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/rs/xid"
	log "github.com/sirupsen/logrus"

	nbgroup "github.com/netbirdio/netbird/management/server/group"
	nbpeer "github.com/netbirdio/netbird/management/server/peer"
	"github.com/netbirdio/netbird/management/server/posture"
	"github.com/netbirdio/netbird/management/server/status"
	"github.com/netbirdio/netbird/management/server/telemetry"

	"github.com/netbirdio/netbird/util"
)

// storeFileName is the name of the Store file kept in the datadir
const storeFileName = "store.json"

// FileStore represents an account storage backed by a file persisted to disk
type FileStore struct {
	Accounts                map[string]*Account
	SetupKeyID2AccountID    map[string]string `json:"-"`
	PeerKeyID2AccountID     map[string]string `json:"-"`
	PeerID2AccountID        map[string]string `json:"-"`
	UserID2AccountID        map[string]string `json:"-"`
	PrivateDomain2AccountID map[string]string `json:"-"`
	HashedPAT2TokenID       map[string]string `json:"-"`
	TokenID2UserID          map[string]string `json:"-"`
	InstallationID          string

	// mutex to synchronise Store read/write operations
	mux       sync.Mutex `json:"-"`
	storeFile string     `json:"-"`

	// sync.Mutex indexed by resource ID
	resourceLocks     sync.Map   `json:"-"`
	globalAccountLock sync.Mutex `json:"-"`

	metrics telemetry.AppMetrics `json:"-"`
}
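
// exampleResolveAccountByPeerKey is an illustrative sketch, not part of the
// store API: only Accounts and InstallationID are persisted, while the maps
// tagged `json:"-"` are secondary indexes rebuilt on load. A lookup typically
// goes peer public key -> account ID -> *Account, as shown here; it assumes
// the caller already holds mux.
func exampleResolveAccountByPeerKey(s *FileStore, peerPubKey string) (*Account, bool) {
	accountID, ok := s.PeerKeyID2AccountID[peerPubKey]
	if !ok {
		return nil, false
	}
	account, ok := s.Accounts[accountID]
	return account, ok
}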

type StoredAccount struct{}

// NewFileStore restores a store from the file located in the datadir
func NewFileStore(ctx context.Context, dataDir string, metrics telemetry.AppMetrics) (*FileStore, error) {
	fs, err := restore(ctx, filepath.Join(dataDir, storeFileName))
	if err != nil {
		return nil, err
	}
	fs.metrics = metrics
	return fs, nil
}
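
// exampleOpenFileStore is an illustrative sketch, not part of the package API:
// it shows how a caller might open the file-backed store and flush it on
// shutdown. The data directory path and the nil metrics value are assumptions.
func exampleOpenFileStore(ctx context.Context) error {
	store, err := NewFileStore(ctx, "/var/lib/netbird", nil)
	if err != nil {
		return err
	}
	// Close persists the current in-memory state to store.json
	defer func() { _ = store.Close(ctx) }()

	// ... use the store ...
	return nil
}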

// NewFilestoreFromSqliteStore restores a store from SQLite and persists it as FileStore JSON in the file located in the datadir
func NewFilestoreFromSqliteStore(ctx context.Context, sqlStore *SqlStore, dataDir string, metrics telemetry.AppMetrics) (*FileStore, error) {
	store, err := NewFileStore(ctx, dataDir, metrics)
	if err != nil {
		return nil, err
	}

	err = store.SaveInstallationID(ctx, sqlStore.GetInstallationID())
	if err != nil {
		return nil, err
	}

	for _, account := range sqlStore.GetAllAccounts(ctx) {
		store.Accounts[account.Id] = account
	}

	return store, store.persist(ctx, store.storeFile)
}
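
// exampleExportSqliteToFile is an illustrative sketch, not part of the package
// API: it shows the intended one-shot export path from an existing *SqlStore
// into a FileStore JSON file. The sqlStore and dataDir values are assumed to
// be prepared by the caller.
func exampleExportSqliteToFile(ctx context.Context, sqlStore *SqlStore, dataDir string) error {
	fileStore, err := NewFilestoreFromSqliteStore(ctx, sqlStore, dataDir, nil)
	if err != nil {
		return err
	}
	// at this point dataDir/store.json contains all accounts from the SQL store
	return fileStore.Close(ctx)
}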

// restore the state of the store from the file.
// Creates a new empty store file if one doesn't exist
func restore(ctx context.Context, file string) (*FileStore, error) {
	if _, err := os.Stat(file); os.IsNotExist(err) {
		// create a new FileStore if previously didn't exist (e.g. first run)
		s := &FileStore{
			Accounts:                make(map[string]*Account),
			mux:                     sync.Mutex{},
			globalAccountLock:       sync.Mutex{},
			SetupKeyID2AccountID:    make(map[string]string),
			PeerKeyID2AccountID:     make(map[string]string),
			UserID2AccountID:        make(map[string]string),
			PrivateDomain2AccountID: make(map[string]string),
			PeerID2AccountID:        make(map[string]string),
			HashedPAT2TokenID:       make(map[string]string),
			TokenID2UserID:          make(map[string]string),
			storeFile:               file,
		}

		err = s.persist(ctx, file)
		if err != nil {
			return nil, err
		}

		return s, nil
	}

	read, err := util.ReadJson(file, &FileStore{})
	if err != nil {
		return nil, err
	}

	store := read.(*FileStore)
	store.storeFile = file
	store.SetupKeyID2AccountID = make(map[string]string)
	store.PeerKeyID2AccountID = make(map[string]string)
	store.UserID2AccountID = make(map[string]string)
	store.PrivateDomain2AccountID = make(map[string]string)
	store.PeerID2AccountID = make(map[string]string)
	store.HashedPAT2TokenID = make(map[string]string)
	store.TokenID2UserID = make(map[string]string)

	for accountID, account := range store.Accounts {
		if account.Settings == nil {
			account.Settings = &Settings{
				PeerLoginExpirationEnabled: false,
				PeerLoginExpiration:        DefaultPeerLoginExpiration,
			}
		}

		for setupKeyID := range account.SetupKeys {
			store.SetupKeyID2AccountID[strings.ToUpper(setupKeyID)] = accountID
		}

		for _, peer := range account.Peers {
			store.PeerKeyID2AccountID[peer.Key] = accountID
			store.PeerID2AccountID[peer.ID] = accountID
		}

		for _, user := range account.Users {
			store.UserID2AccountID[user.Id] = accountID
			if user.Issued == "" {
				user.Issued = UserIssuedAPI
				account.Users[user.Id] = user
			}
			for _, pat := range user.PATs {
				store.TokenID2UserID[pat.ID] = user.Id
				store.HashedPAT2TokenID[pat.HashedToken] = pat.ID
			}
		}

		if account.Domain != "" && account.DomainCategory == PrivateCategory &&
			account.IsDomainPrimaryAccount {
			store.PrivateDomain2AccountID[account.Domain] = accountID
		}

		// TODO: delete this block after migration
		policies := make(map[string]int, len(account.Policies))
		for i, policy := range account.Policies {
			policies[policy.ID] = i
			policy.UpgradeAndFix()
		}
		if account.Policies == nil {
			account.Policies = make([]*Policy, 0)
		}

		// for data migration. Can be removed once most of the user base has peer DNS labels
		existingLabels := account.getPeerDNSLabels()
		if len(existingLabels) != len(account.Peers) {
			addPeerLabelsToAccount(ctx, account, existingLabels)
		}

		// TODO: delete this block after migration
		// Set API as the issuer for groups that don't have this field set
		for _, group := range account.Groups {
			if group.Issued == "" {
				group.Issued = nbgroup.GroupIssuedAPI
			}
		}

		allGroup, err := account.GetGroupAll()
		if err != nil {
			log.WithContext(ctx).Errorf("unable to find the All group, this should happen only when migrating from a version that didn't support groups. Error: %v", err)
			// if the All group didn't exist we probably don't have routes to update
			continue
		}

		for _, route := range account.Routes {
			if len(route.Groups) == 0 {
				route.Groups = []string{allGroup.ID}
			}
		}

		// migration to Peer.ID from Peer.Key.
		// Old peers that require migration have an empty Peer.ID in the store.json.
		// Generate a new ID with xid for these peers.
		// Set the Peer.ID to the newly generated value.
		// Replace all the mentions of Peer.Key as ID (groups and routes).
		// Swap Peer.Key with Peer.ID in the Account.Peers map.
		migrationPeers := make(map[string]*nbpeer.Peer) // key to Peer
		for key, peer := range account.Peers {
			// set LastLogin for the peers that were onboarded before the peer login expiration feature
			if peer.LastLogin.IsZero() {
				peer.LastLogin = time.Now().UTC()
			}

			if peer.ID != "" {
				continue
			}
			id := xid.New().String()
			peer.ID = id
			migrationPeers[key] = peer
		}

		if len(migrationPeers) > 0 {
			// swap Peer.Key with Peer.ID in the Account.Peers map.
			for key, peer := range migrationPeers {
				delete(account.Peers, key)
				account.Peers[peer.ID] = peer
				store.PeerID2AccountID[peer.ID] = accountID
			}

			// detect groups that have Peer.Key as a reference and replace it with ID.
			for _, group := range account.Groups {
				for i, peer := range group.Peers {
					if p, ok := migrationPeers[peer]; ok {
						group.Peers[i] = p.ID
					}
				}
			}

			// detect routes that have Peer.Key as a reference and replace it with ID.
			for _, route := range account.Routes {
				if peer, ok := migrationPeers[route.Peer]; ok {
					route.Peer = peer.ID
				}
			}
		}
	}

	// we need this persist to apply changes we made to account.Peers (we set them to Disconnected)
	err = store.persist(ctx, store.storeFile)
	if err != nil {
		return nil, err
	}

	return store, nil
}

// persist account data to a file
// It is recommended to call it while holding FileStore.mux
func (s *FileStore) persist(ctx context.Context, file string) error {
	start := time.Now()

	err := util.WriteJson(file, s)
	if err != nil {
		return err
	}

	took := time.Since(start)
	if s.metrics != nil {
		s.metrics.StoreMetrics().CountPersistenceDuration(took)
	}
	log.WithContext(ctx).Debugf("took %d ms to persist the FileStore", took.Milliseconds())

	return nil
}

// AcquireGlobalLock acquires a global lock across all the accounts and returns a function that releases the lock
func (s *FileStore) AcquireGlobalLock(ctx context.Context) (unlock func()) {
	log.WithContext(ctx).Debugf("acquiring global lock")
	start := time.Now()
	s.globalAccountLock.Lock()

	unlock = func() {
		s.globalAccountLock.Unlock()
		log.WithContext(ctx).Debugf("released global lock in %v", time.Since(start))
	}

	took := time.Since(start)
	log.WithContext(ctx).Debugf("took %v to acquire global lock", took)
	if s.metrics != nil {
		s.metrics.StoreMetrics().CountGlobalLockAcquisitionDuration(took)
	}

	return unlock
}

// AcquireWriteLockByUID acquires an ID lock for writing to a resource and returns a function that releases the lock
func (s *FileStore) AcquireWriteLockByUID(ctx context.Context, uniqueID string) (unlock func()) {
	log.WithContext(ctx).Debugf("acquiring lock for ID %s", uniqueID)
	start := time.Now()
	value, _ := s.resourceLocks.LoadOrStore(uniqueID, &sync.Mutex{})
	mtx := value.(*sync.Mutex)
	mtx.Lock()

	unlock = func() {
		mtx.Unlock()
		log.WithContext(ctx).Debugf("released lock for ID %s in %v", uniqueID, time.Since(start))
	}

	return unlock
}

// AcquireReadLockByUID acquires an ID lock for reading a resource and returns a function that releases the lock
// This method still returns a write lock because the file store can't handle read locks
func (s *FileStore) AcquireReadLockByUID(ctx context.Context, uniqueID string) (unlock func()) {
	return s.AcquireWriteLockByUID(ctx, uniqueID)
}
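
// exampleLockedAccountUpdate is an illustrative sketch, not part of the store
// API: it shows the typical caller pattern for the lock helpers above, where a
// per-account lock is held across a read-modify-write cycle.
func exampleLockedAccountUpdate(ctx context.Context, s *FileStore, accountID string) error {
	unlock := s.AcquireWriteLockByUID(ctx, accountID)
	defer unlock()

	account, err := s.GetAccount(ctx, accountID)
	if err != nil {
		return err
	}
	// ... mutate the copy while the lock is held ...
	return s.SaveAccount(ctx, account)
}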

func (s *FileStore) SaveAccount(ctx context.Context, account *Account) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	if account.Id == "" {
		return status.Errorf(status.InvalidArgument, "account id should not be empty")
	}

	accountCopy := account.Copy()

	s.Accounts[accountCopy.Id] = accountCopy

	// todo check that account.Id and keyId do not already exist
	// because if keyId exists for other accounts this can be bad
	for keyID := range accountCopy.SetupKeys {
		s.SetupKeyID2AccountID[strings.ToUpper(keyID)] = accountCopy.Id
	}

	// enforce peer to account index and delete peer to route indexes for rebuild
	for _, peer := range accountCopy.Peers {
		s.PeerKeyID2AccountID[peer.Key] = accountCopy.Id
		s.PeerID2AccountID[peer.ID] = accountCopy.Id
	}

	for _, user := range accountCopy.Users {
		s.UserID2AccountID[user.Id] = accountCopy.Id
		for _, pat := range user.PATs {
			s.TokenID2UserID[pat.ID] = user.Id
			s.HashedPAT2TokenID[pat.HashedToken] = pat.ID
		}
	}

	if accountCopy.DomainCategory == PrivateCategory && accountCopy.IsDomainPrimaryAccount {
		s.PrivateDomain2AccountID[accountCopy.Domain] = accountCopy.Id
	}

	return s.persist(ctx, s.storeFile)
}
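
// exampleToggleLoginExpiration is an illustrative sketch, not part of the store
// API: SaveAccount persists a copy of the passed account, so the usual flow is
// to fetch a copy, mutate it, and save it back.
func exampleToggleLoginExpiration(ctx context.Context, s *FileStore, accountID string, enabled bool) error {
	account, err := s.GetAccount(ctx, accountID)
	if err != nil {
		return err
	}
	account.Settings.PeerLoginExpirationEnabled = enabled
	return s.SaveAccount(ctx, account)
}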

func (s *FileStore) DeleteAccount(ctx context.Context, account *Account) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	if account.Id == "" {
		return status.Errorf(status.InvalidArgument, "account id should not be empty")
	}

	for keyID := range account.SetupKeys {
		delete(s.SetupKeyID2AccountID, strings.ToUpper(keyID))
	}

	// remove the peer ID and peer key to account indexes
	for _, peer := range account.Peers {
		delete(s.PeerKeyID2AccountID, peer.Key)
		delete(s.PeerID2AccountID, peer.ID)
	}

	for _, user := range account.Users {
		for _, pat := range user.PATs {
			delete(s.TokenID2UserID, pat.ID)
			delete(s.HashedPAT2TokenID, pat.HashedToken)
		}
		delete(s.UserID2AccountID, user.Id)
	}

	if account.DomainCategory == PrivateCategory && account.IsDomainPrimaryAccount {
		delete(s.PrivateDomain2AccountID, account.Domain)
	}

	delete(s.Accounts, account.Id)

	return s.persist(ctx, s.storeFile)
}

// DeleteHashedPAT2TokenIDIndex removes an entry from the indexing map HashedPAT2TokenID
func (s *FileStore) DeleteHashedPAT2TokenIDIndex(hashedToken string) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	delete(s.HashedPAT2TokenID, hashedToken)

	return nil
}

// DeleteTokenID2UserIDIndex removes an entry from the indexing map TokenID2UserID
func (s *FileStore) DeleteTokenID2UserIDIndex(tokenID string) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	delete(s.TokenID2UserID, tokenID)

	return nil
}

// GetAccountByPrivateDomain returns account by private domain
func (s *FileStore) GetAccountByPrivateDomain(_ context.Context, domain string) (*Account, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.PrivateDomain2AccountID[strings.ToLower(domain)]
	if !ok {
		return nil, status.Errorf(status.NotFound, "account not found: provided domain is not registered or is not private")
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	return account.Copy(), nil
}

// GetAccountBySetupKey returns account by setup key id
func (s *FileStore) GetAccountBySetupKey(_ context.Context, setupKey string) (*Account, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.SetupKeyID2AccountID[strings.ToUpper(setupKey)]
	if !ok {
		return nil, status.Errorf(status.NotFound, "account not found: provided setup key doesn't exist")
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	return account.Copy(), nil
}

// GetTokenIDByHashedToken returns the id of a personal access token by its hashed secret
func (s *FileStore) GetTokenIDByHashedToken(_ context.Context, token string) (string, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	tokenID, ok := s.HashedPAT2TokenID[token]
	if !ok {
		return "", status.Errorf(status.NotFound, "tokenID not found: provided token doesn't exist")
	}

	return tokenID, nil
}

// GetUserByTokenID returns the User object a tokenID belongs to
func (s *FileStore) GetUserByTokenID(_ context.Context, tokenID string) (*User, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	userID, ok := s.TokenID2UserID[tokenID]
	if !ok {
		return nil, status.Errorf(status.NotFound, "user not found: provided tokenID doesn't exist")
	}

	accountID, ok := s.UserID2AccountID[userID]
	if !ok {
		return nil, status.Errorf(status.NotFound, "accountID not found: provided userID doesn't exist")
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	return account.Users[userID].Copy(), nil
}

// GetUserByUserID returns the User object for a given user ID
func (s *FileStore) GetUserByUserID(_ context.Context, userID string) (*User, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.UserID2AccountID[userID]
	if !ok {
		return nil, status.Errorf(status.NotFound, "accountID not found: provided userID doesn't exist")
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	return account.Users[userID].Copy(), nil
}

// GetAccountGroups returns all groups of the account identified by accountID
func (s *FileStore) GetAccountGroups(_ context.Context, accountID string) ([]*nbgroup.Group, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	groupsSlice := make([]*nbgroup.Group, 0, len(account.Groups))
	for _, group := range account.Groups {
		groupsSlice = append(groupsSlice, group)
	}

	return groupsSlice, nil
}

// GetAllAccounts returns all accounts
func (s *FileStore) GetAllAccounts(_ context.Context) (all []*Account) {
	s.mux.Lock()
	defer s.mux.Unlock()

	for _, a := range s.Accounts {
		all = append(all, a.Copy())
	}

	return all
}

// getAccount returns a reference to the Account. Should not return a copy.
func (s *FileStore) getAccount(accountID string) (*Account, error) {
	account, ok := s.Accounts[accountID]
	if !ok {
		return nil, status.Errorf(status.NotFound, "account not found")
	}

	return account, nil
}

// GetAccount returns an account for ID
func (s *FileStore) GetAccount(_ context.Context, accountID string) (*Account, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	return account.Copy(), nil
}

// GetAccountByUser returns a user account
func (s *FileStore) GetAccountByUser(_ context.Context, userID string) (*Account, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.UserID2AccountID[userID]
	if !ok {
		return nil, status.NewUserNotFoundError(userID)
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	return account.Copy(), nil
}

// GetAccountByPeerID returns an account for a given peer ID
func (s *FileStore) GetAccountByPeerID(ctx context.Context, peerID string) (*Account, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.PeerID2AccountID[peerID]
	if !ok {
		return nil, status.Errorf(status.NotFound, "provided peer ID doesn't exist %s", peerID)
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	// this protection is needed because when we delete a peer, we don't really remove the index peerID -> accountID.
	// check Account.Peers for a match
	if _, ok := account.Peers[peerID]; !ok {
		delete(s.PeerID2AccountID, peerID)
		log.WithContext(ctx).Warnf("removed stale peerID %s to accountID %s index", peerID, accountID)
		return nil, status.NewPeerNotFoundError(peerID)
	}

	return account.Copy(), nil
}

// GetAccountByPeerPubKey returns an account for a given peer WireGuard public key
func (s *FileStore) GetAccountByPeerPubKey(ctx context.Context, peerKey string) (*Account, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.PeerKeyID2AccountID[peerKey]
	if !ok {
		return nil, status.NewPeerNotFoundError(peerKey)
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	// this protection is needed because when we delete a peer, we don't really remove the index peerKey -> accountID.
	// check Account.Peers for a match
	stale := true
	for _, peer := range account.Peers {
		if peer.Key == peerKey {
			stale = false
			break
		}
	}
	if stale {
		delete(s.PeerKeyID2AccountID, peerKey)
		log.WithContext(ctx).Warnf("removed stale peerKey %s to accountID %s index", peerKey, accountID)
		return nil, status.NewPeerNotFoundError(peerKey)
	}

	return account.Copy(), nil
}

// GetAccountIDByPeerPubKey returns the account ID for a given peer WireGuard public key
func (s *FileStore) GetAccountIDByPeerPubKey(_ context.Context, peerKey string) (string, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.PeerKeyID2AccountID[peerKey]
	if !ok {
		return "", status.NewPeerNotFoundError(peerKey)
	}

	return accountID, nil
}

// GetAccountIDByUserID returns the account ID a given user belongs to
func (s *FileStore) GetAccountIDByUserID(userID string) (string, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.UserID2AccountID[userID]
	if !ok {
		return "", status.NewUserNotFoundError(userID)
	}

	return accountID, nil
}

// GetAccountIDBySetupKey returns the account ID associated with a setup key
func (s *FileStore) GetAccountIDBySetupKey(_ context.Context, setupKey string) (string, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.SetupKeyID2AccountID[strings.ToUpper(setupKey)]
	if !ok {
		return "", status.Errorf(status.NotFound, "account not found: provided setup key doesn't exist")
	}

	return accountID, nil
}

// GetPeerByPeerPubKey returns a peer by its WireGuard public key
func (s *FileStore) GetPeerByPeerPubKey(_ context.Context, peerKey string) (*nbpeer.Peer, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.PeerKeyID2AccountID[peerKey]
	if !ok {
		return nil, status.NewPeerNotFoundError(peerKey)
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	for _, peer := range account.Peers {
		if peer.Key == peerKey {
			return peer.Copy(), nil
		}
	}

	return nil, status.NewPeerNotFoundError(peerKey)
}

// GetAccountSettings returns the settings of the account identified by accountID
func (s *FileStore) GetAccountSettings(_ context.Context, accountID string) (*Settings, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	return account.Settings.Copy(), nil
}

// GetInstallationID returns the installation ID from the store
func (s *FileStore) GetInstallationID() string {
	return s.InstallationID
}

// SaveInstallationID saves the installation ID
func (s *FileStore) SaveInstallationID(ctx context.Context, ID string) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	s.InstallationID = ID

	return s.persist(ctx, s.storeFile)
}

// SavePeer saves the peer in the account
func (s *FileStore) SavePeer(_ context.Context, accountID string, peer *nbpeer.Peer) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	account, err := s.getAccount(accountID)
	if err != nil {
		return err
	}

	newPeer := peer.Copy()
	account.Peers[peer.ID] = newPeer
	s.PeerKeyID2AccountID[peer.Key] = accountID
	s.PeerID2AccountID[peer.ID] = accountID

	return nil
}

// SavePeerStatus stores the PeerStatus in memory. It doesn't attempt to persist data to speed up things.
// PeerStatus will be saved eventually when some other changes occur.
func (s *FileStore) SavePeerStatus(accountID, peerID string, peerStatus nbpeer.PeerStatus) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	account, err := s.getAccount(accountID)
	if err != nil {
		return err
	}

	peer := account.Peers[peerID]
	if peer == nil {
		return status.Errorf(status.NotFound, "peer %s not found", peerID)
	}

	peer.Status = &peerStatus

	return nil
}
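
// exampleMarkPeerDisconnected is an illustrative sketch, not part of the store
// API: SavePeerStatus only updates memory, so the new status becomes durable
// the next time something else triggers a persist (e.g. SaveAccount or Close).
// The PeerStatus fields used here (Connected, LastSeen) are assumed from the
// peer package.
func exampleMarkPeerDisconnected(s *FileStore, accountID, peerID string) error {
	return s.SavePeerStatus(accountID, peerID, nbpeer.PeerStatus{
		Connected: false,
		LastSeen:  time.Now().UTC(),
	})
}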

// SavePeerLocation stores the peer's Location in memory. It doesn't attempt to persist data to speed up things.
// Peer.Location will be saved eventually when some other changes occur.
func (s *FileStore) SavePeerLocation(accountID string, peerWithLocation *nbpeer.Peer) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	account, err := s.getAccount(accountID)
	if err != nil {
		return err
	}

	peer := account.Peers[peerWithLocation.ID]
	if peer == nil {
		return status.Errorf(status.NotFound, "peer %s not found", peerWithLocation.ID)
	}

	peer.Location = peerWithLocation.Location

	return nil
}

// SaveUserLastLogin stores the last login time for a user in memory. It doesn't attempt to persist data to speed up things.
func (s *FileStore) SaveUserLastLogin(accountID, userID string, lastLogin time.Time) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	account, err := s.getAccount(accountID)
	if err != nil {
		return err
	}

	user := account.Users[userID]
	if user == nil {
		return status.Errorf(status.NotFound, "user %s not found", userID)
	}

	user.LastLogin = lastLogin

	return nil
}

func (s *FileStore) GetPostureCheckByChecksDefinition(accountID string, checks *posture.ChecksDefinition) (*posture.Checks, error) {
	return nil, status.Errorf(status.Internal, "GetPostureCheckByChecksDefinition is not implemented")
}

// Close the FileStore persisting data to disk
func (s *FileStore) Close(ctx context.Context) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	log.WithContext(ctx).Infof("closing FileStore")

	return s.persist(ctx, s.storeFile)
}

// GetStoreEngine returns FileStoreEngine
func (s *FileStore) GetStoreEngine() StoreEngine {
	return FileStoreEngine
}

func (s *FileStore) SaveUsers(accountID string, users map[string]*User) error {
	return status.Errorf(status.Internal, "SaveUsers is not implemented")
}

func (s *FileStore) SaveGroups(accountID string, groups map[string]*nbgroup.Group) error {
	return status.Errorf(status.Internal, "SaveGroups is not implemented")
}

func (s *FileStore) GetAccountIDByPrivateDomain(ctx context.Context, domain string) (string, error) {
	return "", status.Errorf(status.Internal, "GetAccountIDByPrivateDomain is not implemented")
}