package server

import (
	"os"
	"path/filepath"
	"strings"
	"sync"
	"time"

	"github.com/rs/xid"
	log "github.com/sirupsen/logrus"

	"github.com/netbirdio/netbird/management/server/status"

	"github.com/netbirdio/netbird/util"
)

// storeFileName is the name of the Store file. It is stored in the datadir.
const storeFileName = "store.json"

// FileStore represents an account storage backed by a file persisted to disk
type FileStore struct {
	Accounts                map[string]*Account
	SetupKeyID2AccountID    map[string]string `json:"-"`
	PeerKeyID2AccountID     map[string]string `json:"-"`
	PeerID2AccountID        map[string]string `json:"-"`
	UserID2AccountID        map[string]string `json:"-"`
	PrivateDomain2AccountID map[string]string `json:"-"`
	HashedPAT2TokenID       map[string]string `json:"-"`
	TokenID2UserID          map[string]string `json:"-"`
	InstallationID          string

	// mutex to synchronise Store read/write operations
	mux       sync.Mutex `json:"-"`
	storeFile string     `json:"-"`

	// sync.Mutex indexed by accountID
	accountLocks      sync.Map   `json:"-"`
	globalAccountLock sync.Mutex `json:"-"`
}

type StoredAccount struct{}

// NewFileStore restores a store from the file located in the datadir
func NewFileStore(dataDir string) (*FileStore, error) {
	return restore(filepath.Join(dataDir, storeFileName))
}
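
// A minimal usage sketch (illustrative only; the data directory path is an
// assumption, not part of this file):
//
//	store, err := NewFileStore("/var/lib/netbird")
//	if err != nil {
//		log.Fatalf("failed to open store: %v", err)
//	}
//	defer store.Close()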

// restore the state of the store from the file.
// Creates a new empty store file if it doesn't exist
func restore(file string) (*FileStore, error) {
	if _, err := os.Stat(file); os.IsNotExist(err) {
		// create a new FileStore if it didn't previously exist (e.g. first run)
		s := &FileStore{
			Accounts:                make(map[string]*Account),
			mux:                     sync.Mutex{},
			globalAccountLock:       sync.Mutex{},
			SetupKeyID2AccountID:    make(map[string]string),
			PeerKeyID2AccountID:     make(map[string]string),
			UserID2AccountID:        make(map[string]string),
			PrivateDomain2AccountID: make(map[string]string),
			PeerID2AccountID:        make(map[string]string),
			HashedPAT2TokenID:       make(map[string]string),
			TokenID2UserID:          make(map[string]string),
			storeFile:               file,
		}

		err = s.persist(file)
		if err != nil {
			return nil, err
		}

		return s, nil
	}

	read, err := util.ReadJson(file, &FileStore{})
	if err != nil {
		return nil, err
	}

	store := read.(*FileStore)
	store.storeFile = file
	store.SetupKeyID2AccountID = make(map[string]string)
	store.PeerKeyID2AccountID = make(map[string]string)
	store.UserID2AccountID = make(map[string]string)
	store.PrivateDomain2AccountID = make(map[string]string)
	store.PeerID2AccountID = make(map[string]string)
	store.HashedPAT2TokenID = make(map[string]string)
	store.TokenID2UserID = make(map[string]string)

	for accountID, account := range store.Accounts {
		if account.Settings == nil {
			account.Settings = &Settings{
				PeerLoginExpirationEnabled: false,
				PeerLoginExpiration:        DefaultPeerLoginExpiration,
			}
		}

		for setupKeyId := range account.SetupKeys {
			store.SetupKeyID2AccountID[strings.ToUpper(setupKeyId)] = accountID
		}

		for _, peer := range account.Peers {
			store.PeerKeyID2AccountID[peer.Key] = accountID
			store.PeerID2AccountID[peer.ID] = accountID
			// reset all peers to status = Disconnected
			if peer.Status != nil && peer.Status.Connected {
				peer.Status.Connected = false
			}
		}

		for _, user := range account.Users {
			store.UserID2AccountID[user.Id] = accountID
			for _, pat := range user.PATs {
				store.TokenID2UserID[pat.ID] = user.Id
				store.HashedPAT2TokenID[pat.HashedToken] = pat.ID
			}
		}

		if account.Domain != "" && account.DomainCategory == PrivateCategory &&
			account.IsDomainPrimaryAccount {
			store.PrivateDomain2AccountID[account.Domain] = accountID
		}

		// if no policies are defined, that means we need to migrate Rules to policies
		if len(account.Policies) == 0 {
			account.Policies = make([]*Policy, 0)
			for _, rule := range account.Rules {
				policy, err := RuleToPolicy(rule)
				if err != nil {
					log.Errorf("unable to migrate rule to policy: %v", err)
					continue
				}
				account.Policies = append(account.Policies, policy)
			}
		}

		// for data migration. Can be removed once most of the install base has peer DNS labels
		existingLabels := account.getPeerDNSLabels()
		if len(existingLabels) != len(account.Peers) {
			addPeerLabelsToAccount(account, existingLabels)
		}

		allGroup, err := account.GetGroupAll()
		if err != nil {
			log.Errorf("unable to find the All group, this should happen only when migrating from a version that didn't support groups. Error: %v", err)
			// if the All group didn't exist we probably don't have routes to update
			continue
		}

		for _, route := range account.Routes {
			if len(route.Groups) == 0 {
				route.Groups = []string{allGroup.ID}
			}
		}

		// migration to Peer.ID from Peer.Key.
		// Old peers that require migration have an empty Peer.ID in the store.json.
		// Generate new ID with xid for these peers.
		// Set the Peer.ID to the newly generated value.
		// Replace all the mentions of Peer.Key as ID (groups and routes).
		// Swap Peer.Key with Peer.ID in the Account.Peers map.
		migrationPeers := make(map[string]*Peer) // key to Peer
		for key, peer := range account.Peers {
			// set LastLogin for the peers that were onboarded before the peer login expiration feature
			if peer.LastLogin.IsZero() {
				peer.LastLogin = time.Now()
			}
			if peer.ID != "" {
				continue
			}
			id := xid.New().String()
			peer.ID = id
			migrationPeers[key] = peer
		}

		if len(migrationPeers) > 0 {
			// swap Peer.Key with Peer.ID in the Account.Peers map.
			for key, peer := range migrationPeers {
				delete(account.Peers, key)
				account.Peers[peer.ID] = peer
				store.PeerID2AccountID[peer.ID] = accountID
			}

			// detect groups that have Peer.Key as a reference and replace it with ID.
			for _, group := range account.Groups {
				for i, peer := range group.Peers {
					if p, ok := migrationPeers[peer]; ok {
						group.Peers[i] = p.ID
					}
				}
			}

			// detect routes that have Peer.Key as a reference and replace it with ID.
			for _, route := range account.Routes {
				if peer, ok := migrationPeers[route.Peer]; ok {
					route.Peer = peer.ID
				}
			}
		}
	}

	// persist the store to apply the changes we made to account.Peers (we marked them all as Disconnected)
	err = store.persist(store.storeFile)
	if err != nil {
		return nil, err
	}

	return store, nil
}

// persist account data to a file
// It is recommended to call it while holding FileStore.mux
func (s *FileStore) persist(file string) error {
	return util.WriteJson(file, s)
}

// AcquireGlobalLock acquires global lock across all the accounts and returns a function that releases the lock
func (s *FileStore) AcquireGlobalLock() (unlock func()) {
	log.Debugf("acquiring global lock")
	start := time.Now()
	s.globalAccountLock.Lock()

	unlock = func() {
		s.globalAccountLock.Unlock()
		log.Debugf("released global lock in %v", time.Since(start))
	}

	return unlock
}

// AcquireAccountLock acquires account lock and returns a function that releases the lock
func (s *FileStore) AcquireAccountLock(accountID string) (unlock func()) {
	log.Debugf("acquiring lock for account %s", accountID)
	start := time.Now()
	value, _ := s.accountLocks.LoadOrStore(accountID, &sync.Mutex{})
	mtx := value.(*sync.Mutex)
	mtx.Lock()

	unlock = func() {
		mtx.Unlock()
		log.Debugf("released lock for account %s in %v", accountID, time.Since(start))
	}

	return unlock
}
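
// Both lock helpers hand back their release function so callers can pair the
// acquisition with defer; a sketch of the intended pattern (store and accountID
// are assumed caller-side values):
//
//	unlock := store.AcquireAccountLock(accountID)
//	defer unlock()
//	// ... read or modify the account while holding the lock ...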

// SaveAccount stores a copy of the account in memory, updates the lookup indexes and persists the store to disk
func (s *FileStore) SaveAccount(account *Account) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountCopy := account.Copy()

	s.Accounts[accountCopy.Id] = accountCopy

	// todo check that account.Id and keyId do not exist already
	// because if keyId exists for other accounts this can be bad
	for keyID := range accountCopy.SetupKeys {
		s.SetupKeyID2AccountID[strings.ToUpper(keyID)] = accountCopy.Id
	}

	// enforce peer to account index and delete peer to route indexes for rebuild
	for _, peer := range accountCopy.Peers {
		s.PeerKeyID2AccountID[peer.Key] = accountCopy.Id
		s.PeerID2AccountID[peer.ID] = accountCopy.Id
	}

	for _, user := range accountCopy.Users {
		s.UserID2AccountID[user.Id] = accountCopy.Id
		for _, pat := range user.PATs {
			s.TokenID2UserID[pat.ID] = user.Id
			s.HashedPAT2TokenID[pat.HashedToken] = pat.ID
		}
	}

	if accountCopy.DomainCategory == PrivateCategory && accountCopy.IsDomainPrimaryAccount {
		s.PrivateDomain2AccountID[accountCopy.Domain] = accountCopy.Id
	}

	if accountCopy.Rules == nil {
		accountCopy.Rules = make(map[string]*Rule)
	}
	for _, policy := range accountCopy.Policies {
		for _, rule := range policy.Rules {
			accountCopy.Rules[rule.ID] = rule.ToRule()
		}
	}

	return s.persist(s.storeFile)
}
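
// Callers are expected to mutate a copy obtained from one of the getters and then
// write it back; a sketch of that flow (accountID is assumed to reference an
// existing account with non-nil Settings):
//
//	account, err := store.GetAccount(accountID)
//	if err != nil {
//		return err
//	}
//	account.Settings.PeerLoginExpirationEnabled = true
//	if err := store.SaveAccount(account); err != nil {
//		return err
//	}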

// DeleteHashedPAT2TokenIDIndex removes an entry from the indexing map HashedPAT2TokenID
func (s *FileStore) DeleteHashedPAT2TokenIDIndex(hashedToken string) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	delete(s.HashedPAT2TokenID, hashedToken)

	return s.persist(s.storeFile)
}

// DeleteTokenID2UserIDIndex removes an entry from the indexing map TokenID2UserID
func (s *FileStore) DeleteTokenID2UserIDIndex(tokenID string) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	delete(s.TokenID2UserID, tokenID)

	return s.persist(s.storeFile)
}

// GetAccountByPrivateDomain returns account by private domain
func (s *FileStore) GetAccountByPrivateDomain(domain string) (*Account, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.PrivateDomain2AccountID[strings.ToLower(domain)]
	if !ok {
		return nil, status.Errorf(status.NotFound, "account not found: provided domain is not registered or is not private")
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	return account.Copy(), nil
}

// GetAccountBySetupKey returns account by setup key id
func (s *FileStore) GetAccountBySetupKey(setupKey string) (*Account, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.SetupKeyID2AccountID[strings.ToUpper(setupKey)]
	if !ok {
		return nil, status.Errorf(status.NotFound, "account not found: provided setup key doesn't exist")
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	return account.Copy(), nil
}

// GetTokenIDByHashedToken returns the id of a personal access token by its hashed secret
func (s *FileStore) GetTokenIDByHashedToken(token string) (string, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	tokenID, ok := s.HashedPAT2TokenID[token]
	if !ok {
		return "", status.Errorf(status.NotFound, "tokenID not found: provided token doesn't exist")
	}

	return tokenID, nil
}

// GetUserByTokenID returns the User object a tokenID belongs to
func (s *FileStore) GetUserByTokenID(tokenID string) (*User, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	userID, ok := s.TokenID2UserID[tokenID]
	if !ok {
		return nil, status.Errorf(status.NotFound, "user not found: provided tokenID doesn't exist")
	}

	accountID, ok := s.UserID2AccountID[userID]
	if !ok {
		return nil, status.Errorf(status.NotFound, "accountID not found: provided userID doesn't exist")
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	return account.Users[userID].Copy(), nil
}
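
// A personal access token lookup therefore chains the two index maps; a sketch
// (hashedToken is assumed to be the hash of the token presented by the client):
//
//	tokenID, err := store.GetTokenIDByHashedToken(hashedToken)
//	if err != nil {
//		return nil, err
//	}
//	user, err := store.GetUserByTokenID(tokenID)
//	if err != nil {
//		return nil, err
//	}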

// GetAllAccounts returns all accounts
func (s *FileStore) GetAllAccounts() (all []*Account) {
	s.mux.Lock()
	defer s.mux.Unlock()
	for _, a := range s.Accounts {
		all = append(all, a.Copy())
	}

	return all
}

// getAccount returns a reference to the Account. Should not return a copy.
func (s *FileStore) getAccount(accountID string) (*Account, error) {
	account, ok := s.Accounts[accountID]
	if !ok {
		return nil, status.Errorf(status.NotFound, "account not found")
	}

	return account, nil
}

// GetAccount returns an account for ID
func (s *FileStore) GetAccount(accountID string) (*Account, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	return account.Copy(), nil
}

// GetAccountByUser returns a user account
func (s *FileStore) GetAccountByUser(userID string) (*Account, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.UserID2AccountID[userID]
	if !ok {
		return nil, status.Errorf(status.NotFound, "account not found")
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	return account.Copy(), nil
}

// GetAccountByPeerID returns an account for a given peer ID
func (s *FileStore) GetAccountByPeerID(peerID string) (*Account, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.PeerID2AccountID[peerID]
	if !ok {
		return nil, status.Errorf(status.NotFound, "provided peer ID doesn't exist %s", peerID)
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	// this protection is needed because when we delete a peer, we don't really remove the peerID -> accountID index.
	// check Account.Peers for a match
	if _, ok := account.Peers[peerID]; !ok {
		delete(s.PeerID2AccountID, peerID)
		log.Warnf("removed stale peerID %s to accountID %s index", peerID, accountID)
		return nil, status.Errorf(status.NotFound, "provided peer doesn't exist %s", peerID)
	}

	return account.Copy(), nil
}

// GetAccountByPeerPubKey returns an account for a given peer WireGuard public key
func (s *FileStore) GetAccountByPeerPubKey(peerKey string) (*Account, error) {
	s.mux.Lock()
	defer s.mux.Unlock()

	accountID, ok := s.PeerKeyID2AccountID[peerKey]
	if !ok {
		return nil, status.Errorf(status.NotFound, "provided peer key doesn't exist %s", peerKey)
	}

	account, err := s.getAccount(accountID)
	if err != nil {
		return nil, err
	}

	// this protection is needed because when we delete a peer, we don't really remove the peerKey -> accountID index.
	// check Account.Peers for a match
	stale := true
	for _, peer := range account.Peers {
		if peer.Key == peerKey {
			stale = false
			break
		}
	}
	if stale {
		delete(s.PeerKeyID2AccountID, peerKey)
		log.Warnf("removed stale peerKey %s to accountID %s index", peerKey, accountID)
		return nil, status.Errorf(status.NotFound, "provided peer doesn't exist %s", peerKey)
	}

	return account.Copy(), nil
}

// GetInstallationID returns the installation ID from the store
func (s *FileStore) GetInstallationID() string {
	return s.InstallationID
}

// SaveInstallationID saves the installation ID
func (s *FileStore) SaveInstallationID(ID string) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	s.InstallationID = ID

	return s.persist(s.storeFile)
}

// SavePeerStatus stores the PeerStatus in memory. It doesn't attempt to persist data to disk to keep the operation fast.
// PeerStatus will be saved eventually when some other changes occur.
func (s *FileStore) SavePeerStatus(accountID, peerID string, peerStatus PeerStatus) error {
	s.mux.Lock()
	defer s.mux.Unlock()

	account, err := s.getAccount(accountID)
	if err != nil {
		return err
	}

	peer := account.Peers[peerID]
	if peer == nil {
		return status.Errorf(status.NotFound, "peer %s not found", peerID)
	}

	peer.Status = &peerStatus

	return nil
}

// Close the FileStore persisting data to disk
func (s *FileStore) Close() error {
	s.mux.Lock()
	defer s.mux.Unlock()

	log.Infof("closing FileStore")

	return s.persist(s.storeFile)
}