mirror of
https://github.com/netbirdio/netbird.git
synced 2024-11-26 10:03:47 +01:00
51f133fdc6
* wip: add posture checks structs * add netbird version check * Refactor posture checks and add version checks * Add posture check activities (#1445) * Integrate Endpoints for Posture Checks (#1432) * wip: add posture checks structs * add netbird version check * Refactor posture checks and add version checks * Implement posture and version checks in API models * Refactor API models and enhance posture check functionality * wip: add posture checks endpoints * go mod tidy * Reference the posture checks by id's in policy * Add posture checks management to server * Add posture checks management mocks * implement posture checks handlers * Add posture checks to account copy and fix tests * Refactor posture checks validation * wip: Add posture checks handler tests * Add JSON encoding support to posture checks * Encode posture checks to correct api response object * Refactored posture checks implementation to align with the new API schema * Refactor structure of `Checks` from slice to map * Cleanup * Add posture check activities (#1445) * Revert map to use list of checks * Add posture check activity events * Refactor posture check initialization in account test * Improve the handling of version range in posture check * Fix tests and linter * Remove max_version from NBVersionCheck * Added unit tests for NBVersionCheck * go mod tidy * Extend policy endpoint with posture checks (#1450) * Implement posture and version checks in API models * go mod tidy * Allow attaching posture checks to policy * Update error message for linked posture check on deleting * Refactor PostureCheck and Checks structures * go mod tidy * Add validation for non-existing posture checks * fix unit tests * use Wt version * Remove the enabled field, as posture check will now automatically be activated by default when attaching to a policy * wip: add posture checks structs * add netbird version check * Refactor posture checks and add version checks * Add posture check activities (#1445) * Integrate 
Endpoints for Posture Checks (#1432) * wip: add posture checks structs * add netbird version check * Refactor posture checks and add version checks * Implement posture and version checks in API models * Refactor API models and enhance posture check functionality * wip: add posture checks endpoints * go mod tidy * Reference the posture checks by id's in policy * Add posture checks management to server * Add posture checks management mocks * implement posture checks handlers * Add posture checks to account copy and fix tests * Refactor posture checks validation * wip: Add posture checks handler tests * Add JSON encoding support to posture checks * Encode posture checks to correct api response object * Refactored posture checks implementation to align with the new API schema * Refactor structure of `Checks` from slice to map * Cleanup * Add posture check activities (#1445) * Revert map to use list of checks * Add posture check activity events * Refactor posture check initialization in account test * Improve the handling of version range in posture check * Fix tests and linter * Remove max_version from NBVersionCheck * Added unit tests for NBVersionCheck * go mod tidy * Extend policy endpoint with posture checks (#1450) * Implement posture and version checks in API models * go mod tidy * Allow attaching posture checks to policy * Update error message for linked posture check on deleting * Refactor PostureCheck and Checks structures * go mod tidy * Add validation for non-existing posture checks * fix unit tests * use Wt version * Remove the enabled field, as posture check will now automatically be activated by default when attaching to a policy * Extend network map generation with posture checks (#1466) * Apply posture checks to network map generation * run policy posture checks on peers to connect * Refactor and streamline policy posture check process for peers to connect. 
* Add posture checks testing in a network map * Remove redundant nil check in policy.go * Refactor peer validation check in policy.go * Update 'Check' function signature and use logger for version check * Refactor posture checks run on sources and updated the validation func * Update peer validation * fix tests * improved test coverage for policy posture check * Refactoring * Extend NetBird agent to collect kernel version (#1495) * Add KernelVersion field to LoginRequest * Add KernelVersion to system info retrieval * Fix tests * Remove Core field from system info * Replace Core field with new OSVersion field in system info * Added WMI dependency to info_windows.go * Add OS Version posture checks (#1479) * Initial support of Geolocation service (#1491) * Add Geo Location posture check (#1500) * wip: implement geolocation check * add geo location posture checks to posture api * Merge branch 'feature/posture-checks' into geo-posture-check * Remove CityGeoNameID and update required fields in API * Add geoLocation checks to posture checks handler tests * Implement geo location-based checks for peers * Update test values and embed location struct in peer system * add support for country wide checks * initialize country code regex once * Fix peer meta core compability with older clients (#1515) * Refactor extraction of OSVersion in grpcserver * Ignore lint check * Fix peer meta core compability with older management (#1532) * Revert core field deprecation * fix tests * Extend peer meta with location information (#1517) This PR uses the geolocation service to resolve IP to location. The lookup happens once on the first connection - when a client calls the Sync func. 
The location is stored as part of the peer: * Add Locations endpoints (#1516) * add locations endpoints * Add sqlite3 check and database generation in geolite script * Add SQLite storage for geolocation data * Refactor file existence check into a separate function * Integrate geolocation services into management application * Refactoring * Refactor city retrieval to include Geonames ID * Add signature verification for GeoLite2 database download * Change to in-memory database for geolocation store * Merge manager to geolocation * Update GetAllCountries to return Country name and iso code * fix tests * Add reload to SqliteStore * Add geoname indexes * move db file check to connectDB * Add concurrency safety to SQL queries and database reloading The commit adds mutex locks to the GetAllCountries and GetCitiesByCountry functions to ensure thread-safety during database queries. Additionally, it introduces a mechanism to safely close the old database connection before a new connection is established upon reloading, which improves the reliability of database operations. Lastly, it moves the checking of database file existence to the connectDB function. 
* Add sha256 sum check to geolocation store before reload * Use read lock * Check SHA256 twice when reload geonames db --------- Co-authored-by: Yury Gargay <yury.gargay@gmail.com> * Add tests and validation for empty peer location in GeoLocationCheck (#1546) * Disallow Geo check creation/update without configured Geo DB (#1548) * Fix shared access to in memory copy of geonames.db (#1550) * Trim suffix in when evaluate Min Kernel Version in OS check * Add Valid Peer Windows Kernel version test * Add Geolocation handler tests (#1556) * Implement user admin checks in posture checks * Add geolocation handler tests * Mark initGeolocationTestData as helper func * Add error handling to geolocation database closure * Add cleanup function to close geolocation resources * Simplify checks definition serialisation (#1555) * Regenerate network map on posture check update (#1563) * change network state and generate map on posture check update * Refactoring * Make city name optional (#1575) * Do not return empty city name * Validate action param of geo location checks (#1577) We only support allow and deny * Switch realip middleware to upstream (#1578) * Be more silent in download-geolite2.sh script * Fix geonames db reload (#1580) * Ensure posture check name uniqueness when create (#1594) * Enhance the management of posture checks (#1595) * add a correct min version and kernel for os posture check example * handle error when geo or location db is nil * expose all peer location details in api response * Check for nil geolocation manager only * Validate posture check before save * bump open api version * add peer location fields to toPeerListItemResponse * Feautre/extend sys meta (#1536) * Collect network addresses * Add Linux sys product info * Fix peer meta comparison * Collect sys info on mac * Add windows sys info * Fix test * Fix test * Fix grpc client * Ignore test * Fix test * Collect IPv6 addresses * Change the IP to IP + net * fix tests * Use netip on server side * 
Serialize netip to json * Extend Peer metadata with cloud detection (#1552) * add cloud detection + test binary * test windows exe * Collect IPv6 addresses * Change the IP to IP + net * switch to forked cloud detect lib * new test builds * new GCE build * discontinue using library but local copy instead * fix imports * remove openstack check * add hierarchy to cloud check * merge IBM and SoftLayer * close resp bodies and use os lib for file reading * close more resp bodies * fix error check logic * parallelize IBM checks * fix response value * go mod tidy * include context + change kubernetes detection * add context in info functions * extract platform into separate field * fix imports * add missing wmi import --------- Co-authored-by: Zoltan Papp <zoltan.pmail@gmail.com> --------- Co-authored-by: pascal-fischer <32096965+pascal-fischer@users.noreply.github.com> * generate proto * remove test binaries --------- Co-authored-by: bcmmbaga <bethuelmbaga12@gmail.com> Co-authored-by: Yury Gargay <yury.gargay@gmail.com> Co-authored-by: Zoltan Papp <zoltan.pmail@gmail.com>
479 lines
14 KiB
Go
479 lines
14 KiB
Go
package client
|
|
|
|
import (
|
|
"context"
|
|
"crypto/tls"
|
|
"fmt"
|
|
"io"
|
|
"sync"
|
|
"time"
|
|
|
|
"google.golang.org/grpc/codes"
|
|
gstatus "google.golang.org/grpc/status"
|
|
|
|
log "github.com/sirupsen/logrus"
|
|
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
|
"google.golang.org/grpc"
|
|
"google.golang.org/grpc/connectivity"
|
|
"google.golang.org/grpc/credentials"
|
|
"google.golang.org/grpc/credentials/insecure"
|
|
"google.golang.org/grpc/keepalive"
|
|
|
|
"github.com/cenkalti/backoff/v4"
|
|
|
|
"github.com/netbirdio/netbird/client/system"
|
|
"github.com/netbirdio/netbird/encryption"
|
|
"github.com/netbirdio/netbird/management/proto"
|
|
)
|
|
|
|
// ConnStateNotifier is a wrapper interface of the status recorders
type ConnStateNotifier interface {
	// MarkManagementDisconnected is called when the stream to the Management
	// Service is lost; the argument carries the disconnect cause.
	MarkManagementDisconnected(error)
	// MarkManagementConnected is called once a Management Service stream has
	// been (re-)established.
	MarkManagementConnected()
}
|
|
|
|
// GrpcClient is a gRPC client of the Management Service. It encrypts and
// decrypts all exchanged messages with the peer's WireGuard key.
type GrpcClient struct {
	key                   wgtypes.Key                   // our WireGuard private key, used for message encryption
	realClient            proto.ManagementServiceClient // generated gRPC stub
	ctx                   context.Context               // parent context for all calls and streams
	conn                  *grpc.ClientConn              // underlying gRPC connection
	connStateCallback     ConnStateNotifier             // optional listener notified on connect/disconnect; may be nil
	connStateCallbackLock sync.RWMutex                  // guards connStateCallback
}
|
|
|
|
// NewClient creates a new client to Management service
|
|
func NewClient(ctx context.Context, addr string, ourPrivateKey wgtypes.Key, tlsEnabled bool) (*GrpcClient, error) {
|
|
transportOption := grpc.WithTransportCredentials(insecure.NewCredentials())
|
|
|
|
if tlsEnabled {
|
|
transportOption = grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{}))
|
|
}
|
|
|
|
mgmCtx, cancel := context.WithTimeout(ctx, 5*time.Second)
|
|
defer cancel()
|
|
conn, err := grpc.DialContext(
|
|
mgmCtx,
|
|
addr,
|
|
transportOption,
|
|
grpc.WithBlock(),
|
|
grpc.WithKeepaliveParams(keepalive.ClientParameters{
|
|
Time: 30 * time.Second,
|
|
Timeout: 10 * time.Second,
|
|
}))
|
|
if err != nil {
|
|
log.Errorf("failed creating connection to Management Service %v", err)
|
|
return nil, err
|
|
}
|
|
|
|
realClient := proto.NewManagementServiceClient(conn)
|
|
|
|
return &GrpcClient{
|
|
key: ourPrivateKey,
|
|
realClient: realClient,
|
|
ctx: ctx,
|
|
conn: conn,
|
|
connStateCallbackLock: sync.RWMutex{},
|
|
}, nil
|
|
}
|
|
|
|
// Close closes connection to the Management Service.
// After Close the client must not be used; any in-flight streams will fail.
func (c *GrpcClient) Close() error {
	return c.conn.Close()
}
|
|
|
|
// SetConnStateListener set the ConnStateNotifier
|
|
func (c *GrpcClient) SetConnStateListener(notifier ConnStateNotifier) {
|
|
c.connStateCallbackLock.Lock()
|
|
defer c.connStateCallbackLock.Unlock()
|
|
c.connStateCallback = notifier
|
|
}
|
|
|
|
// defaultBackoff is a basic backoff mechanism for general issues
|
|
func defaultBackoff(ctx context.Context) backoff.BackOff {
|
|
return backoff.WithContext(&backoff.ExponentialBackOff{
|
|
InitialInterval: 800 * time.Millisecond,
|
|
RandomizationFactor: 1,
|
|
Multiplier: 1.7,
|
|
MaxInterval: 10 * time.Second,
|
|
MaxElapsedTime: 3 * 30 * 24 * time.Hour, // 3 months
|
|
Stop: backoff.Stop,
|
|
Clock: backoff.SystemClock,
|
|
}, ctx)
|
|
}
|
|
|
|
// ready indicates whether the client is okay and ready to be used
|
|
// for now it just checks whether gRPC connection to the service is ready
|
|
func (c *GrpcClient) ready() bool {
|
|
return c.conn.GetState() == connectivity.Ready || c.conn.GetState() == connectivity.Idle
|
|
}
|
|
|
|
// Sync wraps the real client's Sync endpoint call and takes care of retries and encryption/decryption of messages
// Blocking request. The result will be sent via msgHandler callback function
func (c *GrpcClient) Sync(msgHandler func(msg *proto.SyncResponse) error) error {
	backOff := defaultBackoff(c.ctx)

	operation := func() error {
		log.Debugf("management connection state %v", c.conn.GetState())

		connState := c.conn.GetState()
		if connState == connectivity.Shutdown {
			// connection object is unusable from here on; stop retrying
			return backoff.Permanent(fmt.Errorf("connection to management has been shut down"))
		} else if !(connState == connectivity.Ready || connState == connectivity.Idle) {
			// not connected yet: block until the channel state changes (or ctx is
			// done), then return a transient error so the backoff retries us
			c.conn.WaitForStateChange(c.ctx, connState)
			return fmt.Errorf("connection to management is not ready and in %s state", connState)
		}

		// the server's WireGuard public key is required to encrypt the sync
		// request and decrypt the pushed updates
		serverPubKey, err := c.GetServerPublicKey()
		if err != nil {
			log.Debugf("failed getting Management Service public key: %s", err)
			return err
		}

		// per-attempt stream context so each retry gets a fresh stream
		ctx, cancelStream := context.WithCancel(c.ctx)
		defer cancelStream()
		stream, err := c.connectToStream(ctx, *serverPubKey)
		if err != nil {
			log.Debugf("failed to open Management Service stream: %s", err)
			if s, ok := gstatus.FromError(err); ok && s.Code() == codes.PermissionDenied {
				return backoff.Permanent(err) // unrecoverable error, propagate to the upper layer
			}
			return err
		}

		log.Infof("connected to the Management Service stream")
		c.notifyConnected()
		// blocking until error
		err = c.receiveEvents(stream, *serverPubKey, msgHandler)
		if err != nil {
			s, _ := gstatus.FromError(err)
			switch s.Code() {
			case codes.PermissionDenied:
				return backoff.Permanent(err) // unrecoverable error, propagate to the upper layer
			case codes.Canceled:
				log.Debugf("management connection context has been canceled, this usually indicates shutdown")
				return nil
			default:
				// we were connected before this error, so start the retry
				// schedule over from the shortest interval
				backOff.Reset() // reset backoff counter after successful connection
				c.notifyDisconnected(err)
				log.Warnf("disconnected from the Management service but will retry silently. Reason: %v", err)
				return err
			}
		}

		return nil
	}

	// retries operation until it returns nil or a backoff.Permanent error
	err := backoff.Retry(operation, backOff)
	if err != nil {
		log.Warnf("exiting the Management service connection retry loop due to the unrecoverable error: %s", err)
		return err
	}

	return nil
}
|
|
|
|
// GetNetworkMap return with the network map
|
|
func (c *GrpcClient) GetNetworkMap() (*proto.NetworkMap, error) {
|
|
serverPubKey, err := c.GetServerPublicKey()
|
|
if err != nil {
|
|
log.Debugf("failed getting Management Service public key: %s", err)
|
|
return nil, err
|
|
}
|
|
|
|
ctx, cancelStream := context.WithCancel(c.ctx)
|
|
defer cancelStream()
|
|
stream, err := c.connectToStream(ctx, *serverPubKey)
|
|
if err != nil {
|
|
log.Debugf("failed to open Management Service stream: %s", err)
|
|
return nil, err
|
|
}
|
|
defer func() {
|
|
_ = stream.CloseSend()
|
|
}()
|
|
|
|
update, err := stream.Recv()
|
|
if err == io.EOF {
|
|
log.Debugf("Management stream has been closed by server: %s", err)
|
|
return nil, err
|
|
}
|
|
if err != nil {
|
|
log.Debugf("disconnected from Management Service sync stream: %v", err)
|
|
return nil, err
|
|
}
|
|
|
|
decryptedResp := &proto.SyncResponse{}
|
|
err = encryption.DecryptMessage(*serverPubKey, c.key, update.Body, decryptedResp)
|
|
if err != nil {
|
|
log.Errorf("failed decrypting update message from Management Service: %s", err)
|
|
return nil, err
|
|
}
|
|
|
|
if decryptedResp.GetNetworkMap() == nil {
|
|
return nil, fmt.Errorf("invalid msg, required network map")
|
|
}
|
|
|
|
return decryptedResp.GetNetworkMap(), nil
|
|
}
|
|
|
|
func (c *GrpcClient) connectToStream(ctx context.Context, serverPubKey wgtypes.Key) (proto.ManagementService_SyncClient, error) {
|
|
req := &proto.SyncRequest{}
|
|
|
|
myPrivateKey := c.key
|
|
myPublicKey := myPrivateKey.PublicKey()
|
|
|
|
encryptedReq, err := encryption.EncryptMessage(serverPubKey, myPrivateKey, req)
|
|
if err != nil {
|
|
log.Errorf("failed encrypting message: %s", err)
|
|
return nil, err
|
|
}
|
|
syncReq := &proto.EncryptedMessage{WgPubKey: myPublicKey.String(), Body: encryptedReq}
|
|
sync, err := c.realClient.Sync(ctx, syncReq)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
return sync, nil
|
|
}
|
|
|
|
func (c *GrpcClient) receiveEvents(stream proto.ManagementService_SyncClient, serverPubKey wgtypes.Key, msgHandler func(msg *proto.SyncResponse) error) error {
|
|
for {
|
|
update, err := stream.Recv()
|
|
if err == io.EOF {
|
|
log.Debugf("Management stream has been closed by server: %s", err)
|
|
return err
|
|
}
|
|
if err != nil {
|
|
log.Debugf("disconnected from Management Service sync stream: %v", err)
|
|
return err
|
|
}
|
|
|
|
log.Debugf("got an update message from Management Service")
|
|
decryptedResp := &proto.SyncResponse{}
|
|
err = encryption.DecryptMessage(serverPubKey, c.key, update.Body, decryptedResp)
|
|
if err != nil {
|
|
log.Errorf("failed decrypting update message from Management Service: %s", err)
|
|
return err
|
|
}
|
|
|
|
err = msgHandler(decryptedResp)
|
|
if err != nil {
|
|
log.Errorf("failed handling an update message received from Management Service: %v", err.Error())
|
|
return err
|
|
}
|
|
}
|
|
}
|
|
|
|
// GetServerPublicKey returns server's WireGuard public key (used later for encrypting messages sent to the server)
|
|
func (c *GrpcClient) GetServerPublicKey() (*wgtypes.Key, error) {
|
|
if !c.ready() {
|
|
return nil, fmt.Errorf("no connection to management")
|
|
}
|
|
|
|
mgmCtx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
|
|
defer cancel()
|
|
resp, err := c.realClient.GetServerKey(mgmCtx, &proto.Empty{})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
serverKey, err := wgtypes.ParseKey(resp.Key)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
return &serverKey, nil
|
|
}
|
|
|
|
// IsHealthy probes the gRPC connection and returns false on errors
func (c *GrpcClient) IsHealthy() bool {
	switch c.conn.GetState() {
	case connectivity.TransientFailure:
		return false
	case connectivity.Connecting:
		// a dial attempt is already in flight; skip the probe below
		return true
	case connectivity.Shutdown:
		// NOTE(review): a shut-down connection is reported as healthy and the
		// probe below is skipped — presumably so callers do not keep probing a
		// dead connection, but confirm no caller relies on false here to
		// trigger reconnection.
		return true
	case connectivity.Idle:
	case connectivity.Ready:
	}
	// Idle/Ready fall through: actively probe the server with a cheap unary
	// call bounded by a 1 second timeout

	ctx, cancel := context.WithTimeout(c.ctx, 1*time.Second)
	defer cancel()

	_, err := c.realClient.GetServerKey(ctx, &proto.Empty{})
	if err != nil {
		c.notifyDisconnected(err)
		log.Warnf("health check returned: %s", err)
		return false
	}
	c.notifyConnected()
	return true
}
|
|
|
|
func (c *GrpcClient) login(serverKey wgtypes.Key, req *proto.LoginRequest) (*proto.LoginResponse, error) {
|
|
if !c.ready() {
|
|
return nil, fmt.Errorf("no connection to management")
|
|
}
|
|
loginReq, err := encryption.EncryptMessage(serverKey, c.key, req)
|
|
if err != nil {
|
|
log.Errorf("failed to encrypt message: %s", err)
|
|
return nil, err
|
|
}
|
|
mgmCtx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
|
|
defer cancel()
|
|
resp, err := c.realClient.Login(mgmCtx, &proto.EncryptedMessage{
|
|
WgPubKey: c.key.PublicKey().String(),
|
|
Body: loginReq,
|
|
})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
loginResp := &proto.LoginResponse{}
|
|
err = encryption.DecryptMessage(serverKey, c.key, resp.Body, loginResp)
|
|
if err != nil {
|
|
log.Errorf("failed to decrypt registration message: %s", err)
|
|
return nil, err
|
|
}
|
|
|
|
return loginResp, nil
|
|
}
|
|
|
|
// Register registers peer on Management Server. It actually calls a Login endpoint with a provided setup key
|
|
// Takes care of encrypting and decrypting messages.
|
|
// This method will also collect system info and send it with the request (e.g. hostname, os, etc)
|
|
func (c *GrpcClient) Register(serverKey wgtypes.Key, setupKey string, jwtToken string, sysInfo *system.Info, pubSSHKey []byte) (*proto.LoginResponse, error) {
|
|
keys := &proto.PeerKeys{
|
|
SshPubKey: pubSSHKey,
|
|
WgPubKey: []byte(c.key.PublicKey().String()),
|
|
}
|
|
return c.login(serverKey, &proto.LoginRequest{SetupKey: setupKey, Meta: infoToMetaData(sysInfo), JwtToken: jwtToken, PeerKeys: keys})
|
|
}
|
|
|
|
// Login attempts login to Management Server. Takes care of encrypting and decrypting messages.
|
|
func (c *GrpcClient) Login(serverKey wgtypes.Key, sysInfo *system.Info, pubSSHKey []byte) (*proto.LoginResponse, error) {
|
|
keys := &proto.PeerKeys{
|
|
SshPubKey: pubSSHKey,
|
|
WgPubKey: []byte(c.key.PublicKey().String()),
|
|
}
|
|
return c.login(serverKey, &proto.LoginRequest{Meta: infoToMetaData(sysInfo), PeerKeys: keys})
|
|
}
|
|
|
|
// GetDeviceAuthorizationFlow returns a device authorization flow information.
|
|
// It also takes care of encrypting and decrypting messages.
|
|
func (c *GrpcClient) GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error) {
|
|
if !c.ready() {
|
|
return nil, fmt.Errorf("no connection to management in order to get device authorization flow")
|
|
}
|
|
mgmCtx, cancel := context.WithTimeout(c.ctx, time.Second*2)
|
|
defer cancel()
|
|
|
|
message := &proto.DeviceAuthorizationFlowRequest{}
|
|
encryptedMSG, err := encryption.EncryptMessage(serverKey, c.key, message)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
resp, err := c.realClient.GetDeviceAuthorizationFlow(mgmCtx, &proto.EncryptedMessage{
|
|
WgPubKey: c.key.PublicKey().String(),
|
|
Body: encryptedMSG},
|
|
)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
flowInfoResp := &proto.DeviceAuthorizationFlow{}
|
|
err = encryption.DecryptMessage(serverKey, c.key, resp.Body, flowInfoResp)
|
|
if err != nil {
|
|
errWithMSG := fmt.Errorf("failed to decrypt device authorization flow message: %s", err)
|
|
log.Error(errWithMSG)
|
|
return nil, errWithMSG
|
|
}
|
|
|
|
return flowInfoResp, nil
|
|
}
|
|
|
|
// GetPKCEAuthorizationFlow returns a pkce authorization flow information.
|
|
// It also takes care of encrypting and decrypting messages.
|
|
func (c *GrpcClient) GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) {
|
|
if !c.ready() {
|
|
return nil, fmt.Errorf("no connection to management in order to get pkce authorization flow")
|
|
}
|
|
mgmCtx, cancel := context.WithTimeout(c.ctx, time.Second*2)
|
|
defer cancel()
|
|
|
|
message := &proto.PKCEAuthorizationFlowRequest{}
|
|
encryptedMSG, err := encryption.EncryptMessage(serverKey, c.key, message)
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
resp, err := c.realClient.GetPKCEAuthorizationFlow(mgmCtx, &proto.EncryptedMessage{
|
|
WgPubKey: c.key.PublicKey().String(),
|
|
Body: encryptedMSG,
|
|
})
|
|
if err != nil {
|
|
return nil, err
|
|
}
|
|
|
|
flowInfoResp := &proto.PKCEAuthorizationFlow{}
|
|
err = encryption.DecryptMessage(serverKey, c.key, resp.Body, flowInfoResp)
|
|
if err != nil {
|
|
errWithMSG := fmt.Errorf("failed to decrypt pkce authorization flow message: %s", err)
|
|
log.Error(errWithMSG)
|
|
return nil, errWithMSG
|
|
}
|
|
|
|
return flowInfoResp, nil
|
|
}
|
|
|
|
func (c *GrpcClient) notifyDisconnected(err error) {
|
|
c.connStateCallbackLock.RLock()
|
|
defer c.connStateCallbackLock.RUnlock()
|
|
|
|
if c.connStateCallback == nil {
|
|
return
|
|
}
|
|
c.connStateCallback.MarkManagementDisconnected(err)
|
|
}
|
|
|
|
func (c *GrpcClient) notifyConnected() {
|
|
c.connStateCallbackLock.RLock()
|
|
defer c.connStateCallbackLock.RUnlock()
|
|
|
|
if c.connStateCallback == nil {
|
|
return
|
|
}
|
|
c.connStateCallback.MarkManagementConnected()
|
|
}
|
|
|
|
// infoToMetaData converts the locally collected system.Info into the protobuf
// metadata sent to the Management Service on login/registration.
// Returns nil when info is nil.
func infoToMetaData(info *system.Info) *proto.PeerSystemMeta {
	if info == nil {
		return nil
	}

	// convert collected network addresses (IP/prefix as string + MAC)
	addresses := make([]*proto.NetworkAddress, 0, len(info.NetworkAddresses))
	for _, addr := range info.NetworkAddresses {
		addresses = append(addresses, &proto.NetworkAddress{
			NetIP: addr.NetIP.String(),
			Mac:   addr.Mac,
		})
	}

	return &proto.PeerSystemMeta{
		Hostname: info.Hostname,
		GoOS:     info.GoOS,
		OS:       info.OS,
		// Core intentionally carries the same value as OSVersion — presumably
		// kept for compatibility with older management servers that still read
		// the Core field; confirm before removing.
		Core:               info.OSVersion,
		OSVersion:          info.OSVersion,
		Platform:           info.Platform,
		Kernel:             info.Kernel,
		WiretrusteeVersion: info.WiretrusteeVersion,
		UiVersion:          info.UIVersion,
		KernelVersion:      info.KernelVersion,
		NetworkAddresses:   addresses,
		SysSerialNumber:    info.SystemSerialNumber,
		SysManufacturer:    info.SystemManufacturer,
		SysProductName:     info.SystemProductName,
	}
}