Mirror of https://github.com/netbirdio/netbird.git, synced 2025-08-17 02:21:47 +02:00
Move management server to a separate directory (#67)
* Move management server to a separate directory
management/server/management_suite_test.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package server_test

import (
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"

    "testing"
)

func TestManagement(t *testing.T) {
    RegisterFailHandler(Fail)
    RunSpecs(t, "Management Service Suite")
}
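The file above is the package's single Ginkgo entry point: RegisterFailHandler routes Gomega assertion failures into Ginkgo, and RunSpecs executes every container registered in package server_test (such as the Describe in management_test.go below). As a hypothetical illustration, not part of this commit, any further spec file in the package would be picked up the same way:

package server_test

import (
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
)

// Hypothetical extra spec file: the var _ = Describe(...) registration runs at
// package initialization, so the RunSpecs call in TestManagement above picks it
// up with no additional wiring.
var _ = Describe("Example container (illustrative only)", func() {
    It("is discovered by the suite entry point", func() {
        Expect(1 + 1).To(Equal(2))
    })
})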
management/server/management_test.go (new file, 337 lines)
@@ -0,0 +1,337 @@
package server_test

import (
    "context"
    server2 "github.com/wiretrustee/wiretrustee/management/server"
    "io/ioutil"
    "math/rand"
    "net"
    "os"
    "path/filepath"
    sync2 "sync"
    "time"

    pb "github.com/golang/protobuf/proto" //nolint
    log "github.com/sirupsen/logrus"
    "github.com/wiretrustee/wiretrustee/encryption"

    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    mgmtProto "github.com/wiretrustee/wiretrustee/management/proto"
    "github.com/wiretrustee/wiretrustee/util"
    "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
    "google.golang.org/grpc"
    "google.golang.org/grpc/keepalive"
)

const (
    ValidSetupKey   = "A2C8E62B-38F5-4553-B31E-DD66C696CEBB"
    InvalidSetupKey = "INVALID_SETUP_KEY"
)

var _ = Describe("Management service", func() {

    var (
        addr         string
        server       *grpc.Server
        dataDir      string
        client       mgmtProto.ManagementServiceClient
        serverPubKey wgtypes.Key
        conn         *grpc.ClientConn
    )

    BeforeEach(func() {
        level, _ := log.ParseLevel("Debug")
        log.SetLevel(level)
        var err error
        dataDir, err = ioutil.TempDir("", "wiretrustee_mgmt_test_tmp_*")
        Expect(err).NotTo(HaveOccurred())

        err = util.CopyFileContents("testdata/store.json", filepath.Join(dataDir, "store.json"))
        Expect(err).NotTo(HaveOccurred())
        var listener net.Listener
        server, listener = startServer(dataDir)
        addr = listener.Addr().String()
        client, conn = createRawClient(addr)

        // server public key
        resp, err := client.GetServerKey(context.TODO(), &mgmtProto.Empty{})
        Expect(err).NotTo(HaveOccurred())
        serverPubKey, err = wgtypes.ParseKey(resp.Key)
        Expect(err).NotTo(HaveOccurred())
    })

    AfterEach(func() {
        server.Stop()
        err := conn.Close()
        Expect(err).NotTo(HaveOccurred())
        err = os.RemoveAll(dataDir)
        Expect(err).NotTo(HaveOccurred())
    })

    Context("when calling IsHealthy endpoint", func() {
        Specify("a non-error result is returned", func() {

            healthy, err := client.IsHealthy(context.TODO(), &mgmtProto.Empty{})

            Expect(err).NotTo(HaveOccurred())
            Expect(healthy).ToNot(BeNil())
        })
    })

    Context("when calling Sync endpoint", func() {

        Context("when there are 3 peers registered under one account", func() {
            Specify("a list containing other 2 peers is returned", func() {
                key, _ := wgtypes.GenerateKey()
                key1, _ := wgtypes.GenerateKey()
                key2, _ := wgtypes.GenerateKey()
                registerPeerWithValidSetupKey(key, client)
                registerPeerWithValidSetupKey(key1, client)
                registerPeerWithValidSetupKey(key2, client)

                messageBytes, err := pb.Marshal(&mgmtProto.SyncRequest{})
                Expect(err).NotTo(HaveOccurred())
                encryptedBytes, err := encryption.Encrypt(messageBytes, serverPubKey, key)
                Expect(err).NotTo(HaveOccurred())

                sync, err := client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{
                    WgPubKey: key.PublicKey().String(),
                    Body:     encryptedBytes,
                })
                Expect(err).NotTo(HaveOccurred())

                encryptedResponse := &mgmtProto.EncryptedMessage{}
                err = sync.RecvMsg(encryptedResponse)
                Expect(err).NotTo(HaveOccurred())
                decryptedBytes, err := encryption.Decrypt(encryptedResponse.Body, serverPubKey, key)
                Expect(err).NotTo(HaveOccurred())

                resp := &mgmtProto.SyncResponse{}
                err = pb.Unmarshal(decryptedBytes, resp)
                Expect(err).NotTo(HaveOccurred())

                Expect(resp.Peers).To(HaveLen(2))
                Expect(resp.Peers).To(ContainElements(key1.PublicKey().String(), key2.PublicKey().String()))
            })
        })

        Context("when there is a new peer registered", func() {
            Specify("an update is returned", func() {
                // register only a single peer
                key, _ := wgtypes.GenerateKey()
                registerPeerWithValidSetupKey(key, client)

                messageBytes, err := pb.Marshal(&mgmtProto.SyncRequest{})
                Expect(err).NotTo(HaveOccurred())
                encryptedBytes, err := encryption.Encrypt(messageBytes, serverPubKey, key)
                Expect(err).NotTo(HaveOccurred())

                sync, err := client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{
                    WgPubKey: key.PublicKey().String(),
                    Body:     encryptedBytes,
                })
                Expect(err).NotTo(HaveOccurred())

                // after the initial sync call we have 0 peer updates
                encryptedResponse := &mgmtProto.EncryptedMessage{}
                err = sync.RecvMsg(encryptedResponse)
                Expect(err).NotTo(HaveOccurred())
                decryptedBytes, err := encryption.Decrypt(encryptedResponse.Body, serverPubKey, key)
                Expect(err).NotTo(HaveOccurred())
                resp := &mgmtProto.SyncResponse{}
                err = pb.Unmarshal(decryptedBytes, resp)
                Expect(resp.Peers).To(HaveLen(0))

                wg := sync2.WaitGroup{}
                wg.Add(1)

                // continue listening on updates for a peer
                go func() {
                    err = sync.RecvMsg(encryptedResponse)

                    decryptedBytes, err = encryption.Decrypt(encryptedResponse.Body, serverPubKey, key)
                    Expect(err).NotTo(HaveOccurred())
                    resp = &mgmtProto.SyncResponse{}
                    err = pb.Unmarshal(decryptedBytes, resp)
                    wg.Done()
                }()

                // register a new peer
                key1, _ := wgtypes.GenerateKey()
                registerPeerWithValidSetupKey(key1, client)

                wg.Wait()

                Expect(err).NotTo(HaveOccurred())
                Expect(resp.Peers).To(ContainElements(key1.PublicKey().String()))
                Expect(resp.Peers).To(HaveLen(1))
            })
        })
    })

    Context("when calling GetServerKey endpoint", func() {
        Specify("a public Wireguard key of the service is returned", func() {

            resp, err := client.GetServerKey(context.TODO(), &mgmtProto.Empty{})

            Expect(err).NotTo(HaveOccurred())
            Expect(resp).ToNot(BeNil())
            Expect(resp.Key).ToNot(BeNil())
            Expect(resp.ExpiresAt).ToNot(BeNil())

            // check if the key is a valid Wireguard key
            key, err := wgtypes.ParseKey(resp.Key)
            Expect(err).NotTo(HaveOccurred())
            Expect(key).ToNot(BeNil())
        })
    })

    Context("when calling RegisterPeer endpoint", func() {

        Context("with an invalid setup key", func() {
            Specify("an error is returned", func() {

                key, _ := wgtypes.GenerateKey()
                resp, err := client.RegisterPeer(context.TODO(), &mgmtProto.RegisterPeerRequest{
                    Key:      key.PublicKey().String(),
                    SetupKey: InvalidSetupKey,
                })

                Expect(err).To(HaveOccurred())
                Expect(resp).To(BeNil())
            })
        })

        Context("with a valid setup key", func() {
            It("a non-error result is returned", func() {

                key, _ := wgtypes.GenerateKey()
                resp := registerPeerWithValidSetupKey(key, client)

                Expect(resp).ToNot(BeNil())
            })
        })
    })

    Context("when there are 50 peers registered under one account", func() {
        Context("when there are 10 more peers registered under the same account", func() {
            Specify("all of the 50 peers will get updates of 10 newly registered peers", func() {

                initialPeers := 20
                additionalPeers := 10

                var peers []wgtypes.Key
                for i := 0; i < initialPeers; i++ {
                    key, _ := wgtypes.GenerateKey()
                    registerPeerWithValidSetupKey(key, client)
                    peers = append(peers, key)
                }

                wg := sync2.WaitGroup{}
                wg.Add(initialPeers + initialPeers*additionalPeers)

                var clients []mgmtProto.ManagementService_SyncClient
                for _, peer := range peers {
                    messageBytes, err := pb.Marshal(&mgmtProto.SyncRequest{})
                    Expect(err).NotTo(HaveOccurred())
                    encryptedBytes, err := encryption.Encrypt(messageBytes, serverPubKey, peer)
                    Expect(err).NotTo(HaveOccurred())

                    // open stream
                    sync, err := client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{
                        WgPubKey: peer.PublicKey().String(),
                        Body:     encryptedBytes,
                    })
                    Expect(err).NotTo(HaveOccurred())
                    clients = append(clients, sync)

                    // receive stream
                    peer := peer
                    go func() {
                        for {
                            encryptedResponse := &mgmtProto.EncryptedMessage{}
                            err = sync.RecvMsg(encryptedResponse)
                            if err != nil {
                                break
                            }
                            decryptedBytes, err := encryption.Decrypt(encryptedResponse.Body, serverPubKey, peer)
                            Expect(err).NotTo(HaveOccurred())

                            resp := &mgmtProto.SyncResponse{}
                            err = pb.Unmarshal(decryptedBytes, resp)
                            Expect(err).NotTo(HaveOccurred())
                            wg.Done()
                        }
                    }()
                }

                time.Sleep(1 * time.Second)
                for i := 0; i < additionalPeers; i++ {
                    key, _ := wgtypes.GenerateKey()
                    registerPeerWithValidSetupKey(key, client)
                    rand.Seed(time.Now().UnixNano())
                    n := rand.Intn(500)
                    time.Sleep(time.Duration(n) * time.Millisecond)
                }

                wg.Wait()

                for _, syncClient := range clients {
                    err := syncClient.CloseSend()
                    Expect(err).NotTo(HaveOccurred())
                }
            })
        })
    })
})

func registerPeerWithValidSetupKey(key wgtypes.Key, client mgmtProto.ManagementServiceClient) *mgmtProto.RegisterPeerResponse {

    resp, err := client.RegisterPeer(context.TODO(), &mgmtProto.RegisterPeerRequest{
        Key:      key.PublicKey().String(),
        SetupKey: ValidSetupKey,
    })

    Expect(err).NotTo(HaveOccurred())

    return resp
}

func createRawClient(addr string) (mgmtProto.ManagementServiceClient, *grpc.ClientConn) {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(),
        grpc.WithBlock(),
        grpc.WithKeepaliveParams(keepalive.ClientParameters{
            Time:    10 * time.Second,
            Timeout: 2 * time.Second,
        }))
    Expect(err).NotTo(HaveOccurred())

    return mgmtProto.NewManagementServiceClient(conn), conn
}

func startServer(dataDir string) (*grpc.Server, net.Listener) {
    lis, err := net.Listen("tcp", ":0")
    Expect(err).NotTo(HaveOccurred())
    s := grpc.NewServer()
    server, err := server2.NewServer(dataDir)
    Expect(err).NotTo(HaveOccurred())
    mgmtProto.RegisterManagementServiceServer(s, server)
    go func() {
        if err := s.Serve(lis); err != nil {
            Expect(err).NotTo(HaveOccurred())
        }
    }()

    return s, lis
}
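Every Sync spec above repeats the same client-side handshake: register the peer with a setup key, fetch the server's WireGuard public key, marshal and encrypt a SyncRequest, open the Sync stream, then decrypt and unmarshal each EncryptedMessage that comes back. Below is a condensed sketch of that sequence as a helper in the same package; the function name is hypothetical, it relies on the imports and the registerPeerWithValidSetupKey helper defined in this file, and error handling is dropped for brevity (illustrative only, not part of this commit):

// exampleSyncHandshake condenses the handshake exercised by the specs above.
func exampleSyncHandshake(client mgmtProto.ManagementServiceClient) *mgmtProto.SyncResponse {
    key, _ := wgtypes.GenerateKey()            // client WireGuard key
    registerPeerWithValidSetupKey(key, client) // peer must be registered before Sync

    serverKeyResp, _ := client.GetServerKey(context.TODO(), &mgmtProto.Empty{})
    serverPubKey, _ := wgtypes.ParseKey(serverKeyResp.Key)

    plain, _ := pb.Marshal(&mgmtProto.SyncRequest{})
    body, _ := encryption.Encrypt(plain, serverPubKey, key)

    stream, _ := client.Sync(context.TODO(), &mgmtProto.EncryptedMessage{
        WgPubKey: key.PublicKey().String(),
        Body:     body,
    })

    encrypted := &mgmtProto.EncryptedMessage{}
    _ = stream.RecvMsg(encrypted)
    decrypted, _ := encryption.Decrypt(encrypted.Body, serverPubKey, key)

    resp := &mgmtProto.SyncResponse{}
    _ = pb.Unmarshal(decrypted, resp) // resp.Peers lists the other peers in the account
    return resp
}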
management/server/server.go (new file, 225 lines)
@@ -0,0 +1,225 @@
package server

import (
    "context"
    "github.com/wiretrustee/wiretrustee/management/store"
    "sync"
    "time"

    "github.com/golang/protobuf/ptypes/timestamp"
    log "github.com/sirupsen/logrus"
    "github.com/wiretrustee/wiretrustee/encryption"
    "github.com/wiretrustee/wiretrustee/management/proto"
    "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// Server an instance of a Management server
type Server struct {
    Store *store.FileStore
    wgKey wgtypes.Key
    proto.UnimplementedManagementServiceServer
    peerChannels map[string]chan *UpdateChannelMessage
    channelsMux  *sync.Mutex
}

type UpdateChannelMessage struct {
    Update *proto.SyncResponse
}

// NewServer creates a new Management server
func NewServer(dataDir string) (*Server, error) {
    key, err := wgtypes.GeneratePrivateKey()
    if err != nil {
        return nil, err
    }
    store, err := store.NewStore(dataDir)
    if err != nil {
        return nil, err
    }
    return &Server{
        Store: store,
        wgKey: key,
        // peerKey -> event channel
        peerChannels: make(map[string]chan *UpdateChannelMessage),
        channelsMux:  &sync.Mutex{},
    }, nil
}

func (s *Server) GetServerKey(ctx context.Context, req *proto.Empty) (*proto.ServerKeyResponse, error) {

    // todo introduce something more meaningful with the key expiration/rotation
    now := time.Now().Add(24 * time.Hour)
    secs := int64(now.Second())
    nanos := int32(now.Nanosecond())
    expiresAt := &timestamp.Timestamp{Seconds: secs, Nanos: nanos}

    return &proto.ServerKeyResponse{
        Key:       s.wgKey.PublicKey().String(),
        ExpiresAt: expiresAt,
    }, nil
}

// Sync validates the existence of a connecting peer, sends an initial state (all available for the connecting peers) and
// notifies the connected peer of any updates (e.g. new peers under the same account)
func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_SyncServer) error {

    log.Debugf("Sync request from peer %s", req.WgPubKey)

    peerKey, err := wgtypes.ParseKey(req.GetWgPubKey())
    if err != nil {
        log.Warnf("error while parsing peer's Wireguard public key %s on Sync request.", peerKey.String())
        return status.Errorf(codes.InvalidArgument, "provided wgPubKey %s is invalid", peerKey.String())
    }

    exists := s.Store.PeerExists(peerKey.String())
    if !exists {
        return status.Errorf(codes.Unauthenticated, "provided peer with the key wgPubKey %s is not registered", peerKey.String())
    }

    syncReq := &proto.SyncRequest{}
    err = encryption.DecryptMessage(peerKey, s.wgKey, req.Body, syncReq)
    if err != nil {
        return status.Errorf(codes.InvalidArgument, "invalid request message")
    }

    err = s.sendInitialSync(peerKey, srv)
    if err != nil {
        return err
    }

    updates := s.openUpdatesChannel(peerKey.String())

    // keep a connection to the peer and send updates when available
    for {
        select {
        // condition when there are some updates
        case update, open := <-updates:
            if !open {
                // updates channel has been closed
                return nil
            }
            log.Debugf("received an update for peer %s", peerKey.String())

            encryptedResp, err := encryption.EncryptMessage(peerKey, s.wgKey, update.Update)
            if err != nil {
                return status.Errorf(codes.Internal, "failed processing update message")
            }

            err = srv.SendMsg(&proto.EncryptedMessage{
                WgPubKey: s.wgKey.PublicKey().String(),
                Body:     encryptedResp,
            })
            if err != nil {
                return status.Errorf(codes.Internal, "failed sending update message")
            }
        // condition when client <-> server connection has been terminated
        case <-srv.Context().Done():
            // happens when connection drops, e.g. client disconnects
            log.Debugf("stream of peer %s has been closed", peerKey.String())
            s.closeUpdatesChannel(peerKey.String())
            return srv.Context().Err()
        }
    }
}

// RegisterPeer adds a peer to the Store. Returns 404 in case the provided setup key doesn't exist
func (s *Server) RegisterPeer(ctx context.Context, req *proto.RegisterPeerRequest) (*proto.RegisterPeerResponse, error) {

    log.Debugf("RegisterPeer request from peer %s", req.Key)

    s.channelsMux.Lock()
    defer s.channelsMux.Unlock()

    err := s.Store.AddPeer(req.SetupKey, req.Key)
    if err != nil {
        return &proto.RegisterPeerResponse{}, status.Errorf(codes.NotFound, "provided setup key doesn't exist")
    }

    peers, err := s.Store.GetPeersForAPeer(req.Key)
    if err != nil {
        //todo return a proper error
        return nil, err
    }

    // notify other peers of our registration
    for _, peer := range peers {
        if channel, ok := s.peerChannels[peer]; ok {
            // exclude notified peer and add ourselves
            peersToSend := []string{req.Key}
            for _, p := range peers {
                if peer != p {
                    peersToSend = append(peersToSend, p)
                }
            }
            update := &proto.SyncResponse{Peers: peersToSend}
            channel <- &UpdateChannelMessage{Update: update}
        }
    }

    return &proto.RegisterPeerResponse{}, nil
}

// IsHealthy indicates whether the service is healthy
func (s *Server) IsHealthy(ctx context.Context, req *proto.Empty) (*proto.Empty, error) {
    return &proto.Empty{}, nil
}

// openUpdatesChannel creates a go channel for a given peer used to deliver updates relevant to the peer.
func (s *Server) openUpdatesChannel(peerKey string) chan *UpdateChannelMessage {
    s.channelsMux.Lock()
    defer s.channelsMux.Unlock()
    if channel, ok := s.peerChannels[peerKey]; ok {
        delete(s.peerChannels, peerKey)
        close(channel)
    }
    //mbragin: todo shouldn't it be more? or configurable?
    channel := make(chan *UpdateChannelMessage, 100)
    s.peerChannels[peerKey] = channel

    log.Debugf("opened updates channel for a peer %s", peerKey)
    return channel
}

// closeUpdatesChannel closes updates channel of a given peer
func (s *Server) closeUpdatesChannel(peerKey string) {
    s.channelsMux.Lock()
    defer s.channelsMux.Unlock()
    if channel, ok := s.peerChannels[peerKey]; ok {
        delete(s.peerChannels, peerKey)
        close(channel)
    }

    log.Debugf("closed updates channel of a peer %s", peerKey)
}

// sendInitialSync sends initial proto.SyncResponse to the peer requesting synchronization
func (s *Server) sendInitialSync(peerKey wgtypes.Key, srv proto.ManagementService_SyncServer) error {

    peers, err := s.Store.GetPeersForAPeer(peerKey.String())
    if err != nil {
        log.Warnf("error getting a list of peers for a peer %s", peerKey.String())
        return err
    }
    plainResp := &proto.SyncResponse{
        Peers: peers,
    }

    encryptedResp, err := encryption.EncryptMessage(peerKey, s.wgKey, plainResp)
    if err != nil {
        return status.Errorf(codes.Internal, "error handling request")
    }

    err = srv.Send(&proto.EncryptedMessage{
        WgPubKey: s.wgKey.PublicKey().String(),
        Body:     encryptedResp,
    })

    if err != nil {
        log.Errorf("failed sending SyncResponse %v", err)
        return status.Errorf(codes.Internal, "error handling request")
    }

    return nil
}
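The update path in server.go is a per-peer fan-out: openUpdatesChannel keeps one buffered channel per connected peer in peerChannels (guarded by channelsMux), the select loop in Sync drains that channel onto the gRPC stream, and RegisterPeer pushes a SyncResponse into every other peer's channel. A stripped-down, standalone sketch of that pattern with plain strings instead of proto messages (illustrative only, not part of this commit):

package main

import (
    "fmt"
    "sync"
)

// hub mirrors the per-peer channel fan-out used by Server: one buffered channel
// per peer, guarded by a mutex, replaced and closed on reconnect or disconnect.
type hub struct {
    mux      sync.Mutex
    channels map[string]chan string
}

func (h *hub) open(peer string) chan string {
    h.mux.Lock()
    defer h.mux.Unlock()
    if old, ok := h.channels[peer]; ok {
        delete(h.channels, peer)
        close(old) // replace a stale stream, as openUpdatesChannel does
    }
    ch := make(chan string, 100)
    h.channels[peer] = ch
    return ch
}

func (h *hub) notifyOthers(newPeer string) {
    h.mux.Lock()
    defer h.mux.Unlock()
    for peer, ch := range h.channels {
        if peer != newPeer {
            ch <- fmt.Sprintf("peer %s joined", newPeer) // like RegisterPeer's update push
        }
    }
}

func main() {
    h := &hub{channels: make(map[string]chan string)}
    updates := h.open("peerA")
    h.open("peerB")
    h.notifyOthers("peerB")
    fmt.Println(<-updates) // peerA receives the update about peerB
}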
management/server/testdata/store.json (new file, vendored, 13 lines)
@@ -0,0 +1,13 @@
{
    "Accounts": {
        "bf1c8084-ba50-4ce7-9439-34653001fc3b": {
            "Id": "bf1c8084-ba50-4ce7-9439-34653001fc3b",
            "SetupKeys": {
                "a2c8e62b-38f5-4553-b31e-dd66c696cebb": {
                    "Key": "a2c8e62b-38f5-4553-b31e-dd66c696cebb"
                }
            },
            "Peers": {}
        }
    }
}
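This fixture shows the on-disk shape the store package loads: an Accounts map keyed by account ID, each account holding its SetupKeys (stored lower-cased here, while the tests pass the upper-cased ValidSetupKey) and an initially empty Peers map. The BeforeEach block in management_test.go copies it into a fresh data directory before starting the server; the sketch below shows the same bootstrapping outside the tests, reusing only calls that appear in this diff (illustrative, not part of the commit):

package main

import (
    "io/ioutil"
    "net"
    "path/filepath"

    log "github.com/sirupsen/logrus"
    "github.com/wiretrustee/wiretrustee/management/proto"
    "github.com/wiretrustee/wiretrustee/management/server"
    "github.com/wiretrustee/wiretrustee/util"
    "google.golang.org/grpc"
)

func main() {
    // Copy the fixture into a scratch data directory, as BeforeEach does in the tests.
    dataDir, err := ioutil.TempDir("", "wiretrustee_mgmt_example_*")
    if err != nil {
        log.Fatalf("failed creating data dir: %v", err)
    }
    if err := util.CopyFileContents("testdata/store.json", filepath.Join(dataDir, "store.json")); err != nil {
        log.Fatalf("failed copying fixture: %v", err)
    }

    // NewServer loads the store from dataDir (via store.NewStore) and generates a WireGuard key.
    mgmtServer, err := server.NewServer(dataDir)
    if err != nil {
        log.Fatalf("failed creating the management server: %v", err)
    }

    grpcServer := grpc.NewServer()
    proto.RegisterManagementServiceServer(grpcServer, mgmtServer)

    lis, err := net.Listen("tcp", ":0") // any free port, as startServer does in the tests
    if err != nil {
        log.Fatalf("failed to listen: %v", err)
    }
    log.Infof("management service listening on %s", lis.Addr().String())
    _ = grpcServer.Serve(lis)
}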