package dns

import (
	"context"
	"errors"
	"fmt"
	"net"
	"sync"
	"sync/atomic"
	"time"

	"github.com/cenkalti/backoff/v4"
	"github.com/hashicorp/go-multierror"
	"github.com/miekg/dns"
	log "github.com/sirupsen/logrus"

	"github.com/netbirdio/netbird/client/internal/peer"
)
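
// Timeouts and thresholds for upstream failure handling: how many consecutive
// failures deactivate the upstreams, how long they stay deactivated, and how
// long a regular query or an availability probe may take.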
const (
	failsTillDeact   = int32(5)
	reactivatePeriod = 30 * time.Second
	upstreamTimeout  = 15 * time.Second
	probeTimeout     = 2 * time.Second
)
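
// testRecord is the name used for upstream availability checks; a SOA query
// for com. should be answerable by any working resolver.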
const testRecord = "com."
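
// upstreamClient is the transport used to exchange a DNS message with a
// single upstream server.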
type upstreamClient interface {
	exchange(ctx context.Context, upstream string, r *dns.Msg) (*dns.Msg, time.Duration, error)
}
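
// UpstreamResolver defines the operations an upstream resolver implementation
// provides.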
type UpstreamResolver interface {
	serveDNS(r *dns.Msg) (*dns.Msg, time.Duration, error)
	upstreamExchange(upstream string, r *dns.Msg) (*dns.Msg, time.Duration, error)
}
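
// upstreamResolverBase implements the common upstream logic: forwarding
// queries, counting failures and successes, and temporarily deactivating
// upstreams that stop responding.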
type upstreamResolverBase struct {
	ctx              context.Context
	cancel           context.CancelFunc
	upstreamClient   upstreamClient
	upstreamServers  []string
	disabled         bool
	failsCount       atomic.Int32
	successCount     atomic.Int32
	failsTillDeact   int32
	mutex            sync.Mutex
	reactivatePeriod time.Duration
	upstreamTimeout  time.Duration

	deactivate     func(error)
	reactivate     func()
	statusRecorder *peer.Status
}
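
// newUpstreamResolverBase creates a resolver base bound to the given context,
// using the package-default timeout, reactivation period, and failure
// threshold.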
func newUpstreamResolverBase(ctx context.Context, statusRecorder *peer.Status) *upstreamResolverBase {
	ctx, cancel := context.WithCancel(ctx)

	return &upstreamResolverBase{
		ctx:              ctx,
		cancel:           cancel,
		upstreamTimeout:  upstreamTimeout,
		reactivatePeriod: reactivatePeriod,
		failsTillDeact:   failsTillDeact,
		statusRecorder:   statusRecorder,
	}
}
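
// stop cancels the resolver's context, aborting in-flight exchanges and any
// background reactivation probing.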
func (u *upstreamResolverBase) stop() {
	log.Debugf("stopping serving DNS for upstreams %s", u.upstreamServers)
	u.cancel()
}

// ServeDNS handles a DNS request: it forwards the query to the upstream
// servers in order and writes back the first answer it receives.
func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
	var err error
	defer func() {
		u.checkUpstreamFails(err)
	}()

	log.WithField("question", r.Question[0]).Trace("received an upstream question")
	// set the AuthenticatedData flag and the EDNS0 buffer size to 4096 bytes to support larger DNS records
	if r.Extra == nil {
		r.SetEdns0(4096, false)
		r.MsgHdr.AuthenticatedData = true
	}

	select {
	case <-u.ctx.Done():
		return
	default:
	}

	for _, upstream := range u.upstreamServers {
		var rm *dns.Msg
		var t time.Duration

		func() {
			ctx, cancel := context.WithTimeout(u.ctx, u.upstreamTimeout)
			defer cancel()
			rm, t, err = u.upstreamClient.exchange(ctx, upstream, r)
		}()

		if err != nil {
			if errors.Is(err, context.DeadlineExceeded) || isTimeout(err) {
				log.WithError(err).WithField("upstream", upstream).
					Warn("got a timeout while connecting to upstream")
				continue
			}
			u.failsCount.Add(1)
			log.WithError(err).WithField("upstream", upstream).
				Error("got a non-timeout error while querying the upstream")
			return
		}

		if rm == nil {
			log.WithError(err).WithField("upstream", upstream).
				Warn("no response from upstream")
			return
		}
		// this check is kept separate from the nil check above: rm must be
		// non-nil before rm.Response can be dereferenced safely
		if !rm.Response {
			log.WithError(err).WithField("upstream", upstream).
				Warn("received a non-response message from upstream")
			return
		}

		u.successCount.Add(1)
		log.Tracef("took %s to query the upstream %s", t, upstream)

		err = w.WriteMsg(rm)
		if err != nil {
			log.WithError(err).Error("got an error while writing the upstream resolver response")
		}
		// fails are counted only when they happen back to back, so reset the counter after a success
		u.failsCount.Store(0)
		return
	}
	u.failsCount.Add(1)
	log.Error("all queries to the upstream nameservers failed with timeout")
}

// checkUpstreamFails counts failures and deactivates upstream resolving
//
// Once the fail counter reaches failsTillDeact, upstream resolving is
// disabled for reactivatePeriod; after that the counter is reset and the
// upstreams are reactivated.
func (u *upstreamResolverBase) checkUpstreamFails(err error) {
	u.mutex.Lock()
	defer u.mutex.Unlock()

	if u.failsCount.Load() < u.failsTillDeact || u.disabled {
		return
	}

	select {
	case <-u.ctx.Done():
		return
	default:
	}

	u.disable(err)
}

// probeAvailability tests all upstream servers concurrently and
// disables the resolver if none of them respond
func (u *upstreamResolverBase) probeAvailability() {
	u.mutex.Lock()
	defer u.mutex.Unlock()

	select {
	case <-u.ctx.Done():
		return
	default:
	}

	// skip the probe if the upstreams resolved at least one query and the fail count is below failsTillDeact
	if u.successCount.Load() > 0 && u.failsCount.Load() < u.failsTillDeact {
		return
	}

	var success bool
	var mu sync.Mutex
	var wg sync.WaitGroup

	var errs *multierror.Error
	for _, upstream := range u.upstreamServers {
		upstream := upstream

		wg.Add(1)
		go func() {
			defer wg.Done()
			err := u.testNameserver(upstream, 500*time.Millisecond)
			if err != nil {
				// the error list is shared between goroutines, so append under the lock
				mu.Lock()
				errs = multierror.Append(errs, err)
				mu.Unlock()
				log.Warnf("probing upstream nameserver %s: %s", upstream, err)
				return
			}

			mu.Lock()
			defer mu.Unlock()
			success = true
		}()
	}

	wg.Wait()

	// didn't find a working upstream server, let's disable and try later
	if !success {
		u.disable(errs.ErrorOrNil())
	}
}

// waitUntilResponse retries, with exponential backoff, querying the upstream servers until it gets a positive response
func (u *upstreamResolverBase) waitUntilResponse() {
	exponentialBackOff := &backoff.ExponentialBackOff{
		InitialInterval:     500 * time.Millisecond,
		RandomizationFactor: 0.5,
		Multiplier:          1.1,
		MaxInterval:         u.reactivatePeriod,
		MaxElapsedTime:      0, // 0 means never stop retrying
		Stop:                backoff.Stop,
		Clock:               backoff.SystemClock,
	}

	operation := func() error {
		select {
		case <-u.ctx.Done():
			return backoff.Permanent(fmt.Errorf("exiting upstream retry loop for upstreams %s: parent context has been canceled", u.upstreamServers))
		default:
		}

		for _, upstream := range u.upstreamServers {
			if err := u.testNameserver(upstream, probeTimeout); err != nil {
				log.Tracef("upstream check for %s: %s", upstream, err)
			} else {
				// at least one upstream server is available, stop probing
				return nil
			}
		}

		log.Tracef("checking connectivity with upstreams %s failed. Retrying in %s", u.upstreamServers, exponentialBackOff.NextBackOff())
		return fmt.Errorf("all upstream servers failed the connectivity check")
	}

	err := backoff.Retry(operation, exponentialBackOff)
	if err != nil {
		log.Warn(err)
		return
	}

	log.Infof("upstreams %s are responsive again, adding them back to the system", u.upstreamServers)
	u.failsCount.Store(0)
	u.successCount.Add(1)
	u.reactivate()
	u.disabled = false
}

// isTimeout returns true if the given error is a network timeout error.
//
// Copied from k8s.io/apimachinery/pkg/util/net.IsTimeout
func isTimeout(err error) bool {
	var neterr net.Error
	if errors.As(err, &neterr) {
		return neterr != nil && neterr.Timeout()
	}
	return false
}
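
// disable deactivates upstream resolving, resets the success counter, and
// starts waiting in the background for the upstreams to become responsive
// again.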
func (u *upstreamResolverBase) disable(err error) {
	if u.disabled {
		return
	}

	log.Warnf("upstream resolving is disabled for %v", reactivatePeriod)
	u.successCount.Store(0)
	u.deactivate(err)
	u.disabled = true
	go u.waitUntilResponse()
}
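
// testNameserver probes the given server with a SOA query for testRecord,
// bounded by the given timeout.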
func (u *upstreamResolverBase) testNameserver(server string, timeout time.Duration) error {
	ctx, cancel := context.WithTimeout(u.ctx, timeout)
	defer cancel()

	r := new(dns.Msg).SetQuestion(testRecord, dns.TypeSOA)

	_, _, err := u.upstreamClient.exchange(ctx, server, r)
	return err
}