Mirror of https://github.com/slackhq/nebula.git (synced 2025-11-09 21:33:58 +01:00)

Compare commits (10 commits):

- 105e0ec66c
- 4870bb680d
- a1498ca8f8
- 9877648da9
- 8e0a7bcbb7
- 8c29b15c6d
- 04d7a8ccba
- b55b9019a7
- 2e85d138cd
- 9bfdfbafc1
CHANGELOG.md (27 lines changed)

@@ -7,6 +7,29 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ## [Unreleased]
 
+## [1.9.6] - 2025-07-15
+
+### Added
+
+- Support dropping inactive tunnels. This is disabled by default in this release but can be enabled with `tunnels.drop_inactive`. See example config for more details. (#1413)
+
+### Fixed
+
+- Fix Darwin freeze due to presence of some Network Extensions (#1426)
+- Ensure the same relay tunnel is always used when multiple relay tunnels are present (#1422)
+- Fix Windows freeze due to ICMP error handling (#1412)
+- Fix relay migration panic (#1403)
+
+## [1.9.5] - 2024-12-05
+
+### Added
+
+- Gracefully ignore v2 certificates. (#1282)
+
+### Fixed
+
+- Fix relays that refuse to re-establish after one of the remote tunnel pairs breaks. (#1277)
+
 ## [1.9.4] - 2024-09-09
 
 ### Added

@@ -664,7 +687,9 @@ created.)
 - Initial public release.
 
-[Unreleased]: https://github.com/slackhq/nebula/compare/v1.9.4...HEAD
+[Unreleased]: https://github.com/slackhq/nebula/compare/v1.9.6...HEAD
+[1.9.6]: https://github.com/slackhq/nebula/releases/tag/v1.9.6
+[1.9.5]: https://github.com/slackhq/nebula/releases/tag/v1.9.5
 [1.9.4]: https://github.com/slackhq/nebula/releases/tag/v1.9.4
 [1.9.3]: https://github.com/slackhq/nebula/releases/tag/v1.9.3
 [1.9.2]: https://github.com/slackhq/nebula/releases/tag/v1.9.2
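The first changelog entry above references a new `tunnels.drop_inactive` setting. Below is a minimal sketch of reading the related keys, using the same config accessors and defaults that appear later in this changeset; the YAML shape shown in the comment is an assumption inferred from the dotted key names, and the config path is illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/config"
)

func main() {
	// Assumed YAML shape, inferred from the dotted key names used in the diff:
	//
	//   tunnels:
	//     drop_inactive: true
	//     inactivity_timeout: 10m
	//
	l := logrus.New()
	c := config.NewC(l)
	if err := c.Load("/etc/nebula/config.yml"); err != nil { // illustrative path
		panic(err)
	}

	// Same accessors and defaults the connection manager uses in this changeset.
	dropInactive := c.GetBool("tunnels.drop_inactive", false)
	timeout := c.GetDuration("tunnels.inactivity_timeout", 10*time.Minute)
	fmt.Println(dropInactive, timeout)
}
```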
cert/ca.go (28 lines changed)

@@ -24,31 +24,39 @@ func NewCAPool() *NebulaCAPool {
 // NewCAPoolFromBytes will create a new CA pool from the provided
 // input bytes, which must be a PEM-encoded set of nebula certificates.
+// If the pool contains unsupported certificates, they will generate warnings
+// in the []error return arg.
 // If the pool contains any expired certificates, an ErrExpired will be
 // returned along with the pool. The caller must handle any such errors.
-func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, error) {
+func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, []error, error) {
 	pool := NewCAPool()
 	var err error
-	var expired bool
+	var warnings []error
+	good := 0
 
 	for {
 		caPEMs, err = pool.AddCACertificate(caPEMs)
 		if errors.Is(err, ErrExpired) {
-			expired = true
-			err = nil
-		}
-		if err != nil {
-			return nil, err
+			warnings = append(warnings, err)
+		} else if errors.Is(err, ErrInvalidPEMCertificateUnsupported) {
+			warnings = append(warnings, err)
+		} else if err != nil {
+			return nil, warnings, err
+		} else {
+			// Only consider a good certificate if there were no errors present
+			good++
 		}
 
 		if len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" {
 			break
 		}
 	}
 
-	if expired {
-		return pool, ErrExpired
+	if good == 0 {
+		return nil, warnings, errors.New("no valid CA certificates present")
 	}
 
-	return pool, nil
+	return pool, warnings, nil
 }
 
 // AddCACertificate verifies a Nebula CA certificate and adds it to the pool
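Callers of `NewCAPoolFromBytes` now receive non-fatal warnings separately from the fatal error. A hedged, self-contained sketch of how a caller outside the `cert` package might adapt; the file path and logging are illustrative, not taken from this changeset:

```go
package main

import (
	"log"
	"os"

	"github.com/slackhq/nebula/cert"
)

func main() {
	caPEM, err := os.ReadFile("ca.crt") // illustrative path
	if err != nil {
		log.Fatal(err)
	}

	// The pool, non-fatal warnings, and a fatal error are now returned separately.
	pool, warnings, err := cert.NewCAPoolFromBytes(caPEM)
	if err != nil {
		// Returned when no valid CA certificates could be loaded at all.
		log.Fatalf("failed to load CA pool: %v", err)
	}
	for _, w := range warnings {
		// Expired or unsupported (v2 banner) certificates land here but do not
		// prevent the rest of the pool from loading.
		log.Printf("CA pool warning: %v", w)
	}
	_ = pool
}
```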
@@ -28,6 +28,7 @@ const publicKeyLen = 32
 const (
 	CertBanner                       = "NEBULA CERTIFICATE"
+	CertificateV2Banner              = "NEBULA CERTIFICATE V2"
 	X25519PrivateKeyBanner           = "NEBULA X25519 PRIVATE KEY"
 	X25519PublicKeyBanner            = "NEBULA X25519 PUBLIC KEY"
 	EncryptedEd25519PrivateKeyBanner = "NEBULA ED25519 ENCRYPTED PRIVATE KEY"

@@ -163,6 +164,9 @@ func UnmarshalNebulaCertificateFromPEM(b []byte) (*NebulaCertificate, []byte, er
 	if p == nil {
 		return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
 	}
+	if p.Type == CertificateV2Banner {
+		return nil, r, fmt.Errorf("%w: %s", ErrInvalidPEMCertificateUnsupported, p.Type)
+	}
 	if p.Type != CertBanner {
 		return nil, r, fmt.Errorf("bytes did not contain a proper nebula certificate banner")
 	}
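Because the new check wraps `ErrInvalidPEMCertificateUnsupported` with `%w`, callers can detect a v2-banner block with `errors.Is`. A small illustrative sketch (the PEM literal is a placeholder, not a real certificate):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/slackhq/nebula/cert"
)

func main() {
	// Placeholder v2-banner PEM block; the body is not a real certificate.
	v2PEM := []byte("-----BEGIN NEBULA CERTIFICATE V2-----\nAAAA\n-----END NEBULA CERTIFICATE V2-----\n")

	_, _, err := cert.UnmarshalNebulaCertificateFromPEM(v2PEM)
	if errors.Is(err, cert.ErrInvalidPEMCertificateUnsupported) {
		// A v2 certificate was encountered; this code path only understands v1.
		fmt.Println("skipping unsupported certificate:", err)
	}
}
```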
@@ -5,6 +5,7 @@ import (
 	"crypto/ecdsa"
 	"crypto/elliptic"
 	"crypto/rand"
+	"errors"
 	"fmt"
 	"io"
 	"net"

@@ -572,6 +573,13 @@ CmYKEG5lYnVsYSBQMjU2IHRlc3Qo4s+7mgYw4tXrsAc6QQRkaW2jFmllYvN4+/k2
 76gvQAGgBgESRzBFAiEAib0/te6eMiZOKD8gdDeloMTS0wGuX2t0C7TFdUhAQzgC
 IBNWYMep3ysx9zCgknfG5dKtwGTaqF++BWKDYdyl34KX
 -----END NEBULA CERTIFICATE-----
+`
+
+	v2 := `
+# valid PEM with the V2 header
+-----BEGIN NEBULA CERTIFICATE V2-----
+CmYKEG5lYnVsYSBQMjU2IHRlc3Qo4s+7mgYw4tXrsAc6QQRkaW2jFmllYvN4+/k2
+-----END NEBULA CERTIFICATE V2-----
 `
 
 	rootCA := NebulaCertificate{

@@ -592,33 +600,46 @@ IBNWYMep3ysx9zCgknfG5dKtwGTaqF++BWKDYdyl34KX
 		},
 	}
 
-	p, err := NewCAPoolFromBytes([]byte(noNewLines))
+	p, warn, err := NewCAPoolFromBytes([]byte(noNewLines))
 	assert.Nil(t, err)
+	assert.Nil(t, warn)
 	assert.Equal(t, p.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
 	assert.Equal(t, p.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
 
-	pp, err := NewCAPoolFromBytes([]byte(withNewLines))
+	pp, warn, err := NewCAPoolFromBytes([]byte(withNewLines))
 	assert.Nil(t, err)
+	assert.Nil(t, warn)
 	assert.Equal(t, pp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
 	assert.Equal(t, pp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
 
 	// expired cert, no valid certs
-	ppp, err := NewCAPoolFromBytes([]byte(expired))
-	assert.Equal(t, ErrExpired, err)
-	assert.Equal(t, ppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
+	ppp, warn, err := NewCAPoolFromBytes([]byte(expired))
+	assert.Error(t, err, "no valid CA certificates present")
+	assert.Len(t, warn, 1)
+	assert.Error(t, warn[0], ErrExpired)
+	assert.Nil(t, ppp)
 
 	// expired cert, with valid certs
-	pppp, err := NewCAPoolFromBytes(append([]byte(expired), noNewLines...))
-	assert.Equal(t, ErrExpired, err)
+	pppp, warn, err := NewCAPoolFromBytes(append([]byte(expired), noNewLines...))
+	assert.Len(t, warn, 1)
+	assert.Nil(t, err)
+	assert.Error(t, warn[0], ErrExpired)
 	assert.Equal(t, pppp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
 	assert.Equal(t, pppp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
 	assert.Equal(t, pppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
 	assert.Equal(t, len(pppp.CAs), 3)
 
-	ppppp, err := NewCAPoolFromBytes([]byte(p256))
+	ppppp, warn, err := NewCAPoolFromBytes([]byte(p256))
 	assert.Nil(t, err)
+	assert.Nil(t, warn)
 	assert.Equal(t, ppppp.CAs[string("a7938893ec8c4ef769b06d7f425e5e46f7a7f5ffa49c3bcf4a86b608caba9159")].Details.Name, rootCAP256.Details.Name)
 	assert.Equal(t, len(ppppp.CAs), 1)
+
+	pppppp, warn, err := NewCAPoolFromBytes(append([]byte(p256), []byte(v2)...))
+	assert.Nil(t, err)
+	assert.True(t, errors.Is(warn[0], ErrInvalidPEMCertificateUnsupported))
+	assert.Equal(t, pppppp.CAs[string("a7938893ec8c4ef769b06d7f425e5e46f7a7f5ffa49c3bcf4a86b608caba9159")].Details.Name, rootCAP256.Details.Name)
+	assert.Equal(t, len(pppppp.CAs), 1)
 }
 
 func appendByteSlices(b ...[]byte) []byte {
@@ -5,10 +5,11 @@ import (
 )
 
 var (
 	ErrRootExpired       = errors.New("root certificate is expired")
 	ErrExpired           = errors.New("certificate is expired")
 	ErrNotCA             = errors.New("certificate is not a CA")
 	ErrNotSelfSigned     = errors.New("certificate is not self-signed")
 	ErrBlockListed       = errors.New("certificate is in the block list")
 	ErrSignatureMismatch = errors.New("certificate signature did not match")
+	ErrInvalidPEMCertificateUnsupported = errors.New("bytes contain an unsupported certificate format")
 )
@@ -4,13 +4,16 @@ import (
 	"bytes"
 	"context"
 	"encoding/binary"
+	"fmt"
 	"net/netip"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/rcrowley/go-metrics"
 	"github.com/sirupsen/logrus"
 	"github.com/slackhq/nebula/cert"
+	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/header"
 )
@@ -27,130 +30,124 @@ const (
 )
 
 type connectionManager struct {
-	in     map[uint32]struct{}
-	inLock *sync.RWMutex
-
-	out     map[uint32]struct{}
-	outLock *sync.RWMutex
-
 	// relayUsed holds which relay localIndexs are in use
 	relayUsed     map[uint32]struct{}
 	relayUsedLock *sync.RWMutex
 
 	hostMap      *HostMap
 	trafficTimer *LockingTimerWheel[uint32]
 	intf         *Interface
-	pendingDeletion map[uint32]struct{}
 	punchy       *Punchy
+
+	// Configuration settings
 	checkInterval           time.Duration
 	pendingDeletionInterval time.Duration
-	metricsTxPunchy         metrics.Counter
+	inactivityTimeout       atomic.Int64
+	dropInactive            atomic.Bool
+
+	metricsTxPunchy metrics.Counter
 
 	l *logrus.Logger
 }
 
-func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager {
-	var max time.Duration
-	if checkInterval < pendingDeletionInterval {
-		max = pendingDeletionInterval
-	} else {
-		max = checkInterval
-	}
-
-	nc := &connectionManager{
-		hostMap:                 intf.hostMap,
-		in:                      make(map[uint32]struct{}),
-		inLock:                  &sync.RWMutex{},
-		out:                     make(map[uint32]struct{}),
-		outLock:                 &sync.RWMutex{},
-		relayUsed:               make(map[uint32]struct{}),
-		relayUsedLock:           &sync.RWMutex{},
-		trafficTimer:            NewLockingTimerWheel[uint32](time.Millisecond*500, max),
-		intf:                    intf,
-		pendingDeletion:         make(map[uint32]struct{}),
-		checkInterval:           checkInterval,
-		pendingDeletionInterval: pendingDeletionInterval,
-		punchy:                  punchy,
-		metricsTxPunchy:         metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
-		l:                       l,
-	}
-
-	nc.Start(ctx)
-	return nc
-}
-
-func (n *connectionManager) In(localIndex uint32) {
-	n.inLock.RLock()
-	// If this already exists, return
-	if _, ok := n.in[localIndex]; ok {
-		n.inLock.RUnlock()
-		return
-	}
-	n.inLock.RUnlock()
-	n.inLock.Lock()
-	n.in[localIndex] = struct{}{}
-	n.inLock.Unlock()
-}
-
-func (n *connectionManager) Out(localIndex uint32) {
-	n.outLock.RLock()
-	// If this already exists, return
-	if _, ok := n.out[localIndex]; ok {
-		n.outLock.RUnlock()
-		return
-	}
-	n.outLock.RUnlock()
-	n.outLock.Lock()
-	n.out[localIndex] = struct{}{}
-	n.outLock.Unlock()
-}
-
-func (n *connectionManager) RelayUsed(localIndex uint32) {
-	n.relayUsedLock.RLock()
+func newConnectionManagerFromConfig(l *logrus.Logger, c *config.C, hm *HostMap, p *Punchy) *connectionManager {
+	cm := &connectionManager{
+		hostMap:         hm,
+		l:               l,
+		punchy:          p,
+		relayUsed:       make(map[uint32]struct{}),
+		relayUsedLock:   &sync.RWMutex{},
+		metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
+	}
+
+	cm.reload(c, true)
+	c.RegisterReloadCallback(func(c *config.C) {
+		cm.reload(c, false)
+	})
+
+	return cm
+}
+
+func (cm *connectionManager) reload(c *config.C, initial bool) {
+	if initial {
+		cm.checkInterval = time.Duration(c.GetInt("timers.connection_alive_interval", 5)) * time.Second
+		cm.pendingDeletionInterval = time.Duration(c.GetInt("timers.pending_deletion_interval", 10)) * time.Second
+
+		// We want at least a minimum resolution of 500ms per tick so that we can hit these intervals
+		// pretty close to their configured duration.
+		// The inactivity duration is checked each time a hostinfo ticks through so we don't need the wheel to contain it.
+		minDuration := min(time.Millisecond*500, cm.checkInterval, cm.pendingDeletionInterval)
+		maxDuration := max(cm.checkInterval, cm.pendingDeletionInterval)
+		cm.trafficTimer = NewLockingTimerWheel[uint32](minDuration, maxDuration)
+	}
+
+	if initial || c.HasChanged("tunnels.inactivity_timeout") {
+		old := cm.getInactivityTimeout()
+		cm.inactivityTimeout.Store((int64)(c.GetDuration("tunnels.inactivity_timeout", 10*time.Minute)))
+		if !initial {
+			cm.l.WithField("oldDuration", old).
+				WithField("newDuration", cm.getInactivityTimeout()).
+				Info("Inactivity timeout has changed")
+		}
+	}
+
+	if initial || c.HasChanged("tunnels.drop_inactive") {
+		old := cm.dropInactive.Load()
+		cm.dropInactive.Store(c.GetBool("tunnels.drop_inactive", false))
+		if !initial {
+			cm.l.WithField("oldBool", old).
+				WithField("newBool", cm.dropInactive.Load()).
+				Info("Drop inactive setting has changed")
+		}
+	}
+}
+
+func (cm *connectionManager) getInactivityTimeout() time.Duration {
+	return (time.Duration)(cm.inactivityTimeout.Load())
+}
+
+func (cm *connectionManager) In(h *HostInfo) {
+	h.in.Store(true)
+}
+
+func (cm *connectionManager) Out(h *HostInfo) {
+	h.out.Store(true)
+}
+
+func (cm *connectionManager) RelayUsed(localIndex uint32) {
+	cm.relayUsedLock.RLock()
 	// If this already exists, return
-	if _, ok := n.relayUsed[localIndex]; ok {
-		n.relayUsedLock.RUnlock()
+	if _, ok := cm.relayUsed[localIndex]; ok {
+		cm.relayUsedLock.RUnlock()
 		return
 	}
-	n.relayUsedLock.RUnlock()
-	n.relayUsedLock.Lock()
-	n.relayUsed[localIndex] = struct{}{}
-	n.relayUsedLock.Unlock()
+	cm.relayUsedLock.RUnlock()
+	cm.relayUsedLock.Lock()
+	cm.relayUsed[localIndex] = struct{}{}
+	cm.relayUsedLock.Unlock()
 }
 
 // getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and
 // resets the state for this local index
-func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) {
-	n.inLock.Lock()
-	n.outLock.Lock()
-	_, in := n.in[localIndex]
-	_, out := n.out[localIndex]
-	delete(n.in, localIndex)
-	delete(n.out, localIndex)
-	n.inLock.Unlock()
-	n.outLock.Unlock()
+func (cm *connectionManager) getAndResetTrafficCheck(h *HostInfo, now time.Time) (bool, bool) {
+	in := h.in.Swap(false)
+	out := h.out.Swap(false)
+	if in || out {
+		h.lastUsed = now
+	}
 	return in, out
 }
 
-func (n *connectionManager) AddTrafficWatch(localIndex uint32) {
-	// Use a write lock directly because it should be incredibly rare that we are ever already tracking this index
-	n.outLock.Lock()
-	if _, ok := n.out[localIndex]; ok {
-		n.outLock.Unlock()
-		return
-	}
-	n.out[localIndex] = struct{}{}
-	n.trafficTimer.Add(localIndex, n.checkInterval)
-	n.outLock.Unlock()
+// AddTrafficWatch must be called for every new HostInfo.
+// We will continue to monitor the HostInfo until the tunnel is dropped.
+func (cm *connectionManager) AddTrafficWatch(h *HostInfo) {
+	if h.out.Swap(true) == false {
+		cm.trafficTimer.Add(h.localIndexId, cm.checkInterval)
+	}
 }
 
-func (n *connectionManager) Start(ctx context.Context) {
-	go n.Run(ctx)
-}
-
-func (n *connectionManager) Run(ctx context.Context) {
-	//TODO: this tick should be based on the min wheel tick? Check firewall
-	clockSource := time.NewTicker(500 * time.Millisecond)
+func (cm *connectionManager) Start(ctx context.Context) {
+	clockSource := time.NewTicker(cm.trafficTimer.t.tickDuration)
 	defer clockSource.Stop()
 
 	p := []byte("")
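Since the constructor no longer calls `Start` itself, the caller is now responsible for launching the ticker loop. A rough sketch of the wiring under that assumption, mirroring how the tests and `Control.Start` use the new pieces; the helper function below is hypothetical and not part of this changeset:

```go
package nebula

import (
	"context"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/config"
)

// exampleConnectionManagerWiring is a hypothetical helper, not code from this
// changeset. It shows how the pieces introduced above are expected to fit:
// build the manager from config, attach the interface, then run the loop.
func exampleConnectionManagerWiring(ctx context.Context, l *logrus.Logger, c *config.C, hostMap *HostMap, ifce *Interface) {
	punchy := NewPunchyFromConfig(l, c)
	cm := newConnectionManagerFromConfig(l, c, hostMap, punchy)
	cm.intf = ifce

	// The constructor no longer starts anything; the caller launches the
	// ticker loop, as Control.Start does via connectionManagerStart.
	go cm.Start(ctx)
}
```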
@@ -163,61 +160,61 @@ func (n *connectionManager) Run(ctx context.Context) {
 			return
 
 		case now := <-clockSource.C:
-			n.trafficTimer.Advance(now)
+			cm.trafficTimer.Advance(now)
 			for {
-				localIndex, has := n.trafficTimer.Purge()
+				localIndex, has := cm.trafficTimer.Purge()
 				if !has {
 					break
 				}
 
-				n.doTrafficCheck(localIndex, p, nb, out, now)
+				cm.doTrafficCheck(localIndex, p, nb, out, now)
 			}
 		}
 	}
 }
 
-func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
-	decision, hostinfo, primary := n.makeTrafficDecision(localIndex, now)
+func (cm *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
+	decision, hostinfo, primary := cm.makeTrafficDecision(localIndex, now)
 
 	switch decision {
 	case deleteTunnel:
-		if n.hostMap.DeleteHostInfo(hostinfo) {
+		if cm.hostMap.DeleteHostInfo(hostinfo) {
 			// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
-			n.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
+			cm.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
 		}
 
 	case closeTunnel:
-		n.intf.sendCloseTunnel(hostinfo)
-		n.intf.closeTunnel(hostinfo)
+		cm.intf.sendCloseTunnel(hostinfo)
+		cm.intf.closeTunnel(hostinfo)
 
 	case swapPrimary:
-		n.swapPrimary(hostinfo, primary)
+		cm.swapPrimary(hostinfo, primary)
 
 	case migrateRelays:
-		n.migrateRelayUsed(hostinfo, primary)
+		cm.migrateRelayUsed(hostinfo, primary)
 
 	case tryRehandshake:
-		n.tryRehandshake(hostinfo)
+		cm.tryRehandshake(hostinfo)
 
 	case sendTestPacket:
-		n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
+		cm.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
 	}
 
-	n.resetRelayTrafficCheck(hostinfo)
+	cm.resetRelayTrafficCheck(hostinfo)
 }
 
-func (n *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
+func (cm *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
 	if hostinfo != nil {
-		n.relayUsedLock.Lock()
-		defer n.relayUsedLock.Unlock()
+		cm.relayUsedLock.Lock()
+		defer cm.relayUsedLock.Unlock()
 		// No need to migrate any relays, delete usage info now.
 		for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
-			delete(n.relayUsed, idx)
+			delete(cm.relayUsed, idx)
 		}
 	}
 }
 
-func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
+func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
 	relayFor := oldhostinfo.relayState.CopyAllRelayFor()
 
 	for _, r := range relayFor {
@@ -227,46 +224,51 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
 		var relayFrom netip.Addr
 		var relayTo netip.Addr
 		switch {
-		case ok && existing.State == Established:
-			// This relay already exists in newhostinfo, then do nothing.
-			continue
-		case ok && existing.State == Requested:
-			// The relay exists in a Requested state; re-send the request
-			index = existing.LocalIndex
-			switch r.Type {
-			case TerminalType:
-				relayFrom = n.intf.myVpnNet.Addr()
-				relayTo = existing.PeerIp
-			case ForwardingType:
-				relayFrom = existing.PeerIp
-				relayTo = newhostinfo.vpnIp
-			default:
-				// should never happen
+		case ok:
+			switch existing.State {
+			case Established, PeerRequested, Disestablished:
+				// This relay already exists in newhostinfo, then do nothing.
+				continue
+			case Requested:
+				// The relayed connection exists in a Requested state; re-send the request
+				index = existing.LocalIndex
+				switch r.Type {
+				case TerminalType:
+					relayFrom = cm.intf.myVpnNet.Addr()
+					relayTo = existing.PeerIp
+				case ForwardingType:
+					relayFrom = existing.PeerIp
+					relayTo = newhostinfo.vpnIp
+				default:
+					// should never happen
+					panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
+				}
 			}
 		case !ok:
-			n.relayUsedLock.RLock()
-			if _, relayUsed := n.relayUsed[r.LocalIndex]; !relayUsed {
+			cm.relayUsedLock.RLock()
+			if _, relayUsed := cm.relayUsed[r.LocalIndex]; !relayUsed {
 				// The relay hasn't been used; don't migrate it.
-				n.relayUsedLock.RUnlock()
+				cm.relayUsedLock.RUnlock()
 				continue
 			}
-			n.relayUsedLock.RUnlock()
+			cm.relayUsedLock.RUnlock()
 			// The relay doesn't exist at all; create some relay state and send the request.
 			var err error
-			index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerIp, nil, r.Type, Requested)
+			index, err = AddRelay(cm.l, newhostinfo, cm.hostMap, r.PeerIp, nil, r.Type, Requested)
 			if err != nil {
-				n.l.WithError(err).Error("failed to migrate relay to new hostinfo")
+				cm.l.WithError(err).Error("failed to migrate relay to new hostinfo")
 				continue
 			}
 			switch r.Type {
 			case TerminalType:
-				relayFrom = n.intf.myVpnNet.Addr()
+				relayFrom = cm.intf.myVpnNet.Addr()
 				relayTo = r.PeerIp
 			case ForwardingType:
 				relayFrom = r.PeerIp
 				relayTo = newhostinfo.vpnIp
 			default:
 				// should never happen
+				panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
 			}
 		}
@@ -283,10 +285,10 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
 		}
 		msg, err := req.Marshal()
 		if err != nil {
-			n.l.WithError(err).Error("failed to marshal Control message to migrate relay")
+			cm.l.WithError(err).Error("failed to marshal Control message to migrate relay")
 		} else {
-			n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
-			n.l.WithFields(logrus.Fields{
+			cm.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
+			cm.l.WithFields(logrus.Fields{
 				"relayFrom":           req.RelayFromIp,
 				"relayTo":             req.RelayToIp,
 				"initiatorRelayIndex": req.InitiatorRelayIndex,
@@ -297,46 +299,45 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
 		}
 	}
 }
 
-func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
-	n.hostMap.RLock()
-	defer n.hostMap.RUnlock()
+func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
+	// Read lock the main hostmap to order decisions based on tunnels being the primary tunnel
+	cm.hostMap.RLock()
+	defer cm.hostMap.RUnlock()
 
-	hostinfo := n.hostMap.Indexes[localIndex]
+	hostinfo := cm.hostMap.Indexes[localIndex]
 	if hostinfo == nil {
-		n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
-		delete(n.pendingDeletion, localIndex)
+		cm.l.WithField("localIndex", localIndex).Debugln("Not found in hostmap")
 		return doNothing, nil, nil
 	}
 
-	if n.isInvalidCertificate(now, hostinfo) {
-		delete(n.pendingDeletion, hostinfo.localIndexId)
+	if cm.isInvalidCertificate(now, hostinfo) {
 		return closeTunnel, hostinfo, nil
 	}
 
-	primary := n.hostMap.Hosts[hostinfo.vpnIp]
+	primary := cm.hostMap.Hosts[hostinfo.vpnIp]
 	mainHostInfo := true
 	if primary != nil && primary != hostinfo {
 		mainHostInfo = false
 	}
 
 	// Check for traffic on this hostinfo
-	inTraffic, outTraffic := n.getAndResetTrafficCheck(localIndex)
+	inTraffic, outTraffic := cm.getAndResetTrafficCheck(hostinfo, now)
 
 	// A hostinfo is determined alive if there is incoming traffic
 	if inTraffic {
 		decision := doNothing
-		if n.l.Level >= logrus.DebugLevel {
-			hostinfo.logger(n.l).
+		if cm.l.Level >= logrus.DebugLevel {
+			hostinfo.logger(cm.l).
 				WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
 				Debug("Tunnel status")
 		}
-		delete(n.pendingDeletion, hostinfo.localIndexId)
+		hostinfo.pendingDeletion.Store(false)
 
 		if mainHostInfo {
 			decision = tryRehandshake
 
 		} else {
-			if n.shouldSwapPrimary(hostinfo, primary) {
+			if cm.shouldSwapPrimary(hostinfo, primary) {
 				decision = swapPrimary
 			} else {
 				// migrate the relays to the primary, if in use.
@@ -344,46 +345,55 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time
 			}
 		}
 
-		n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
+		cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)
 
 		if !outTraffic {
 			// Send a punch packet to keep the NAT state alive
-			n.sendPunch(hostinfo)
+			cm.sendPunch(hostinfo)
 		}
 
 		return decision, hostinfo, primary
 	}
 
-	if _, ok := n.pendingDeletion[hostinfo.localIndexId]; ok {
+	if hostinfo.pendingDeletion.Load() {
 		// We have already sent a test packet and nothing was returned, this hostinfo is dead
-		hostinfo.logger(n.l).
+		hostinfo.logger(cm.l).
 			WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
 			Info("Tunnel status")
 
-		delete(n.pendingDeletion, hostinfo.localIndexId)
 		return deleteTunnel, hostinfo, nil
 	}
 
 	decision := doNothing
 	if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
 		if !outTraffic {
+			inactiveFor, isInactive := cm.isInactive(hostinfo, now)
+			if isInactive {
+				// Tunnel is inactive, tear it down
+				hostinfo.logger(cm.l).
+					WithField("inactiveDuration", inactiveFor).
+					WithField("primary", mainHostInfo).
+					Info("Dropping tunnel due to inactivity")
+
+				return closeTunnel, hostinfo, primary
+			}
+
 			// If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel.
 			// Just maintain NAT state if configured to do so.
-			n.sendPunch(hostinfo)
-			n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
+			cm.sendPunch(hostinfo)
+			cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)
 			return doNothing, nil, nil
 
 		}
 
-		if n.punchy.GetTargetEverything() {
+		if cm.punchy.GetTargetEverything() {
 			// This is similar to the old punchy behavior with a slight optimization.
 			// We aren't receiving traffic but we are sending it, punch on all known
 			// ips in case we need to re-prime NAT state
-			n.sendPunch(hostinfo)
+			cm.sendPunch(hostinfo)
 		}
 
-		if n.l.Level >= logrus.DebugLevel {
-			hostinfo.logger(n.l).
+		if cm.l.Level >= logrus.DebugLevel {
+			hostinfo.logger(cm.l).
 				WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
 				Debug("Tunnel status")
 		}
@@ -392,95 +402,118 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time
 		decision = sendTestPacket
 
 	} else {
-		if n.l.Level >= logrus.DebugLevel {
-			hostinfo.logger(n.l).Debugf("Hostinfo sadness")
+		if cm.l.Level >= logrus.DebugLevel {
+			hostinfo.logger(cm.l).Debugf("Hostinfo sadness")
 		}
 	}
 
-	n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
-	n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
+	hostinfo.pendingDeletion.Store(true)
+	cm.trafficTimer.Add(hostinfo.localIndexId, cm.pendingDeletionInterval)
 	return decision, hostinfo, nil
 }
 
-func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
+func (cm *connectionManager) isInactive(hostinfo *HostInfo, now time.Time) (time.Duration, bool) {
+	if cm.dropInactive.Load() == false {
+		// We aren't configured to drop inactive tunnels
+		return 0, false
+	}
+
+	inactiveDuration := now.Sub(hostinfo.lastUsed)
+	if inactiveDuration < cm.getInactivityTimeout() {
+		// It's not considered inactive
+		return inactiveDuration, false
+	}
+
+	// The tunnel is inactive
+	return inactiveDuration, true
+}
+
+func (cm *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
 	// The primary tunnel is the most recent handshake to complete locally and should work entirely fine.
 	// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
 	// Let's sort this out.
 
-	if current.vpnIp.Compare(n.intf.myVpnNet.Addr()) < 0 {
+	if current.vpnIp.Compare(cm.intf.myVpnNet.Addr()) < 0 {
 		// Only one side should flip primary because if both flip then we may never resolve to a single tunnel.
 		// vpn ip is static across all tunnels for this host pair so lets use that to determine who is flipping.
 		// The remotes vpn ip is lower than mine. I will not flip.
 		return false
 	}
 
-	certState := n.intf.pki.GetCertState()
+	certState := cm.intf.pki.GetCertState()
 	return bytes.Equal(current.ConnectionState.myCert.Signature, certState.Certificate.Signature)
 }
 
-func (n *connectionManager) swapPrimary(current, primary *HostInfo) {
-	n.hostMap.Lock()
+func (cm *connectionManager) swapPrimary(current, primary *HostInfo) {
+	cm.hostMap.Lock()
 	// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
-	if n.hostMap.Hosts[current.vpnIp] == primary {
-		n.hostMap.unlockedMakePrimary(current)
+	if cm.hostMap.Hosts[current.vpnIp] == primary {
+		cm.hostMap.unlockedMakePrimary(current)
 	}
-	n.hostMap.Unlock()
+	cm.hostMap.Unlock()
 }
 
 // isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and
 // the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid
 // check and return true.
-func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
+func (cm *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
 	remoteCert := hostinfo.GetCert()
 	if remoteCert == nil {
 		return false
 	}
 
-	valid, err := remoteCert.VerifyWithCache(now, n.intf.pki.GetCAPool())
+	valid, err := remoteCert.VerifyWithCache(now, cm.intf.pki.GetCAPool())
 	if valid {
 		return false
 	}
 
-	if !n.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
+	if !cm.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
 		// Block listed certificates should always be disconnected
 		return false
 	}
 
 	fingerprint, _ := remoteCert.Sha256Sum()
-	hostinfo.logger(n.l).WithError(err).
+	hostinfo.logger(cm.l).WithError(err).
 		WithField("fingerprint", fingerprint).
 		Info("Remote certificate is no longer valid, tearing down the tunnel")
 
 	return true
 }
 
-func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
-	if !n.punchy.GetPunch() {
+func (cm *connectionManager) sendPunch(hostinfo *HostInfo) {
+	if !cm.punchy.GetPunch() {
 		// Punching is disabled
 		return
 	}
 
-	if n.punchy.GetTargetEverything() {
-		hostinfo.remotes.ForEach(n.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
-			n.metricsTxPunchy.Inc(1)
-			n.intf.outside.WriteTo([]byte{1}, addr)
+	if cm.intf.lightHouse.IsLighthouseIP(hostinfo.vpnIp) {
+		// Do not punch to lighthouses, we assume our lighthouse update interval is good enough.
+		// In the event the update interval is not sufficient to maintain NAT state then a publicly available lighthouse
+		// would lose the ability to notify us and punchy.respond would become unreliable.
+		return
+	}
+
+	if cm.punchy.GetTargetEverything() {
+		hostinfo.remotes.ForEach(cm.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
+			cm.metricsTxPunchy.Inc(1)
+			cm.intf.outside.WriteTo([]byte{1}, addr)
 		})
 
 	} else if hostinfo.remote.IsValid() {
-		n.metricsTxPunchy.Inc(1)
-		n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
+		cm.metricsTxPunchy.Inc(1)
+		cm.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
 	}
 }
 
-func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
-	certState := n.intf.pki.GetCertState()
+func (cm *connectionManager) tryRehandshake(hostinfo *HostInfo) {
+	certState := cm.intf.pki.GetCertState()
 	if bytes.Equal(hostinfo.ConnectionState.myCert.Signature, certState.Certificate.Signature) {
 		return
 	}
 
-	n.l.WithField("vpnIp", hostinfo.vpnIp).
+	cm.l.WithField("vpnIp", hostinfo.vpnIp).
 		WithField("reason", "local certificate is not current").
 		Info("Re-handshaking with remote")
 
-	n.intf.handshakeManager.StartHandshake(hostinfo.vpnIp, nil)
+	cm.intf.handshakeManager.StartHandshake(hostinfo.vpnIp, nil)
 }
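The traffic bookkeeping that used to live in shared `in`/`out` maps guarded by RWMutexes is now a pair of atomic flags plus a `lastUsed` timestamp on each HostInfo. Below is a standalone sketch of that read-and-reset pattern; the `trafficState` struct is a stand-in for the assumed HostInfo fields, not code from this changeset:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// trafficState mirrors the HostInfo fields the connection manager now uses:
// two atomic flags set on packet receive/send and a lastUsed timestamp.
type trafficState struct {
	in       atomic.Bool
	out      atomic.Bool
	lastUsed time.Time
}

// getAndReset follows the Swap(false) pattern from the diff: read the flags,
// clear them for the next tick, and refresh lastUsed if anything moved.
func (t *trafficState) getAndReset(now time.Time) (bool, bool) {
	in := t.in.Swap(false)
	out := t.out.Swap(false)
	if in || out {
		t.lastUsed = now
	}
	return in, out
}

func main() {
	ts := &trafficState{}
	ts.out.Store(true) // a packet went out since the last tick

	in, out := ts.getAndReset(time.Now())
	fmt.Println(in, out)       // false true
	fmt.Println(ts.out.Load()) // false, cleared for the next tick
}
```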
|||||||
@ -1,7 +1,6 @@
|
|||||||
package nebula
|
package nebula
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"crypto/ed25519"
|
"crypto/ed25519"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"net"
|
"net"
|
||||||
@ -65,10 +64,10 @@ func Test_NewConnectionManagerTest(t *testing.T) {
|
|||||||
ifce.pki.cs.Store(cs)
|
ifce.pki.cs.Store(cs)
|
||||||
|
|
||||||
// Create manager
|
// Create manager
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
conf := config.NewC(l)
|
||||||
defer cancel()
|
punchy := NewPunchyFromConfig(l, conf)
|
||||||
punchy := NewPunchyFromConfig(l, config.NewC(l))
|
nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
|
||||||
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
|
nc.intf = ifce
|
||||||
p := []byte("")
|
p := []byte("")
|
||||||
nb := make([]byte, 12, 12)
|
nb := make([]byte, 12, 12)
|
||||||
out := make([]byte, mtu)
|
out := make([]byte, mtu)
|
||||||
@ -86,31 +85,32 @@ func Test_NewConnectionManagerTest(t *testing.T) {
|
|||||||
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
||||||
|
|
||||||
// We saw traffic out to vpnIp
|
// We saw traffic out to vpnIp
|
||||||
nc.Out(hostinfo.localIndexId)
|
nc.Out(hostinfo)
|
||||||
nc.In(hostinfo.localIndexId)
|
nc.In(hostinfo)
|
||||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
assert.Contains(t, nc.out, hostinfo.localIndexId)
|
assert.True(t, hostinfo.out.Load())
|
||||||
|
assert.True(t, hostinfo.in.Load())
|
||||||
|
|
||||||
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
|
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
|
||||||
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||||
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
assert.False(t, hostinfo.out.Load())
|
||||||
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
assert.False(t, hostinfo.in.Load())
|
||||||
|
|
||||||
// Do another traffic check tick, this host should be pending deletion now
|
// Do another traffic check tick, this host should be pending deletion now
|
||||||
nc.Out(hostinfo.localIndexId)
|
nc.Out(hostinfo)
|
||||||
|
assert.True(t, hostinfo.out.Load())
|
||||||
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
assert.True(t, hostinfo.pendingDeletion.Load())
|
||||||
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
assert.False(t, hostinfo.out.Load())
|
||||||
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
assert.False(t, hostinfo.in.Load())
|
||||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
|
|
||||||
// Do a final traffic check tick, the host should now be removed
|
// Do a final traffic check tick, the host should now be removed
|
||||||
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
|
||||||
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
}
|
}
|
||||||
@ -148,10 +148,10 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
|||||||
ifce.pki.cs.Store(cs)
|
ifce.pki.cs.Store(cs)
|
||||||
|
|
||||||
// Create manager
|
// Create manager
|
||||||
ctx, cancel := context.WithCancel(context.Background())
|
conf := config.NewC(l)
|
||||||
defer cancel()
|
punchy := NewPunchyFromConfig(l, conf)
|
||||||
punchy := NewPunchyFromConfig(l, config.NewC(l))
|
nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
|
||||||
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
|
nc.intf = ifce
|
||||||
p := []byte("")
|
p := []byte("")
|
||||||
nb := make([]byte, 12, 12)
|
nb := make([]byte, 12, 12)
|
||||||
out := make([]byte, mtu)
|
out := make([]byte, mtu)
|
||||||
@ -169,33 +169,130 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
|||||||
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
||||||
|
|
||||||
// We saw traffic out to vpnIp
|
// We saw traffic out to vpnIp
|
||||||
nc.Out(hostinfo.localIndexId)
|
nc.Out(hostinfo)
|
||||||
nc.In(hostinfo.localIndexId)
|
nc.In(hostinfo)
|
||||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.vpnIp)
|
assert.True(t, hostinfo.in.Load())
|
||||||
|
assert.True(t, hostinfo.out.Load())
|
||||||
|
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
|
|
||||||
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
|
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
|
||||||
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||||
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
assert.False(t, hostinfo.out.Load())
|
||||||
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
assert.False(t, hostinfo.in.Load())
|
||||||
|
|
||||||
// Do another traffic check tick, this host should be pending deletion now
|
// Do another traffic check tick, this host should be pending deletion now
|
||||||
nc.Out(hostinfo.localIndexId)
|
nc.Out(hostinfo)
|
||||||
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
assert.True(t, hostinfo.pendingDeletion.Load())
|
||||||
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
assert.False(t, hostinfo.out.Load())
|
||||||
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
assert.False(t, hostinfo.in.Load())
|
||||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
|
|
||||||
// We saw traffic, should no longer be pending deletion
|
// We saw traffic, should no longer be pending deletion
|
||||||
nc.In(hostinfo.localIndexId)
|
nc.In(hostinfo)
|
||||||
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||||
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
assert.False(t, hostinfo.out.Load())
|
||||||
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
assert.False(t, hostinfo.in.Load())
|
||||||
|
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||||
|
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {
|
||||||
|
l := test.NewLogger()
|
||||||
|
vpncidr := netip.MustParsePrefix("172.1.1.1/24")
	localrange := netip.MustParsePrefix("10.1.1.1/24")
	vpnIp := netip.MustParseAddr("172.1.1.2")
	preferredRanges := []netip.Prefix{localrange}

	// Very incomplete mock objects
	hostMap := newHostMap(l, vpncidr)
	hostMap.preferredRanges.Store(&preferredRanges)

	cs := &CertState{
		RawCertificate:      []byte{},
		PrivateKey:          []byte{},
		Certificate:         &cert.NebulaCertificate{},
		RawCertificateNoKey: []byte{},
	}

	lh := newTestLighthouse()
	ifce := &Interface{
		hostMap:          hostMap,
		inside:           &test.NoopTun{},
		outside:          &udp.NoopConn{},
		firewall:         &Firewall{},
		lightHouse:       lh,
		pki:              &PKI{},
		handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
		l:                l,
	}
	ifce.pki.cs.Store(cs)

	// Create manager
	conf := config.NewC(l)
	conf.Settings["tunnels"] = map[interface{}]interface{}{
		"drop_inactive": true,
	}
	punchy := NewPunchyFromConfig(l, conf)
	nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
	assert.True(t, nc.dropInactive.Load())
	nc.intf = ifce

	// Add an ip we have established a connection w/ to hostmap
	hostinfo := &HostInfo{
		vpnIp:         vpnIp,
		localIndexId:  1099,
		remoteIndexId: 9901,
	}
	hostinfo.ConnectionState = &ConnectionState{
		myCert: &cert.NebulaCertificate{},
		H:      &noise.HandshakeState{},
	}
	nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

	// Do a traffic check tick, in and out should be cleared but should not be pending deletion
	nc.Out(hostinfo)
	nc.In(hostinfo)
	assert.True(t, hostinfo.out.Load())
	assert.True(t, hostinfo.in.Load())

	now := time.Now()
	decision, _, _ := nc.makeTrafficDecision(hostinfo.localIndexId, now)
	assert.Equal(t, tryRehandshake, decision)
	assert.Equal(t, now, hostinfo.lastUsed)
	assert.False(t, hostinfo.pendingDeletion.Load())
	assert.False(t, hostinfo.out.Load())
	assert.False(t, hostinfo.in.Load())

	decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*5))
	assert.Equal(t, doNothing, decision)
	assert.Equal(t, now, hostinfo.lastUsed)
	assert.False(t, hostinfo.pendingDeletion.Load())
	assert.False(t, hostinfo.out.Load())
	assert.False(t, hostinfo.in.Load())

	// Do another traffic check tick, should still not be pending deletion
	decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*10))
	assert.Equal(t, doNothing, decision)
	assert.Equal(t, now, hostinfo.lastUsed)
	assert.False(t, hostinfo.pendingDeletion.Load())
	assert.False(t, hostinfo.out.Load())
	assert.False(t, hostinfo.in.Load())
	assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
	assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)

	// Finally advance beyond the inactivity timeout
	decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Minute*10))
	assert.Equal(t, closeTunnel, decision)
	assert.Equal(t, now, hostinfo.lastUsed)
	assert.False(t, hostinfo.pendingDeletion.Load())
	assert.False(t, hostinfo.out.Load())
	assert.False(t, hostinfo.in.Load())
	assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
	assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
}
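The test above drives the new inactivity handling one tick at a time. As a reading aid, here is a minimal standalone sketch of the decision it exercises; this is not the nebula implementation, and the function and names below are purely illustrative: a tick that sees traffic keeps the tunnel, and only an idle tunnel past the timeout with drop_inactive enabled is closed.

package main

import (
	"fmt"
	"time"
)

// decide is an illustrative reduction of the traffic check the test exercises.
func decide(in, out, dropInactive bool, lastUsed, now time.Time, timeout time.Duration) string {
	if in || out {
		// Traffic was seen since the last tick; the tunnel is in use and lastUsed advances.
		return "in use"
	}
	if dropInactive && now.Sub(lastUsed) >= timeout {
		return "closeTunnel"
	}
	return "doNothing"
}

func main() {
	start := time.Now()
	fmt.Println(decide(false, false, true, start, start.Add(10*time.Second), 10*time.Minute)) // doNothing
	fmt.Println(decide(false, false, true, start, start.Add(10*time.Minute), 10*time.Minute)) // closeTunnel
}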
@@ -273,10 +370,10 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
 	ifce.disconnectInvalid.Store(true)

 	// Create manager
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	punchy := NewPunchyFromConfig(l, config.NewC(l))
-	nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
+	conf := config.NewC(l)
+	punchy := NewPunchyFromConfig(l, conf)
+	nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
+	nc.intf = ifce
 	ifce.connectionManager = nc

 	hostinfo := &HostInfo{
control.go (20 changed lines)

@@ -26,14 +26,15 @@ type controlHostLister interface {
 }

 type Control struct {
 	f               *Interface
 	l               *logrus.Logger
 	ctx             context.Context
 	cancel          context.CancelFunc
 	sshStart        func()
 	statsStart      func()
 	dnsStart        func()
 	lighthouseStart func()
+	connectionManagerStart func(context.Context)
 }

 type ControlHostInfo struct {
@@ -63,6 +64,9 @@ func (c *Control) Start() {
 	if c.dnsStart != nil {
 		go c.dnsStart()
 	}
+	if c.connectionManagerStart != nil {
+		go c.connectionManagerStart(c.ctx)
+	}
 	if c.lighthouseStart != nil {
 		c.lighthouseStart()
 	}
@@ -66,7 +66,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
 		localIndexId: 201,
 		vpnIp:        vpnIp,
 		relayState: RelayState{
-			relays:        map[netip.Addr]struct{}{},
+			relays:        nil,
 			relayForByIp:  map[netip.Addr]*Relay{},
 			relayForByIdx: map[uint32]*Relay{},
 		},
@@ -85,7 +85,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
 		localIndexId: 201,
 		vpnIp:        vpnIp2,
 		relayState: RelayState{
-			relays:        map[netip.Addr]struct{}{},
+			relays:        nil,
 			relayForByIp:  map[netip.Addr]*Relay{},
 			relayForByIdx: map[uint32]*Relay{},
 		},
@@ -4,11 +4,13 @@
 package e2e

 import (
-	"fmt"
 	"net/netip"
+	"slices"
 	"testing"
 	"time"

+	"github.com/google/gopacket"
+	"github.com/google/gopacket/layers"
 	"github.com/sirupsen/logrus"
 	"github.com/slackhq/nebula"
 	"github.com/slackhq/nebula/e2e/router"
@@ -369,6 +371,137 @@ func TestRelays(t *testing.T) {
 	//TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it
 }

+func TestReestablishRelays(t *testing.T) {
+	ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
+	myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
+	relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
+	theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
+
+	// Teach my how to get to the relay and that their can be reached via the relay
+	myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
+	myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
+	relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+
+	// Build a router so we don't have to reason who gets which packet
+	r := router.NewR(t, myControl, relayControl, theirControl)
+	defer r.RenderFlow()
+
+	// Start the servers
+	myControl.Start()
+	relayControl.Start()
+	theirControl.Start()
+
+	t.Log("Trigger a handshake from me to them via the relay")
+	myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
+
+	p := r.RouteForAllUntilTxTun(theirControl)
+	r.Log("Assert the tunnel works")
+	assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
+
+	t.Log("Ensure packet traversal from them to me via the relay")
+	theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
+
+	p = r.RouteForAllUntilTxTun(myControl)
+	r.Log("Assert the tunnel works")
+	assertUdpPacket(t, []byte("Hi from them"), p, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), 80, 80)
+
+	// If we break the relay's connection to 'them', 'me' needs to detect and recover the connection
+	r.Log("Close the tunnel")
+	relayControl.CloseTunnel(theirVpnIpNet.Addr(), true)
+
+	start := len(myControl.GetHostmap().Indexes)
+	curIndexes := len(myControl.GetHostmap().Indexes)
+	for curIndexes >= start {
+		curIndexes = len(myControl.GetHostmap().Indexes)
+		r.Logf("Wait for the dead index to go away:start=%v indexes, current=%v indexes", start, curIndexes)
+		myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me should fail"))
+
+		r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
+			return router.RouteAndExit
+		})
+		time.Sleep(2 * time.Second)
+	}
+	r.Log("Dead index went away. Woot!")
+	r.RenderHostmaps("Me removed hostinfo", myControl, relayControl, theirControl)
+	// Next packet should re-establish a relayed connection and work just great.
+
+	t.Logf("Assert the tunnel...")
+	for {
+		t.Log("RouteForAllUntilTxTun")
+		myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
+		myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
+		relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+		myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
+
+		p = r.RouteForAllUntilTxTun(theirControl)
+		r.Log("Assert the tunnel works")
+		packet := gopacket.NewPacket(p, layers.LayerTypeIPv4, gopacket.Lazy)
+		v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
+		if slices.Compare(v4.SrcIP, myVpnIpNet.Addr().AsSlice()) != 0 {
+			t.Logf("SrcIP is unexpected...this is not the packet I'm looking for. Keep looking")
+			continue
+		}
+		if slices.Compare(v4.DstIP, theirVpnIpNet.Addr().AsSlice()) != 0 {
+			t.Logf("DstIP is unexpected...this is not the packet I'm looking for. Keep looking")
+			continue
+		}
+
+		udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
+		if udp == nil {
+			t.Log("Not a UDP packet. This is not the packet I'm looking for. Keep looking")
+			continue
+		}
+		data := packet.ApplicationLayer()
+		if data == nil {
+			t.Log("No data found in packet. This is not the packet I'm looking for. Keep looking.")
+			continue
+		}
+		if string(data.Payload()) != "Hi from me" {
+			t.Logf("Unexpected payload: '%v', keep looking", string(data.Payload()))
+			continue
+		}
+		t.Log("I found my lost packet. I am so happy.")
+		break
+	}
+	t.Log("Assert the tunnel works the other way, too")
+	for {
+		t.Log("RouteForAllUntilTxTun")
+		theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
+
+		p = r.RouteForAllUntilTxTun(myControl)
+		r.Log("Assert the tunnel works")
+		packet := gopacket.NewPacket(p, layers.LayerTypeIPv4, gopacket.Lazy)
+		v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
+		if slices.Compare(v4.DstIP, myVpnIpNet.Addr().AsSlice()) != 0 {
+			t.Logf("Dst is unexpected...this is not the packet I'm looking for. Keep looking")
+			continue
+		}
+		if slices.Compare(v4.SrcIP, theirVpnIpNet.Addr().AsSlice()) != 0 {
+			t.Logf("SrcIP is unexpected...this is not the packet I'm looking for. Keep looking")
+			continue
+		}

+		udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
+		if udp == nil {
+			t.Log("Not a UDP packet. This is not the packet I'm looking for. Keep looking")
+			continue
+		}
+		data := packet.ApplicationLayer()
+		if data == nil {
+			t.Log("No data found in packet. This is not the packet I'm looking for. Keep looking.")
+			continue
+		}
+		if string(data.Payload()) != "Hi from them" {
+			t.Logf("Unexpected payload: '%v', keep looking", string(data.Payload()))
+			continue
+		}
+		t.Log("I found my lost packet. I am so happy.")
+		break
+	}
+	r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)
+}
+
 func TestStage1RaceRelays(t *testing.T) {
 	//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
 	ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
@@ -830,9 +963,8 @@ func TestRehandshakingLoser(t *testing.T) {
 	t.Log("Stand up a tunnel between me and them")
 	assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)

-	tt1 := myControl.GetHostInfoByVpnIp(theirVpnIpNet.Addr(), false)
-	tt2 := theirControl.GetHostInfoByVpnIp(myVpnIpNet.Addr(), false)
-	fmt.Println(tt1.LocalIndex, tt2.LocalIndex)
+	myControl.GetHostInfoByVpnIp(theirVpnIpNet.Addr(), false)
+	theirControl.GetHostInfoByVpnIp(myVpnIpNet.Addr(), false)

 	r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
@@ -690,6 +690,7 @@ func (r *R) FlushAll() {
 			r.Unlock()
 			panic("Can't FlushAll for host: " + p.To.String())
 		}
+		receiver.InjectUDPPacket(p)
 		r.Unlock()
 	}
 }
e2e/tunnels_test.go (new file, 55 lines)

@@ -0,0 +1,55 @@
//go:build e2e_testing
// +build e2e_testing

package e2e

import (
	"testing"
	"time"

	"github.com/slackhq/nebula/e2e/router"
)

func TestDropInactiveTunnels(t *testing.T) {
	// The goal of this test is to ensure the shortest inactivity timeout will close the tunnel on both sides
	// under ideal conditions
	ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
	myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "5s"}})
	theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "10m"}})

	// Share our underlay information
	myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
	theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)

	// Start the servers
	myControl.Start()
	theirControl.Start()

	r := router.NewR(t, myControl, theirControl)

	r.Log("Assert the tunnel between me and them works")
	assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)

	r.Log("Go inactive and wait for the tunnels to get dropped")
	waitStart := time.Now()
	for {
		myIndexes := len(myControl.GetHostmap().Indexes)
		theirIndexes := len(theirControl.GetHostmap().Indexes)
		if myIndexes == 0 && theirIndexes == 0 {
			break
		}

		since := time.Since(waitStart)
		r.Logf("my tunnels: %v; their tunnels: %v; duration: %v", myIndexes, theirIndexes, since)
		if since > time.Second*30 {
			t.Fatal("Tunnel should have been declared inactive after 5 seconds and before 30 seconds")
		}

		time.Sleep(1 * time.Second)
		r.FlushAll()
	}

	r.Logf("Inactive tunnels were dropped within %v", time.Since(waitStart))
	myControl.Stop()
	theirControl.Stop()
}
@@ -303,6 +303,18 @@ logging:
   # after receiving the response for lighthouse queries
   #trigger_buffer: 64

+# Tunnel manager settings
+#tunnels:
+  # drop_inactive controls whether inactive tunnels are maintained or dropped after the inactivity_timeout period has
+  # elapsed.
+  # In general, it is a good idea to enable this setting. It will be enabled by default in a future release.
+  # This setting is reloadable
+  #drop_inactive: false
+
+  # inactivity_timeout controls how long a tunnel MUST NOT see any inbound or outbound traffic before being considered
+  # inactive and eligible to be dropped.
+  # This setting is reloadable
+  #inactivity_timeout: 10m
+
 # Nebula security group configuration
 firewall:
|
|||||||
HandshakePacket: make(map[uint8][]byte, 0),
|
HandshakePacket: make(map[uint8][]byte, 0),
|
||||||
lastHandshakeTime: hs.Details.Time,
|
lastHandshakeTime: hs.Details.Time,
|
||||||
relayState: RelayState{
|
relayState: RelayState{
|
||||||
relays: map[netip.Addr]struct{}{},
|
relays: nil,
|
||||||
relayForByIp: map[netip.Addr]*Relay{},
|
relayForByIp: map[netip.Addr]*Relay{},
|
||||||
relayForByIdx: map[uint32]*Relay{},
|
relayForByIdx: map[uint32]*Relay{},
|
||||||
},
|
},
|
||||||
@ -322,6 +322,9 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
|
hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
|
||||||
|
// I successfully received a handshake. Just in case I marked this tunnel as 'Disestablished', ensure
|
||||||
|
// it's correctly marked as working.
|
||||||
|
via.relayHI.relayState.UpdateRelayForByIdxState(via.remoteIdx, Established)
|
||||||
f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
|
f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
|
||||||
f.l.WithField("vpnIp", vpnIp).WithField("relay", via.relayHI.vpnIp).
|
f.l.WithField("vpnIp", vpnIp).WithField("relay", via.relayHI.vpnIp).
|
||||||
WithField("certName", certName).
|
WithField("certName", certName).
|
||||||
@ -332,7 +335,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
|||||||
Info("Handshake message sent")
|
Info("Handshake message sent")
|
||||||
}
|
}
|
||||||
|
|
||||||
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
|
f.connectionManager.AddTrafficWatch(hostinfo)
|
||||||
|
|
||||||
hostinfo.remotes.ResetBlockedRemotes()
|
hostinfo.remotes.ResetBlockedRemotes()
|
||||||
|
|
||||||
@ -490,7 +493,7 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
|
|||||||
|
|
||||||
// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
|
// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
|
||||||
f.handshakeManager.Complete(hostinfo, f)
|
f.handshakeManager.Complete(hostinfo, f)
|
||||||
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
|
f.connectionManager.AddTrafficWatch(hostinfo)
|
||||||
|
|
||||||
if f.l.Level >= logrus.DebugLevel {
|
if f.l.Level >= logrus.DebugLevel {
|
||||||
hostinfo.logger(f.l).Debugf("Sending %d stored packets", len(hh.packetStore))
|
hostinfo.logger(f.l).Debugf("Sending %d stored packets", len(hh.packetStore))
|
||||||
|
|||||||
@@ -278,48 +278,8 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
 			continue
 		}
 		// Check the relay HostInfo to see if we already established a relay through it
-		if existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp); ok {
-			switch existingRelay.State {
-			case Established:
-				hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Send handshake via relay")
-				hm.f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
-			case Requested:
-				hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
-
-				//TODO: IPV6-WORK
-				myVpnIpB := hm.f.myVpnNet.Addr().As4()
-				theirVpnIpB := vpnIp.As4()
-
-				// Re-send the CreateRelay request, in case the previous one was lost.
-				m := NebulaControl{
-					Type:                NebulaControl_CreateRelayRequest,
-					InitiatorRelayIndex: existingRelay.LocalIndex,
-					RelayFromIp:         binary.BigEndian.Uint32(myVpnIpB[:]),
-					RelayToIp:           binary.BigEndian.Uint32(theirVpnIpB[:]),
-				}
-				msg, err := m.Marshal()
-				if err != nil {
-					hostinfo.logger(hm.l).
-						WithError(err).
-						Error("Failed to marshal Control message to create relay")
-				} else {
-					// This must send over the hostinfo, not over hm.Hosts[ip]
-					hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
-					hm.l.WithFields(logrus.Fields{
-						"relayFrom":           hm.f.myVpnNet.Addr(),
-						"relayTo":             vpnIp,
-						"initiatorRelayIndex": existingRelay.LocalIndex,
-						"relay":               relay}).
-						Info("send CreateRelayRequest")
-				}
-			default:
-				hostinfo.logger(hm.l).
-					WithField("vpnIp", vpnIp).
-					WithField("state", existingRelay.State).
-					WithField("relay", relayHostInfo.vpnIp).
-					Errorf("Relay unexpected state")
-			}
-		} else {
+		existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp)
+		if !ok {
 			// No relays exist or requested yet.
 			if relayHostInfo.remote.IsValid() {
 				idx, err := AddRelay(hm.l, relayHostInfo, hm.mainHostMap, vpnIp, nil, TerminalType, Requested)
@@ -352,6 +312,52 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
 					Info("send CreateRelayRequest")
 				}
 			}
+			continue
+		}
+		switch existingRelay.State {
+		case Established:
+			hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Send handshake via relay")
+			hm.f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
+		case Disestablished:
+			// Mark this relay as 'requested'
+			relayHostInfo.relayState.UpdateRelayForByIpState(vpnIp, Requested)
+			fallthrough
+		case Requested:
+			hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
+			// Re-send the CreateRelay request, in case the previous one was lost.
+			relayFrom := hm.f.myVpnNet.Addr().As4()
+			relayTo := vpnIp.As4()
+			m := NebulaControl{
+				Type:                NebulaControl_CreateRelayRequest,
+				InitiatorRelayIndex: existingRelay.LocalIndex,
+				RelayFromIp:         binary.BigEndian.Uint32(relayFrom[:]),
+				RelayToIp:           binary.BigEndian.Uint32(relayTo[:]),
+			}

+			msg, err := m.Marshal()
+			if err != nil {
+				hostinfo.logger(hm.l).
+					WithError(err).
+					Error("Failed to marshal Control message to create relay")
+			} else {
+				// This must send over the hostinfo, not over hm.Hosts[ip]
+				hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
+				hm.l.WithFields(logrus.Fields{
+					"relayFrom":           hm.f.myVpnNet,
+					"relayTo":             vpnIp,
+					"initiatorRelayIndex": existingRelay.LocalIndex,
+					"relay":               relay}).
+					Info("send CreateRelayRequest")
+			}
+		case PeerRequested:
+			// PeerRequested only occurs in Forwarding relays, not Terminal relays, and this is a Terminal relay case.
+			fallthrough
+		default:
+			hostinfo.logger(hm.l).
+				WithField("vpnIp", vpnIp).
+				WithField("state", existingRelay.State).
+				WithField("relay", relay).
+				Errorf("Relay unexpected state")
 		}
 	}
 }
@@ -397,7 +403,7 @@ func (hm *HandshakeManager) StartHandshake(vpnIp netip.Addr, cacheCb func(*Hands
 		vpnIp:           vpnIp,
 		HandshakePacket: make(map[uint8][]byte, 0),
 		relayState: RelayState{
-			relays:        map[netip.Addr]struct{}{},
+			relays:        nil,
 			relayForByIp:  map[netip.Addr]*Relay{},
 			relayForByIdx: map[uint32]*Relay{},
 		},
hostmap.go (83 changed lines)

@@ -4,6 +4,7 @@ import (
 	"errors"
 	"net"
 	"net/netip"
+	"slices"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -35,6 +36,7 @@ const (
 	Requested = iota
 	PeerRequested
 	Established
+	Disestablished
 )

 const (
@@ -68,15 +70,42 @@ type HostMap struct {
 type RelayState struct {
 	sync.RWMutex

-	relays        map[netip.Addr]struct{} // Set of VpnIp's of Hosts to use as relays to access this peer
+	relays        []netip.Addr          // Ordered set of VpnIp's of Hosts to use as relays to access this peer
 	relayForByIp  map[netip.Addr]*Relay // Maps VpnIps of peers for which this HostInfo is a relay to some Relay info
 	relayForByIdx map[uint32]*Relay     // Maps a local index to some Relay info
 }

 func (rs *RelayState) DeleteRelay(ip netip.Addr) {
 	rs.Lock()
 	defer rs.Unlock()
-	delete(rs.relays, ip)
+	for idx, val := range rs.relays {
+		if val == ip {
+			rs.relays = append(rs.relays[:idx], rs.relays[idx+1:]...)
+			return
+		}
+	}
+}
+
+func (rs *RelayState) UpdateRelayForByIpState(vpnIp netip.Addr, state int) {
+	rs.Lock()
+	defer rs.Unlock()
+	if r, ok := rs.relayForByIp[vpnIp]; ok {
+		newRelay := *r
+		newRelay.State = state
+		rs.relayForByIp[newRelay.PeerIp] = &newRelay
+		rs.relayForByIdx[newRelay.LocalIndex] = &newRelay
+	}
+}
+
+func (rs *RelayState) UpdateRelayForByIdxState(idx uint32, state int) {
+	rs.Lock()
+	defer rs.Unlock()
+	if r, ok := rs.relayForByIdx[idx]; ok {
+		newRelay := *r
+		newRelay.State = state
+		rs.relayForByIp[newRelay.PeerIp] = &newRelay
+		rs.relayForByIdx[newRelay.LocalIndex] = &newRelay
+	}
 }

 func (rs *RelayState) CopyAllRelayFor() []*Relay {
@@ -99,16 +128,16 @@ func (rs *RelayState) GetRelayForByIp(ip netip.Addr) (*Relay, bool) {
 func (rs *RelayState) InsertRelayTo(ip netip.Addr) {
 	rs.Lock()
 	defer rs.Unlock()
-	rs.relays[ip] = struct{}{}
+	if !slices.Contains(rs.relays, ip) {
+		rs.relays = append(rs.relays, ip)
+	}
 }

 func (rs *RelayState) CopyRelayIps() []netip.Addr {
+	ret := make([]netip.Addr, len(rs.relays))
 	rs.RLock()
 	defer rs.RUnlock()
-	ret := make([]netip.Addr, 0, len(rs.relays))
-	for ip := range rs.relays {
-		ret = append(ret, ip)
-	}
+	copy(ret, rs.relays)
 	return ret
 }
@@ -219,6 +248,14 @@ type HostInfo struct {
 	// Used to track other hostinfos for this vpn ip since only 1 can be primary
 	// Synchronised via hostmap lock and not the hostinfo lock.
 	next, prev *HostInfo
+
+	//TODO: in, out, and others might benefit from being an atomic.Int32. We could collapse connectionManager pendingDeletion, relayUsed, and in/out into this 1 thing
+	in, out, pendingDeletion atomic.Bool
+
+	// lastUsed tracks the last time ConnectionManager checked the tunnel and it was in use.
+	// This value will be behind against actual tunnel utilization in the hot path.
+	// This should only be used by the ConnectionManagers ticker routine.
+	lastUsed time.Time
 }

 type ViaSender struct {
@@ -361,6 +398,7 @@ func (hm *HostMap) unlockedMakePrimary(hostinfo *HostInfo) {

 func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
 	primary, ok := hm.Hosts[hostinfo.vpnIp]
+	isLastHostinfo := hostinfo.next == nil && hostinfo.prev == nil
 	if ok && primary == hostinfo {
 		// The vpnIp pointer points to the same hostinfo as the local index id, we can remove it
 		delete(hm.Hosts, hostinfo.vpnIp)
@@ -410,6 +448,12 @@ func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
 			Debug("Hostmap hostInfo deleted")
 	}

+	if isLastHostinfo {
+		// I have lost connectivity to my peers. My relay tunnel is likely broken. Mark the next
+		// hops as 'Disestablished' so that new relay tunnels are created in the future.
+		hm.unlockedDisestablishVpnAddrRelayFor(hostinfo)
+	}
+	// Clean up any local relay indexes for which I am acting as a relay hop
 	for _, localRelayIdx := range hostinfo.relayState.CopyRelayForIdxs() {
 		delete(hm.Relays, localRelayIdx)
 	}
@@ -470,6 +514,27 @@ func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp netip.Addr) (*HostIn
 	return nil, nil, errors.New("unable to find host with relay")
 }

+func (hm *HostMap) unlockedDisestablishVpnAddrRelayFor(hi *HostInfo) {
+	for _, relayHostIp := range hi.relayState.CopyRelayIps() {
+		if h, ok := hm.Hosts[relayHostIp]; ok {
+			for h != nil {
+				h.relayState.UpdateRelayForByIpState(hi.vpnIp, Disestablished)
+				h = h.next
+			}
+		}
+	}
+	for _, rs := range hi.relayState.CopyAllRelayFor() {
+		if rs.Type == ForwardingType {
+			if h, ok := hm.Hosts[rs.PeerIp]; ok {
+				for h != nil {
+					h.relayState.UpdateRelayForByIpState(hi.vpnIp, Disestablished)
+					h = h.next
+				}
+			}
+		}
+	}
+}
+
 func (hm *HostMap) queryVpnIp(vpnIp netip.Addr, promoteIfce *Interface) *HostInfo {
 	hm.RLock()
 	if h, ok := hm.Hosts[vpnIp]; ok {
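The switch above from a set-like map to an ordered slice is what makes relay selection deterministic when several relays are known for a peer. A small standalone sketch of the difference (this is not nebula code; the addresses are made up):

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// Iterating a map picks a relay in an unspecified order, so two runs (or two hosts)
	// can settle on different relay tunnels for the same peer.
	set := map[netip.Addr]struct{}{
		netip.MustParseAddr("10.128.0.128"): {},
		netip.MustParseAddr("10.128.0.129"): {},
	}
	for addr := range set {
		fmt.Println("map pick:", addr) // order is not guaranteed by Go
		break
	}

	// An ordered slice always yields the first relay that was inserted.
	list := []netip.Addr{
		netip.MustParseAddr("10.128.0.128"),
		netip.MustParseAddr("10.128.0.129"),
	}
	fmt.Println("slice pick:", list[0]) // deterministic
}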
@@ -7,6 +7,7 @@ import (
 	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/test"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 func TestHostMap_MakePrimary(t *testing.T) {
@@ -225,3 +226,31 @@ func TestHostMap_reload(t *testing.T) {
 	c.ReloadConfigString("preferred_ranges: [1.1.1.1/32]")
 	assert.EqualValues(t, []string{"1.1.1.1/32"}, toS(hm.GetPreferredRanges()))
 }
+
+func TestHostMap_RelayState(t *testing.T) {
+	h1 := &HostInfo{vpnIp: netip.MustParseAddr("0.0.0.1"), localIndexId: 1}
+	a1 := netip.MustParseAddr("::1")
+	a2 := netip.MustParseAddr("2001::1")
+
+	h1.relayState.InsertRelayTo(a1)
+	assert.Equal(t, h1.relayState.relays, []netip.Addr{a1})
+	h1.relayState.InsertRelayTo(a2)
+	assert.Equal(t, h1.relayState.relays, []netip.Addr{a1, a2})
+	// Ensure that the first relay added is the first one returned in the copy
+	currentRelays := h1.relayState.CopyRelayIps()
+	require.Len(t, currentRelays, 2)
+	assert.Equal(t, currentRelays[0], a1)
+
+	// Deleting the last one in the list works ok
+	h1.relayState.DeleteRelay(a2)
+	assert.Equal(t, h1.relayState.relays, []netip.Addr{a1})
+
+	// Deleting an element not in the list works ok
+	h1.relayState.DeleteRelay(a2)
+	assert.Equal(t, h1.relayState.relays, []netip.Addr{a1})
+
+	// Deleting the only element in the list works ok
+	h1.relayState.DeleteRelay(a1)
+	assert.Equal(t, h1.relayState.relays, []netip.Addr{})
+
+}
@@ -213,7 +213,7 @@ func (f *Interface) SendVia(via *HostInfo,
 	c := via.ConnectionState.messageCounter.Add(1)

 	out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c)
-	f.connectionManager.Out(via.localIndexId)
+	f.connectionManager.Out(via)

 	// Authenticate the header and payload, but do not encrypt for this message type.
 	// The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload.
@@ -282,7 +282,7 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType

 	//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
 	out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c)
-	f.connectionManager.Out(hostinfo.localIndexId)
+	f.connectionManager.Out(hostinfo)

 	// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
 	// all our IPs and enable a faster roaming.
interface.go (41 changed lines)

@@ -24,24 +24,23 @@ import (
 const mtu = 9001

 type InterfaceConfig struct {
 	HostMap            *HostMap
 	Outside            udp.Conn
 	Inside             overlay.Device
 	pki                *PKI
 	Cipher             string
 	Firewall           *Firewall
 	ServeDns           bool
 	HandshakeManager   *HandshakeManager
 	lightHouse         *LightHouse
-	checkInterval           time.Duration
-	pendingDeletionInterval time.Duration
+	connectionManager  *connectionManager
 	DropLocalBroadcast bool
 	DropMulticast      bool
 	routines           int
 	MessageMetrics     *MessageMetrics
 	version            string
 	relayManager       *relayManager
 	punchy             *Punchy

 	tryPromoteEvery uint32
 	reQueryEvery    uint32
@@ -154,6 +153,9 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
 	if c.Firewall == nil {
 		return nil, errors.New("no firewall rules")
 	}
+	if c.connectionManager == nil {
+		return nil, errors.New("no connection manager")
+	}

 	certificate := c.pki.GetCertState().Certificate
@@ -196,6 +198,7 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
 		readers:      make([]io.ReadWriteCloser, c.routines),
 		myVpnNet:     myVpnNet,
 		relayManager: c.relayManager,
+		connectionManager: c.connectionManager,

 		conntrackCacheTimeout: c.ConntrackCacheTimeout,
@@ -219,7 +222,7 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
 	ifce.reQueryEvery.Store(c.reQueryEvery)
 	ifce.reQueryWait.Store(int64(c.reQueryWait))

-	ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval, c.punchy)
+	ifce.connectionManager.intf = ifce

 	return ifce, nil
 }
main.go (46 changed lines)

@@ -199,6 +199,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg

 	hostMap := NewHostMapFromConfig(l, tunCidr, c)
 	punchy := NewPunchyFromConfig(l, c)
+	connManager := newConnectionManagerFromConfig(l, c, hostMap, punchy)
 	lightHouse, err := NewLightHouseFromConfig(ctx, l, c, tunCidr, udpConns[0], punchy)
 	if err != nil {
 		return nil, util.ContextualizeIfNeeded("Failed to initialize lighthouse handler", err)
@@ -234,31 +235,27 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 		}
 	}

-	checkInterval := c.GetInt("timers.connection_alive_interval", 5)
-	pendingDeletionInterval := c.GetInt("timers.pending_deletion_interval", 10)
-
 	ifConfig := &InterfaceConfig{
 		HostMap:            hostMap,
 		Inside:             tun,
 		Outside:            udpConns[0],
 		pki:                pki,
 		Cipher:             c.GetString("cipher", "aes"),
 		Firewall:           fw,
 		ServeDns:           serveDns,
 		HandshakeManager:   handshakeManager,
+		connectionManager:  connManager,
 		lightHouse:         lightHouse,
-		checkInterval:           time.Second * time.Duration(checkInterval),
-		pendingDeletionInterval: time.Second * time.Duration(pendingDeletionInterval),
 		tryPromoteEvery:    c.GetUint32("counters.try_promote", defaultPromoteEvery),
 		reQueryEvery:       c.GetUint32("counters.requery_every_packets", defaultReQueryEvery),
 		reQueryWait:        c.GetDuration("timers.requery_wait_duration", defaultReQueryWait),
 		DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
 		DropMulticast:      c.GetBool("tun.drop_multicast", false),
 		routines:           routines,
 		MessageMetrics:     messageMetrics,
 		version:            buildVersion,
 		relayManager:       NewRelayManager(ctx, l, hostMap, c),
 		punchy:             punchy,

 		ConntrackCacheTimeout: conntrackCacheTimeout,
 		l:                     l,
@@ -325,5 +322,6 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 		statsStart,
 		dnsStart,
 		lightHouse.StartUpdateWorker,
+		connManager.Start,
 	}, nil
 }
@@ -102,7 +102,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
 		// Pull the Roaming parts up here, and return in all call paths.
 		f.handleHostRoaming(hostinfo, ip)
 		// Track usage of both the HostInfo and the Relay for the received & authenticated packet
-		f.connectionManager.In(hostinfo.localIndexId)
+		f.connectionManager.In(hostinfo)
 		f.connectionManager.RelayUsed(h.RemoteIndex)

 		relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex)
@@ -246,7 +246,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []

 	f.handleHostRoaming(hostinfo, ip)

-	f.connectionManager.In(hostinfo.localIndexId)
+	f.connectionManager.In(hostinfo)
 }

 // closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote
@@ -418,7 +418,7 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
 		return false
 	}

-	f.connectionManager.In(hostinfo.localIndexId)
+	f.connectionManager.In(hostinfo)
 	_, err = f.readers[q].Write(out)
 	if err != nil {
 		f.l.WithError(err).Error("Failed to write to tun")
pki.go (21 changed lines)

@@ -223,22 +223,13 @@ func loadCAPoolFromConfig(l *logrus.Logger, c *config.C) (*cert.NebulaCAPool, er
 		}
 	}

-	caPool, err := cert.NewCAPoolFromBytes(rawCA)
-	if errors.Is(err, cert.ErrExpired) {
-		var expired int
-		for _, crt := range caPool.CAs {
-			if crt.Expired(time.Now()) {
-				expired++
-				l.WithField("cert", crt).Warn("expired certificate present in CA pool")
-			}
-		}
-
-		if expired >= len(caPool.CAs) {
-			return nil, errors.New("no valid CA certificates present")
-		}
-
-	} else if err != nil {
-		return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err)
+	caPool, warnings, err := cert.NewCAPoolFromBytes(rawCA)
+	for _, w := range warnings {
+		l.WithError(w).Warn("parsing a CA certificate failed")
+	}
+
+	if err != nil {
+		return nil, fmt.Errorf("could not create CA certificate pool: %s", err)
 	}

 	for _, fp := range c.GetStringSlice("pki.blocklist", []string{}) {
relay_manager.go (136 changed lines)

@@ -146,10 +146,14 @@ func (rm *relayManager) handleCreateRelayResponse(h *HostInfo, f *Interface, m *
 		rm.l.WithField("relayTo", peerHostInfo.vpnIp).Error("peerRelay does not have Relay state for relayTo")
 		return
 	}
-	if peerRelay.State == PeerRequested {
+	switch peerRelay.State {
+	case Requested:
+		// I initiated the request to this peer, but haven't heard back from the peer yet. I must wait for this peer
+		// to respond to complete the connection.
+	case PeerRequested, Disestablished, Established:
+		peerHostInfo.relayState.UpdateRelayForByIpState(targetAddr, Established)
 		//TODO: IPV6-WORK
 		b = peerHostInfo.vpnIp.As4()
-		peerRelay.State = Established
 		resp := NebulaControl{
 			Type:                NebulaControl_CreateRelayResponse,
 			ResponderRelayIndex: peerRelay.LocalIndex,
@@ -215,6 +219,21 @@ func (rm *relayManager) handleCreateRelayRequest(h *HostInfo, f *Interface, m *N
 				"existingRemoteIndex": existingRelay.RemoteIndex}).Error("Existing relay mismatch with CreateRelayRequest")
 			return
 		}
+	case Disestablished:
+		if existingRelay.RemoteIndex != m.InitiatorRelayIndex {
+			// We got a brand new Relay request, because its index is different than what we saw before.
+			// This should never happen. The peer should never change an index, once created.
+			logMsg.WithFields(logrus.Fields{
+				"existingRemoteIndex": existingRelay.RemoteIndex}).Error("Existing relay mismatch with CreateRelayRequest")
+			return
+		}
+		// Mark the relay as 'Established' because it's safe to use again
+		h.relayState.UpdateRelayForByIpState(from, Established)
+	case PeerRequested:
+		// I should never be in this state, because I am terminal, not forwarding.
+		logMsg.WithFields(logrus.Fields{
+			"existingRemoteIndex": existingRelay.RemoteIndex,
+			"state":               existingRelay.State}).Error("Unexpected Relay State found")
 		}
 	} else {
 		_, err := AddRelay(rm.l, h, f.hostMap, from, &m.InitiatorRelayIndex, TerminalType, Established)
@@ -226,7 +245,7 @@ func (rm *relayManager) handleCreateRelayRequest(h *HostInfo, f *Interface, m *N

 	relay, ok := h.relayState.QueryRelayForByIp(from)
 	if !ok {
-		logMsg.Error("Relay State not found")
+		logMsg.WithField("from", from).Error("Relay State not found")
 		return
 	}
@@ -273,103 +292,52 @@ func (rm *relayManager) handleCreateRelayRequest(h *HostInfo, f *Interface, m *N
 			// Only create relays to peers for whom I have a direct connection
 			return
 		}
-		sendCreateRequest := false
 		var index uint32
 		var err error
 		targetRelay, ok := peer.relayState.QueryRelayForByIp(from)
 		if ok {
 			index = targetRelay.LocalIndex
-			if targetRelay.State == Requested {
-				sendCreateRequest = true
-			}
 		} else {
 			// Allocate an index in the hostMap for this relay peer
 			index, err = AddRelay(rm.l, peer, f.hostMap, from, nil, ForwardingType, Requested)
 			if err != nil {
 				return
 			}
-			sendCreateRequest = true
 		}
-		if sendCreateRequest {
-			//TODO: IPV6-WORK
-			fromB := h.vpnIp.As4()
-			targetB := target.As4()
-
-			// Send a CreateRelayRequest to the peer.
-			req := NebulaControl{
-				Type:                NebulaControl_CreateRelayRequest,
-				InitiatorRelayIndex: index,
-				RelayFromIp:         binary.BigEndian.Uint32(fromB[:]),
-				RelayToIp:           binary.BigEndian.Uint32(targetB[:]),
-			}
-			msg, err := req.Marshal()
-			if err != nil {
-				logMsg.
-					WithError(err).Error("relayManager Failed to marshal Control message to create relay")
-			} else {
-				f.SendMessageToHostInfo(header.Control, 0, peer, msg, make([]byte, 12), make([]byte, mtu))
-				rm.l.WithFields(logrus.Fields{
-					//TODO: IPV6-WORK another lazy used to use the req object
-					"relayFrom":           h.vpnIp,
-					"relayTo":             target,
-					"initiatorRelayIndex": req.InitiatorRelayIndex,
-					"responderRelayIndex": req.ResponderRelayIndex,
-					"vpnIp":               target}).
-					Info("send CreateRelayRequest")
-			}
-		}
-		// Also track the half-created Relay state just received
-		relay, ok := h.relayState.QueryRelayForByIp(target)
-		if !ok {
-			// Add the relay
-			state := PeerRequested
-			if targetRelay != nil && targetRelay.State == Established {
-				state = Established
-			}
-			_, err := AddRelay(rm.l, h, f.hostMap, target, &m.InitiatorRelayIndex, ForwardingType, state)
-			if err != nil {
-				logMsg.
-					WithError(err).Error("relayManager Failed to allocate a local index for relay")
-				return
-			}
-		} else {
-			switch relay.State {
-			case Established:
-				if relay.RemoteIndex != m.InitiatorRelayIndex {
-					// We got a brand new Relay request, because its index is different than what we saw before.
-					// This should never happen. The peer should never change an index, once created.
-					logMsg.WithFields(logrus.Fields{
-						"existingRemoteIndex": relay.RemoteIndex}).Error("Existing relay mismatch with CreateRelayRequest")
-					return
-				}
-				//TODO: IPV6-WORK
-				fromB := h.vpnIp.As4()
-				targetB := target.As4()
-				resp := NebulaControl{
-					Type:                NebulaControl_CreateRelayResponse,
-					ResponderRelayIndex: relay.LocalIndex,
-					InitiatorRelayIndex: relay.RemoteIndex,
-					RelayFromIp:         binary.BigEndian.Uint32(fromB[:]),
-					RelayToIp:           binary.BigEndian.Uint32(targetB[:]),
-				}
-				msg, err := resp.Marshal()
-				if err != nil {
-					rm.l.
-						WithError(err).Error("relayManager Failed to marshal Control CreateRelayResponse message to create relay")
-				} else {
-					f.SendMessageToHostInfo(header.Control, 0, h, msg, make([]byte, 12), make([]byte, mtu))
-					rm.l.WithFields(logrus.Fields{
-						//TODO: IPV6-WORK more lazy, used to use resp object
-						"relayFrom":           h.vpnIp,
-						"relayTo":             target,
-						"initiatorRelayIndex": resp.InitiatorRelayIndex,
-						"responderRelayIndex": resp.ResponderRelayIndex,
-						"vpnIp":               h.vpnIp}).
-						Info("send CreateRelayResponse")
-				}
-
-			case Requested:
-				// Keep waiting for the other relay to complete
-			}
-		}
+		peer.relayState.UpdateRelayForByIpState(from, Requested)
+		// Send a CreateRelayRequest to the peer.
+		fromB := from.As4()
+		targetB := target.As4()
+		req := NebulaControl{
+			Type:                NebulaControl_CreateRelayRequest,
+			InitiatorRelayIndex: index,
+			RelayFromIp:         binary.BigEndian.Uint32(fromB[:]),
+			RelayToIp:           binary.BigEndian.Uint32(targetB[:]),
+		}
+		msg, err := req.Marshal()
+		if err != nil {
+			logMsg.
+				WithError(err).Error("relayManager Failed to marshal Control message to create relay")
+		} else {
+			f.SendMessageToHostInfo(header.Control, 0, peer, msg, make([]byte, 12), make([]byte, mtu))
+			rm.l.WithFields(logrus.Fields{
+				//TODO: IPV6-WORK another lazy used to use the req object
+				"relayFrom":           h.vpnIp,
+				"relayTo":             target,
+				"initiatorRelayIndex": req.InitiatorRelayIndex,
+				"responderRelayIndex": req.ResponderRelayIndex,
+				"vpnAddr":             target}).
+				Info("send CreateRelayRequest")
+			// Also track the half-created Relay state just received
+			_, ok := h.relayState.QueryRelayForByIp(target)
+			if !ok {
+				// Add the relay
+				_, err := AddRelay(rm.l, h, f.hostMap, target, &m.InitiatorRelayIndex, ForwardingType, PeerRequested)
+				if err != nil {
+					logMsg.
+						WithError(err).Error("relayManager Failed to allocate a local index for relay")
+					return
+				}
+			}
+		}
 	}
 }
udp/errors.go (new file, 5 lines)

@@ -0,0 +1,5 @@
package udp

import "errors"

var ErrInvalidIPv6RemoteForSocket = errors.New("listener is IPv4, but writing to IPv6 remote")
@@ -6,17 +6,63 @@ package udp
 // Darwin support is primarily implemented in udp_generic, besides NewListenConfig

 import (
+	"context"
+	"encoding/binary"
+	"errors"
 	"fmt"
 	"net"
 	"net/netip"
 	"syscall"
+	"unsafe"

 	"github.com/sirupsen/logrus"
+	"github.com/slackhq/nebula/config"
+	"github.com/slackhq/nebula/firewall"
+	"github.com/slackhq/nebula/header"
 	"golang.org/x/sys/unix"
 )

+type StdConn struct {
+	*net.UDPConn
+	isV4  bool
+	sysFd uintptr
+	l     *logrus.Logger
+}
+
+var _ Conn = &StdConn{}
+
 func NewListener(l *logrus.Logger, ip netip.Addr, port int, multi bool, batch int) (Conn, error) {
-	return NewGenericListener(l, ip, port, multi, batch)
+	lc := NewListenConfig(multi)
+	pc, err := lc.ListenPacket(context.TODO(), "udp", net.JoinHostPort(ip.String(), fmt.Sprintf("%v", port)))
+	if err != nil {
+		return nil, err
+	}
+
+	if uc, ok := pc.(*net.UDPConn); ok {
+		c := &StdConn{UDPConn: uc, l: l}
+
+		rc, err := uc.SyscallConn()
+		if err != nil {
+			return nil, fmt.Errorf("failed to open udp socket: %w", err)
+		}
+
+		err = rc.Control(func(fd uintptr) {
+			c.sysFd = fd
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to get udp fd: %w", err)
+		}
+
+		la, err := c.LocalAddr()
+		if err != nil {
+			return nil, err
+		}
+		c.isV4 = la.Addr().Is4()
+
+		return c, nil
+	}
+
+	return nil, fmt.Errorf("unexpected PacketConn: %T %#v", pc, pc)
 }

 func NewListenConfig(multi bool) net.ListenConfig {
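NewListener above grabs the socket's descriptor through SyscallConn().Control and stores it on StdConn so later writes can bypass the standard library. A minimal standalone sketch of that fd-capture pattern (illustrative only; it relies on the connection staying open for as long as the saved descriptor is used):

package main

import (
	"fmt"
	"net"
)

func main() {
	pc, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	defer pc.Close()

	uc := pc.(*net.UDPConn)
	rc, err := uc.SyscallConn()
	if err != nil {
		panic(err)
	}

	// Control runs the callback with the underlying descriptor; the listener
	// above stashes it for later raw sendto calls.
	var sysFd uintptr
	if err := rc.Control(func(fd uintptr) { sysFd = fd }); err != nil {
		panic(err)
	}
	fmt.Println("bound", uc.LocalAddr(), "fd", sysFd)
}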
@@ -43,16 +89,130 @@ func NewListenConfig(multi bool) net.ListenConfig {
 	}
 }

-func (u *GenericConn) Rebind() error {
-	rc, err := u.UDPConn.SyscallConn()
-	if err != nil {
-		return err
-	}
-
-	return rc.Control(func(fd uintptr) {
-		err := syscall.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_BOUND_IF, 0)
-		if err != nil {
-			u.l.WithError(err).Error("Failed to rebind udp socket")
-		}
-	})
-}
+//go:linkname sendto golang.org/x/sys/unix.sendto
+//go:noescape
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen int32) (err error)
+
+func (u *StdConn) WriteTo(b []byte, ap netip.AddrPort) error {
+	var sa unsafe.Pointer
+	var addrLen int32
+
+	if u.isV4 {
+		if ap.Addr().Is6() {
+			return ErrInvalidIPv6RemoteForSocket
+		}
+
+		var rsa unix.RawSockaddrInet4
+		rsa.Family = unix.AF_INET
+		rsa.Addr = ap.Addr().As4()
+		binary.BigEndian.PutUint16((*[2]byte)(unsafe.Pointer(&rsa.Port))[:], ap.Port())
+		sa = unsafe.Pointer(&rsa)
+		addrLen = syscall.SizeofSockaddrInet4
+	} else {
+		var rsa unix.RawSockaddrInet6
+		rsa.Family = unix.AF_INET6
+		rsa.Addr = ap.Addr().As16()
+		binary.BigEndian.PutUint16((*[2]byte)(unsafe.Pointer(&rsa.Port))[:], ap.Port())
+		sa = unsafe.Pointer(&rsa)
+		addrLen = syscall.SizeofSockaddrInet6
+	}
+
+	// Golang stdlib doesn't handle EAGAIN correctly in some situations so we do writes ourselves
+	// See https://github.com/golang/go/issues/73919
+	for {
+		//_, _, err := unix.Syscall6(unix.SYS_SENDTO, u.sysFd, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0, sa, addrLen)
+		err := sendto(int(u.sysFd), b, 0, sa, addrLen)
+		if err == nil {
+			// Written, get out before the error handling
+			return nil
+		}
+
+		if errors.Is(err, syscall.EINTR) {
+			// Write was interrupted, retry
+			continue
+		}
+
+		if errors.Is(err, syscall.EAGAIN) {
+			return &net.OpError{Op: "sendto", Err: unix.EWOULDBLOCK}
+		}
+
+		if errors.Is(err, syscall.EBADF) {
+			return net.ErrClosed
+		}
+
+		return &net.OpError{Op: "sendto", Err: err}
+	}
+}
+
+func (u *StdConn) LocalAddr() (netip.AddrPort, error) {
+	a := u.UDPConn.LocalAddr()
+
+	switch v := a.(type) {
+	case *net.UDPAddr:
+		addr, ok := netip.AddrFromSlice(v.IP)
+		if !ok {
+			return netip.AddrPort{}, fmt.Errorf("LocalAddr returned invalid IP address: %s", v.IP)
+		}
+		return netip.AddrPortFrom(addr, uint16(v.Port)), nil
+
+	default:
+		return netip.AddrPort{}, fmt.Errorf("LocalAddr returned: %#v", a)
+	}
+}
+
+func (u *StdConn) ReloadConfig(c *config.C) {
+	// TODO
+}
+
+func NewUDPStatsEmitter(udpConns []Conn) func() {
+	// No UDP stats for non-linux
+	return func() {}
+}
+
+func (u *StdConn) ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firewall.ConntrackCacheTicker, q int) {
+	plaintext := make([]byte, MTU)
+	buffer := make([]byte, MTU)
+	h := &header.H{}
+	fwPacket := &firewall.Packet{}
+	nb := make([]byte, 12, 12)
+
+	for {
+		// Just read one packet at a time
+		n, rua, err := u.ReadFromUDPAddrPort(buffer)
+		if err != nil {
+			if errors.Is(err, net.ErrClosed) {
+				u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
+				return
+			}
+
+			u.l.WithError(err).Error("unexpected udp socket receive error")
+		}
+
+		r(
+			netip.AddrPortFrom(rua.Addr().Unmap(), rua.Port()),
+			plaintext[:0],
+			buffer[:n],
+			h,
+			fwPacket,
+			lhf,
+			nb,
+			q,
+			cache.Get(u.l),
+		)
+	}
+}
+
+func (u *StdConn) Rebind() error {
+	var err error
+	if u.isV4 {
+		err = syscall.SetsockoptInt(int(u.sysFd), syscall.IPPROTO_IP, syscall.IP_BOUND_IF, 0)
+	} else {
+		err = syscall.SetsockoptInt(int(u.sysFd), syscall.IPPROTO_IPV6, syscall.IPV6_BOUND_IF, 0)
+	}
+
+	if err != nil {
+		u.l.WithError(err).Error("Failed to rebind udp socket")
+	}
+
+	return nil
+}
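WriteTo above loops on a raw sendto and classifies the errno: EINTR is retried, EAGAIN is surfaced as a net.OpError wrapping EWOULDBLOCK, and EBADF maps to net.ErrClosed. A standalone sketch of the same classification using the exported unix.Sendto helper instead of the go:linkname'd sendto (rawSend is an illustrative name, not part of this change):

//go:build darwin || linux

package main

import (
	"errors"
	"fmt"
	"net"
	"syscall"

	"golang.org/x/sys/unix"
)

// rawSend mirrors the retry/classification loop in StdConn.WriteTo above.
func rawSend(fd int, b []byte, to *unix.SockaddrInet4) error {
	for {
		err := unix.Sendto(fd, b, 0, to)
		if err == nil {
			return nil
		}
		if errors.Is(err, syscall.EINTR) {
			continue // interrupted, retry the write
		}
		if errors.Is(err, syscall.EAGAIN) {
			return &net.OpError{Op: "sendto", Err: unix.EWOULDBLOCK}
		}
		if errors.Is(err, syscall.EBADF) {
			return net.ErrClosed
		}
		return &net.OpError{Op: "sendto", Err: err}
	}
}

func main() {
	fd, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	to := &unix.SockaddrInet4{Port: 4242, Addr: [4]byte{127, 0, 0, 1}}
	fmt.Println(rawSend(fd, []byte("ping"), to))
}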
@@ -1,6 +1,7 @@
-//go:build (!linux || android) && !e2e_testing
+//go:build (!linux || android) && !e2e_testing && !darwin
 // +build !linux android
 // +build !e2e_testing
+// +build !darwin

 // udp_generic implements the nebula UDP interface in pure Go stdlib. This
 // means it can be used on platforms like Darwin and Windows.
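The constraint change above excludes darwin in both the //go:build expression and the legacy // +build lines, so Darwin now compiles its dedicated listener instead of the generic one; the two forms must describe the same condition. For illustration, a hypothetical darwin-only file would invert the constraint like this:

//go:build darwin && !e2e_testing
// +build darwin,!e2e_testing

package udp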
@@ -243,7 +243,7 @@ func (u *StdConn) writeTo6(b []byte, ip netip.AddrPort) error {

 func (u *StdConn) writeTo4(b []byte, ip netip.AddrPort) error {
 	if !ip.Addr().Is4() {
-		return fmt.Errorf("Listener is IPv4, but writing to IPv6 remote")
+		return ErrInvalidIPv6RemoteForSocket
 	}

 	var rsa unix.RawSockaddrInet4
@@ -95,6 +95,25 @@ func (u *RIOConn) bind(sa windows.Sockaddr) error {
 	// Enable v4 for this socket
 	syscall.SetsockoptInt(syscall.Handle(u.sock), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)

+	// Disable reporting of PORT_UNREACHABLE and NET_UNREACHABLE errors from the UDP socket receive call.
+	// These errors are returned on Windows during UDP receives based on the receipt of ICMP packets. Disable
+	// the UDP receive error returns with these ioctl calls.
+	ret := uint32(0)
+	flag := uint32(0)
+	size := uint32(unsafe.Sizeof(flag))
+	err = syscall.WSAIoctl(syscall.Handle(u.sock), syscall.SIO_UDP_CONNRESET, (*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0)
+	if err != nil {
+		return err
+	}
+	ret = 0
+	flag = 0
+	size = uint32(unsafe.Sizeof(flag))
+	SIO_UDP_NETRESET := uint32(syscall.IOC_IN | syscall.IOC_VENDOR | 15)
+	err = syscall.WSAIoctl(syscall.Handle(u.sock), SIO_UDP_NETRESET, (*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0)
+	if err != nil {
+		return err
+	}
+
 	err = u.rx.Open()
 	if err != nil {
 		return err
@@ -129,8 +148,12 @@ func (u *RIOConn) ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firew
 		// Just read one packet at a time
 		n, rua, err := u.receive(buffer)
 		if err != nil {
-			u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
-			return
+			if errors.Is(err, net.ErrClosed) {
+				u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
+				return
+			}
+			u.l.WithError(err).Error("unexpected udp socket receive error")
+			continue
 		}

 		r(
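The two WSAIoctl calls above clear SIO_UDP_CONNRESET and the vendor-defined SIO_UDP_NETRESET behaviors so ICMP port/net unreachable responses no longer surface as errors on later receives. A standalone sketch applying the same ioctls to a plain net.UDPConn (disableUDPResetErrors is an illustrative name, not part of the nebula code):

//go:build windows

package main

import (
	"fmt"
	"net"
	"syscall"
	"unsafe"
)

// disableUDPResetErrors applies the same two ioctls as the RIOConn change above.
func disableUDPResetErrors(uc *net.UDPConn) error {
	rc, err := uc.SyscallConn()
	if err != nil {
		return err
	}

	var ctlErr error
	err = rc.Control(func(fd uintptr) {
		flag := uint32(0)
		ret := uint32(0)
		size := uint32(unsafe.Sizeof(flag))

		ctlErr = syscall.WSAIoctl(syscall.Handle(fd), syscall.SIO_UDP_CONNRESET,
			(*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0)
		if ctlErr != nil {
			return
		}

		// Build SIO_UDP_NETRESET the same way the change above does (IOC_IN | IOC_VENDOR | 15).
		sioUDPNetReset := uint32(syscall.IOC_IN | syscall.IOC_VENDOR | 15)
		ctlErr = syscall.WSAIoctl(syscall.Handle(fd), sioUDPNetReset,
			(*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0)
	})
	if err != nil {
		return err
	}
	return ctlErr
}

func main() {
	uc, err := net.ListenUDP("udp4", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		panic(err)
	}
	defer uc.Close()
	fmt.Println(disableUDPResetErrors(uc))
}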