Compare commits

..

30 Commits

Author SHA1 Message Date
Wade Simmons
d4aea03dd1 update 2025-04-02 10:41:21 -04:00
Wade Simmons
1c9fdba403 Merge remote-tracking branch 'origin/master' into mutex-debug 2025-04-02 09:22:18 -04:00
Wade Simmons
77eced39dd run smoke test with mutex_debug 2024-05-28 13:28:38 -04:00
Wade Simmons
1704d7f75a allow more locks 2024-05-28 13:22:47 -04:00
Wade Simmons
2030cbf018 Merge remote-tracking branch 'origin/master' into mutex-debug 2024-05-28 12:02:01 -04:00
Wade Simmons
dffaaf38d4 Merge branch 'lighthouse-query-chan-lock' into mutex-debug 2024-04-11 13:25:09 -04:00
Wade Simmons
f2251645bb chanDebug 2024-04-11 13:24:01 -04:00
Wade Simmons
2ff26b261d need to hold lock during cacheCb 2024-04-11 13:02:13 -04:00
Wade Simmons
c7f1bed882 avoid deadlock in lighthouse queryWorker
If the lighthouse queryWorker tries to call StartHandshake on
a lighthouse vpnIp, we can deadlock on the handshake_manager lock. This
change drops the handshake_manager lock before we send on the lighthouse
queryChan (which could block), and also avoids sending to the channel if
this is a lighthouse IP itself.
2024-04-11 12:58:25 -04:00
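
A minimal Go sketch of the pattern this commit describes is shown below; the handshakeManager type, its pending/lighthouses maps, and queryChan are simplified stand-ins for the real nebula types, not the branch's actual code:

```go
package sketch

import (
	"net/netip"
	"sync"
)

// Simplified stand-ins for nebula's handshake manager and lighthouse plumbing.
type handshakeManager struct {
	sync.Mutex
	pending     map[netip.Addr]struct{} // handshakes we are currently attempting
	lighthouses map[netip.Addr]struct{} // lighthouse vpnIps, filled at startup, read-only after
	queryChan   chan netip.Addr         // consumed by the lighthouse queryWorker
}

func (hm *handshakeManager) StartHandshake(vpnIp netip.Addr) {
	hm.Lock()
	hm.pending[vpnIp] = struct{}{} // bookkeeping that needs the lock
	hm.Unlock()                    // drop the lock before anything that can block

	// Don't queue a lighthouse query for a lighthouse's own vpnIp: the
	// queryWorker handling that query would call back into StartHandshake
	// and deadlock on the handshake_manager lock.
	if _, isLighthouse := hm.lighthouses[vpnIp]; isLighthouse {
		return
	}

	// This send can block if the queryWorker is busy, so it must happen
	// after the lock above has been released.
	hm.queryChan <- vpnIp
}
```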
Wade Simmons
0ccfad1a1e Merge remote-tracking branch 'origin/master' into mutex-debug 2024-04-11 12:15:52 -04:00
Wade Simmons
1be8dc43a7 more 2024-02-05 11:13:20 -05:00
Wade Simmons
94dd14c1a3 Merge remote-tracking branch 'origin/master' into mutex-debug 2024-01-31 09:19:21 -05:00
Wade Simmons
91ec6bb1ff Merge remote-tracking branch 'origin/master' into mutex-debug 2023-12-19 13:30:40 -05:00
Wade Simmons
26f7a9fd45 use terraform dag impl 2023-12-19 11:24:14 -05:00
Wade Simmons
6f27f46965 simplify 2023-12-19 09:10:00 -05:00
Wade Simmons
bcaefce4ac more types 2023-12-18 22:38:52 -05:00
Wade Simmons
540a171ef8 WIP more locks 2023-12-18 22:28:24 -05:00
Wade Simmons
4d88c0711a gofmt 2023-12-18 21:04:05 -05:00
Wade Simmons
5ce8279875 update to work with the latest locks 2023-12-18 21:01:26 -05:00
Wade Simmons
fdb78044ba Merge remote-tracking branch 'origin/master' into mutex-debug 2023-12-17 09:19:48 -05:00
Wade Simmons
4c89b3c6a3 cleanup 2023-08-21 13:09:25 -04:00
Wade Simmons
5cc43ea9cd Merge branch 'master' into mutex-debug 2023-08-21 12:42:36 -04:00
Wade Simmons
92c4245329 Merge remote-tracking branch 'origin/master' into mutex-debug 2023-05-09 12:01:44 -04:00
Wade Simmons
e5789770b1 keep track of what file/line the locks were grabbed on 2023-05-09 11:51:02 -04:00
Wade Simmons
a83f0ca470 Merge remote-tracking branch 'origin/master' into mutex-debug 2023-05-09 11:42:05 -04:00
Wade Simmons
90e9a8e42c use delete 2023-05-09 11:41:53 -04:00
Wade Simmons
9105eba939 also validate hostinfo locks 2023-05-09 11:22:55 -04:00
Wade Simmons
3e5e48f937 use mutex_debug during Github Actions run 2023-05-09 10:39:28 -04:00
Wade Simmons
afde2080d6 Merge remote-tracking branch 'origin/master' into mutex-debug 2023-05-09 10:29:37 -04:00
Wade Simmons
e6eeef785e mutex_debug
experimental test to see if we can have a test mode that verifies
mutexes are locked in the order we want, with no impact on production
performance. Since this uses a build tag, it should all compile out
during the build process and be a no-op unless the tag is set.
2023-05-08 11:17:14 -04:00
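
A rough two-file sketch of the build-tag approach this commit describes, assuming a wrapper type named syncRWMutex as seen in the diffs below; this is illustrative only, not the branch's actual mutex_debug implementation:

```go
// ---- mutex.go: default builds; the wrapper is a plain alias, so there is
// ---- no runtime cost when the tag is not set.
//go:build !mutex_debug

package nebula

import "sync"

type syncRWMutex = sync.RWMutex

func newSyncRWMutex(name string) syncRWMutex { return sync.RWMutex{} }

// ---- mutex_debug.go: built only with -tags=mutex_debug; wraps the mutex so
// ---- each acquisition can be checked against an expected lock ordering.
//go:build mutex_debug

package nebula

import (
	"fmt"
	"sync"
)

type syncRWMutex struct {
	sync.RWMutex
	name string // identifies the lock for ordering and diagnostics
}

func newSyncRWMutex(name string) syncRWMutex {
	return syncRWMutex{name: name}
}

func (m *syncRWMutex) Lock() {
	checkLockOrder(m.name) // hypothetical hook: verify this lock may be taken now
	m.RWMutex.Lock()
}

// checkLockOrder stands in for the real verification (e.g. tracking which
// named locks the current goroutine already holds and checking them against
// an allowed ordering); here it only logs the acquisition.
func checkLockOrder(name string) {
	fmt.Println("mutex_debug: acquiring", name)
}
```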
36 changed files with 501 additions and 490 deletions

View File

@@ -1,21 +1,13 @@
blank_issues_enabled: true
contact_links:
- name: 💨 Performance Issues
url: https://github.com/slackhq/nebula/discussions/new/choose
about: 'We ask that you create a discussion instead of an issue for performance-related questions. This allows us to have a more open conversation about the issue and helps us to better understand the problem.'
- name: 📄 Documentation Issues
url: https://github.com/definednet/nebula-docs
about: "If you've found an issue with the website documentation, please file it in the nebula-docs repository."
- name: 📱 Mobile Nebula Issues
url: https://github.com/definednet/mobile_nebula
about: "If you're using the mobile Nebula app and have found an issue, please file it in the mobile_nebula repository."
- name: 📘 Documentation
url: https://nebula.defined.net/docs/
about: 'The documentation is the best place to start if you are new to Nebula.'
about: Review documentation.
- name: 💁 Support/Chat
url: https://join.slack.com/t/nebulaoss/shared_invite/zt-2xqe6e7vn-k_KGi8s13nsr7cvHVvHvuQ
about: 'For faster support, join us on Slack for assistance!'
url: https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU
about: 'This issue tracker is not for support questions. Join us on Slack for assistance!'
- name: 📱 Mobile Nebula
url: https://github.com/definednet/mobile_nebula
about: 'This issue tracker is not for mobile support. Try the Mobile Nebula repo instead!'

View File

@@ -1,11 +0,0 @@
<!--
Thank you for taking the time to submit a pull request!
Please be sure to provide a clear description of what you're trying to achieve with the change.
- If you're submitting a new feature, please explain how to use it and document any new config options in the example config.
- If you're submitting a bugfix, please link the related issue or describe the circumstances surrounding the issue.
- If you're changing a default, explain why you believe the new default is appropriate for most users.
P.S. If you're only updating the README or other docs, please file a pull request here instead: https://github.com/DefinedNet/nebula-docs
-->

View File

@@ -26,7 +26,7 @@ jobs:
check-latest: true
- name: build
run: make bin-docker CGO_ENABLED=1 BUILD_ARGS=-race
run: make bin-docker CGO_ENABLED=1 BUILD_ARGS="-race -tags=mutex_debug"
- name: setup docker image
working-directory: ./.github/workflows/smoke

View File

@@ -40,7 +40,7 @@ jobs:
run: make test
- name: End 2 end
run: make e2evv
run: make e2e-mutex-debug TEST_LOGS=1 TEST_FLAGS=-v
- name: Build test mobile
run: make build-test-mobile

View File

@@ -63,6 +63,9 @@ ALL = $(ALL_LINUX) \
e2e:
$(TEST_ENV) go test -tags=e2e_testing -count=1 $(TEST_FLAGS) ./e2e
e2e-mutex-debug:
$(TEST_ENV) go test -tags=mutex_debug,e2e_testing -count=1 $(TEST_FLAGS) ./e2e
e2ev: TEST_FLAGS += -v
e2ev: e2e
@@ -215,6 +218,7 @@ ifeq ($(words $(MAKECMDGOALS)),1)
@$(MAKE) service ${.DEFAULT_GOAL} --no-print-directory
endif
bin-docker: BUILD_ARGS = -tags=mutex_debug
bin-docker: bin build/linux-amd64/nebula build/linux-amd64/nebula-cert
smoke-docker: bin-docker

View File

@@ -4,7 +4,7 @@ It lets you seamlessly connect computers anywhere in the world. Nebula is portab
It can be used to connect a small number of computers, but is also able to connect tens of thousands of computers.
Nebula incorporates a number of existing concepts like encryption, security groups, certificates,
and tunneling.
and tunneling, and each of those individual pieces existed before Nebula in various forms.
What makes Nebula different to existing offerings is that it brings all of these ideas together,
resulting in a sum that is greater than its individual parts.
@@ -28,33 +28,33 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for
#### Distribution Packages
- [Arch Linux](https://archlinux.org/packages/extra/x86_64/nebula/)
```sh
sudo pacman -S nebula
```
$ sudo pacman -S nebula
```
- [Fedora Linux](https://src.fedoraproject.org/rpms/nebula)
```sh
sudo dnf install nebula
```
$ sudo dnf install nebula
```
- [Debian Linux](https://packages.debian.org/source/stable/nebula)
```sh
sudo apt install nebula
```
$ sudo apt install nebula
```
- [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=nebula)
```sh
sudo apk add nebula
```
$ sudo apk add nebula
```
- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/n/nebula.rb)
```sh
brew install nebula
```
$ brew install nebula
```
- [Docker](https://hub.docker.com/r/nebulaoss/nebula)
```sh
docker pull nebulaoss/nebula
```
$ docker pull nebulaoss/nebula
```
#### Mobile
@@ -64,10 +64,10 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for
## Technical Overview
Nebula is a mutually authenticated peer-to-peer software-defined network based on the [Noise Protocol Framework](https://noiseprotocol.org/).
Nebula is a mutually authenticated peer-to-peer software defined network based on the [Noise Protocol Framework](https://noiseprotocol.org/).
Nebula uses certificates to assert a node's IP address, name, and membership within user-defined groups.
Nebula's user-defined groups allow for provider agnostic traffic filtering between nodes.
Discovery nodes (aka lighthouses) allow individual peers to find each other and optionally use UDP hole punching to establish connections from behind most firewalls or NATs.
Discovery nodes allow individual peers to find each other and optionally use UDP hole punching to establish connections from behind most firewalls or NATs.
Users can move data between nodes in any number of cloud service providers, datacenters, and endpoints, without needing to maintain a particular addressing scheme.
Nebula uses Elliptic-curve Diffie-Hellman (`ECDH`) key exchange and `AES-256-GCM` in its default configuration.
@@ -82,34 +82,28 @@ To set up a Nebula network, you'll need:
#### 2. (Optional, but you really should..) At least one discovery node with a routable IP address, which we call a lighthouse.
Nebula lighthouses allow nodes to find each other, anywhere in the world. A lighthouse is the only node in a Nebula network whose IP should not change. Running a lighthouse requires very few compute resources, and you can easily use the least expensive option from a cloud hosting provider. If you're not sure which provider to use, a number of us have used $6/mo [DigitalOcean](https://digitalocean.com) droplets as lighthouses.
Nebula lighthouses allow nodes to find each other, anywhere in the world. A lighthouse is the only node in a Nebula network whose IP should not change. Running a lighthouse requires very few compute resources, and you can easily use the least expensive option from a cloud hosting provider. If you're not sure which provider to use, a number of us have used $5/mo [DigitalOcean](https://digitalocean.com) droplets as lighthouses.
Once you have launched an instance, ensure that Nebula udp traffic (default port udp/4242) can reach it over the internet.
Once you have launched an instance, ensure that Nebula udp traffic (default port udp/4242) can reach it over the internet.
#### 3. A Nebula certificate authority, which will be the root of trust for a particular Nebula network.
```sh
./nebula-cert ca -name "Myorganization, Inc"
```
This will create files named `ca.key` and `ca.cert` in the current directory. The `ca.key` file is the most sensitive file you'll create, because it is the key used to sign the certificates for individual nebula nodes/hosts. Please store this file somewhere safe, preferably with strong encryption.
**Be aware!** By default, certificate authorities have a 1-year lifetime before expiration. See [this guide](https://nebula.defined.net/docs/guides/rotating-certificate-authority/) for details on rotating a CA.
```
./nebula-cert ca -name "Myorganization, Inc"
```
This will create files named `ca.key` and `ca.cert` in the current directory. The `ca.key` file is the most sensitive file you'll create, because it is the key used to sign the certificates for individual nebula nodes/hosts. Please store this file somewhere safe, preferably with strong encryption.
#### 4. Nebula host keys and certificates generated from that certificate authority
This assumes you have four nodes, named lighthouse1, laptop, server1, host3. You can name the nodes any way you'd like, including FQDN. You'll also need to choose IP addresses and the associated subnet. In this example, we are creating a nebula network that will use 192.168.100.x/24 as its network range. This example also demonstrates nebula groups, which can later be used to define traffic rules in a nebula network.
```sh
```
./nebula-cert sign -name "lighthouse1" -ip "192.168.100.1/24"
./nebula-cert sign -name "laptop" -ip "192.168.100.2/24" -groups "laptop,home,ssh"
./nebula-cert sign -name "server1" -ip "192.168.100.9/24" -groups "servers"
./nebula-cert sign -name "host3" -ip "192.168.100.10/24"
```
By default, host certificates will expire 1 second before the CA expires. Use the `-duration` flag to specify a shorter lifetime.
#### 5. Configuration files for each host
Download a copy of the nebula [example configuration](https://github.com/slackhq/nebula/blob/master/examples/config.yml).
* On the lighthouse node, you'll need to ensure `am_lighthouse: true` is set.
@@ -124,13 +118,10 @@ For each host, copy the nebula binary to the host, along with `config.yml` from
**DO NOT COPY `ca.key` TO INDIVIDUAL NODES.**
#### 7. Run nebula on each host
```sh
```
./nebula -config /path/to/config.yml
```
For more detailed instructions, [find the full documentation here](https://nebula.defined.net/docs/).
## Building Nebula from source
Make sure you have [go](https://go.dev/doc/install) installed and clone this repo. Change to the nebula directory.
@@ -149,10 +140,8 @@ The default curve used for cryptographic handshakes and signatures is Curve25519
In addition, Nebula can be built using the [BoringCrypto GOEXPERIMENT](https://github.com/golang/go/blob/go1.20/src/crypto/internal/boring/README.md) by running either of the following make targets:
```sh
make bin-boringcrypto
make release-boringcrypto
```
make bin-boringcrypto
make release-boringcrypto
This is not the recommended default deployment, but may be useful based on your compliance requirements.
@@ -160,3 +149,5 @@ This is not the recommended default deployment, but may be useful based on your
Nebula was created at Slack Technologies, Inc by Nate Brown and Ryan Huber, with contributions from Oliver Fross, Alan Lam, Wade Simmons, and Lining Wang.

View File

@@ -26,21 +26,21 @@ func TestNewArgon2Parameters(t *testing.T) {
}
func TestDecryptAndUnmarshalSigningPrivateKey(t *testing.T) {
passphrase := []byte("DO NOT USE")
passphrase := []byte("DO NOT USE THIS KEY")
privKey := []byte(`# A good key
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
CjsKC0FFUy0yNTYtR0NNEiwIExCAgAQYAyAEKiCPoDfGQiosxNPTbPn5EsMlc2MI
c0Bt4oz6gTrFQhX3aBJcimhHKeAuhyTGvllD0Z19fe+DFPcLH3h5VrdjVfIAajg0
KrbV3n9UHif/Au5skWmquNJzoW1E4MTdRbvpti6o+WdQ49DxjBFhx0YH8LBqrbPU
0BGkUHmIO7daP24=
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCognnjujd67Vsv99p22wfAjQaDT
oCMW1mdjkU3gACKNW4MSXOWR9Sts4C81yk1RUku2gvGKs3TB9LYoklLsIizSYOLl
+Vs//O1T0I1Xbml2XBAROsb/VSoDln/6LMqR4B6fn6B3GOsLBBqRI8daDl9lRMPB
qrlJ69wer3ZUHFXA
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
`)
shortKey := []byte(`# A key which, once decrypted, is too short
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
CjsKC0FFUy0yNTYtR0NNEiwIExCAgAQYAyAEKiAVJwdfl3r+eqi/vF6S7OMdpjfo
hAzmTCRnr58Su4AqmBJbCv3zleYCEKYJP6UI3S8ekLMGISsgO4hm5leukCCyqT0Z
cQ76yrberpzkJKoPLGisX8f+xdy4aXSZl7oEYWQte1+vqbtl/eY9PGZhxUQdcyq7
hqzIyrRqfUgVuA==
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCoga5h8owMEBWRSMMJKzuUvWce7
k0qlBkQmCxiuLh80MuASW70YcKt8jeEIS2axo2V6zAKA9TSMcCsJW1kDDXEtL/xe
GLF5T7sDl5COp4LU3pGxpV+KoeQ/S3gQCAAcnaOtnJQX+aSDnbO3jCHyP7U9CHbs
rQr3bdH3Oy/WiYU=
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
`)
invalidBanner := []byte(`# Invalid banner (not encrypted)

View File

@@ -243,7 +243,7 @@ func (c *C) GetInt(k string, d int) int {
// GetUint32 will get the uint32 for k or return the default d if not found or invalid
func (c *C) GetUint32(k string, d uint32) uint32 {
r := c.GetInt(k, int(d))
if r < 0 || uint64(r) > uint64(math.MaxUint32) {
if uint64(r) > uint64(math.MaxUint32) {
return d
}
return uint32(r)

View File

@@ -5,13 +5,11 @@ import (
"context"
"encoding/binary"
"net/netip"
"sync"
"time"
"github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/header"
)
@@ -27,28 +25,16 @@ const (
sendTestPacket trafficDecision = 6
)
// LastCommunication tracks when we last communicated with a host
type LastCommunication struct {
timestamp time.Time
vpnIp netip.Addr // To help with logging
}
type connectionManager struct {
in map[uint32]struct{}
inLock *sync.RWMutex
inLock syncRWMutex
out map[uint32]struct{}
outLock *sync.RWMutex
outLock syncRWMutex
// relayUsed holds which relay localIndexs are in use
relayUsed map[uint32]struct{}
relayUsedLock *sync.RWMutex
// Track last communication with hosts
lastCommMap map[uint32]time.Time
lastCommLock *sync.RWMutex
inactivityTimer *LockingTimerWheel[uint32]
inactivityTimeout time.Duration
relayUsedLock syncRWMutex
hostMap *HostMap
trafficTimer *LockingTimerWheel[uint32]
@@ -73,15 +59,12 @@ func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface
nc := &connectionManager{
hostMap: intf.hostMap,
in: make(map[uint32]struct{}),
inLock: &sync.RWMutex{},
inLock: newSyncRWMutex("connection-manager-in"),
out: make(map[uint32]struct{}),
outLock: &sync.RWMutex{},
outLock: newSyncRWMutex("connection-manager-out"),
relayUsed: make(map[uint32]struct{}),
relayUsedLock: &sync.RWMutex{},
lastCommMap: make(map[uint32]time.Time),
lastCommLock: &sync.RWMutex{},
inactivityTimeout: 1 * time.Minute, // Default inactivity timeout: 10 minutes
trafficTimer: NewLockingTimerWheel[uint32](time.Millisecond*500, max),
relayUsedLock: newSyncRWMutex("connection-manager-relay-used"),
trafficTimer: NewLockingTimerWheel[uint32]("connection-manager-timer", time.Millisecond*500, max),
intf: intf,
pendingDeletion: make(map[uint32]struct{}),
checkInterval: checkInterval,
@@ -91,31 +74,10 @@ func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface
l: l,
}
// Initialize the inactivity timer wheel - make wheel duration slightly longer than the timeout
nc.inactivityTimer = NewLockingTimerWheel[uint32](time.Minute, nc.inactivityTimeout+time.Minute)
nc.Start(ctx)
return nc
}
func (n *connectionManager) updateLastCommunication(localIndex uint32) {
// Get host info to record VPN IP for better logging
hostInfo := n.hostMap.QueryIndex(localIndex)
if hostInfo == nil {
return
}
now := time.Now()
n.lastCommLock.Lock()
n.lastCommMap[localIndex] = now
n.lastCommLock.Unlock()
// Reset the inactivity timer for this host
n.inactivityTimer.m.Lock()
n.inactivityTimer.t.Add(localIndex, n.inactivityTimeout)
n.inactivityTimer.m.Unlock()
}
func (n *connectionManager) In(localIndex uint32) {
n.inLock.RLock()
// If this already exists, return
@@ -127,9 +89,6 @@ func (n *connectionManager) In(localIndex uint32) {
n.inLock.Lock()
n.in[localIndex] = struct{}{}
n.inLock.Unlock()
// Update last communication time
n.updateLastCommunication(localIndex)
}
func (n *connectionManager) Out(localIndex uint32) {
@@ -143,9 +102,6 @@ func (n *connectionManager) Out(localIndex uint32) {
n.outLock.Lock()
n.out[localIndex] = struct{}{}
n.outLock.Unlock()
// Update last communication time
n.updateLastCommunication(localIndex)
}
func (n *connectionManager) RelayUsed(localIndex uint32) {
@@ -187,134 +143,6 @@ func (n *connectionManager) AddTrafficWatch(localIndex uint32) {
n.outLock.Unlock()
}
// checkInactiveTunnels checks for tunnels that have been inactive for too long and drops them
func (n *connectionManager) checkInactiveTunnels() {
now := time.Now()
// First, advance the timer wheel to the current time
n.inactivityTimer.m.Lock()
n.inactivityTimer.t.Advance(now)
n.inactivityTimer.m.Unlock()
// Check for expired timers (inactive connections)
for {
// Get the next expired tunnel
n.inactivityTimer.m.Lock()
localIndex, ok := n.inactivityTimer.t.Purge()
n.inactivityTimer.m.Unlock()
if !ok {
// No more expired timers
break
}
n.lastCommLock.RLock()
lastComm, exists := n.lastCommMap[localIndex]
n.lastCommLock.RUnlock()
if !exists {
// No last communication record, odd but skip
continue
}
// Calculate inactivity duration
inactiveDuration := now.Sub(lastComm)
// Check if we've exceeded the inactivity timeout
if inactiveDuration >= n.inactivityTimeout {
// Get the host info (if it still exists)
hostInfo := n.hostMap.QueryIndex(localIndex)
if hostInfo == nil {
// Host info is gone, remove from our tracking map
n.lastCommLock.Lock()
delete(n.lastCommMap, localIndex)
n.lastCommLock.Unlock()
continue
}
// Log the inactivity and drop the tunnel
n.l.WithField("vpnIp", hostInfo.vpnAddrs[0]).
WithField("localIndex", localIndex).
WithField("inactiveDuration", inactiveDuration).
WithField("timeout", n.inactivityTimeout).
Info("Dropping tunnel due to inactivity")
// Close the tunnel using the existing mechanism
n.intf.closeTunnel(hostInfo)
// Clean up our tracking map
n.lastCommLock.Lock()
delete(n.lastCommMap, localIndex)
n.lastCommLock.Unlock()
} else {
// Re-add to the timer wheel with the remaining time
remainingTime := n.inactivityTimeout - inactiveDuration
n.inactivityTimer.m.Lock()
n.inactivityTimer.t.Add(localIndex, remainingTime)
n.inactivityTimer.m.Unlock()
}
}
}
// CleanupDeletedHostInfos removes entries from our lastCommMap for hosts that no longer exist
func (n *connectionManager) CleanupDeletedHostInfos() {
n.lastCommLock.Lock()
defer n.lastCommLock.Unlock()
// Find indexes to delete
var toDelete []uint32
for localIndex := range n.lastCommMap {
if n.hostMap.QueryIndex(localIndex) == nil {
toDelete = append(toDelete, localIndex)
}
}
// Delete them
for _, localIndex := range toDelete {
delete(n.lastCommMap, localIndex)
}
if len(toDelete) > 0 && n.l.Level >= logrus.DebugLevel {
n.l.WithField("count", len(toDelete)).Debug("Cleaned up deleted host entries from lastCommMap")
}
}
// ReloadConfig updates the connection manager configuration
func (n *connectionManager) ReloadConfig(c *config.C) {
// Get the inactivity timeout from config
inactivityTimeout := c.GetDuration("timers.inactivity_timeout", 10*time.Minute)
// Only update if different
if inactivityTimeout != n.inactivityTimeout {
n.l.WithField("old", n.inactivityTimeout).
WithField("new", inactivityTimeout).
Info("Updating inactivity timeout")
n.inactivityTimeout = inactivityTimeout
// Recreate the inactivity timer wheel with the new timeout
n.inactivityTimer = NewLockingTimerWheel[uint32](time.Minute, n.inactivityTimeout+time.Minute)
// Re-add all existing hosts to the new timer wheel
n.lastCommLock.RLock()
for localIndex, lastComm := range n.lastCommMap {
// Calculate remaining time based on last communication
now := time.Now()
elapsed := now.Sub(lastComm)
// If the elapsed time exceeds the new timeout, this will be caught
// in the next inactivity check. Otherwise, add with remaining time.
if elapsed < n.inactivityTimeout {
remainingTime := n.inactivityTimeout - elapsed
n.inactivityTimer.m.Lock()
n.inactivityTimer.t.Add(localIndex, remainingTime)
n.inactivityTimer.m.Unlock()
}
}
n.lastCommLock.RUnlock()
}
}
func (n *connectionManager) Start(ctx context.Context) {
go n.Run(ctx)
}
@@ -324,14 +152,6 @@ func (n *connectionManager) Run(ctx context.Context) {
clockSource := time.NewTicker(500 * time.Millisecond)
defer clockSource.Stop()
// Create ticker for inactivity checks (every minute)
inactivityTicker := time.NewTicker(time.Minute)
defer inactivityTicker.Stop()
// Create ticker for cleanup (every 5 minutes)
cleanupTicker := time.NewTicker(5 * time.Minute)
defer cleanupTicker.Stop()
p := []byte("")
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
@@ -351,14 +171,6 @@ func (n *connectionManager) Run(ctx context.Context) {
n.doTrafficCheck(localIndex, p, nb, out, now)
}
case <-inactivityTicker.C:
// Check for inactive tunnels
n.checkInactiveTunnels()
case <-cleanupTicker.C:
// Periodically clean up deleted hosts
n.CleanupDeletedHostInfos()
}
}
}
@@ -672,12 +484,12 @@ func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
if n.punchy.GetTargetEverything() {
hostinfo.remotes.ForEach(n.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
n.metricsTxPunchy.Inc(1)
_ = n.intf.outside.WriteTo([]byte{1}, addr)
n.intf.outside.WriteTo([]byte{1}, addr)
})
} else if hostinfo.remote.IsValid() {
n.metricsTxPunchy.Inc(1)
_ = n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
}
}
@@ -685,7 +497,7 @@ func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
cs := n.intf.pki.getCertState()
curCrt := hostinfo.ConnectionState.myCert
myCrt := cs.getCertificate(curCrt.Version())
if curCrt.Version() >= cs.initiatingVersion && bytes.Equal(curCrt.Signature(), myCrt.Signature()) == true {
if curCrt.Version() >= cs.defaultVersion && bytes.Equal(curCrt.Signature(), myCrt.Signature()) == true {
// The current tunnel is using the latest certificate and version, no need to rehandshake.
return
}

View File

@@ -44,10 +44,10 @@ func Test_NewConnectionManagerTest(t *testing.T) {
hostMap.preferredRanges.Store(&preferredRanges)
cs := &CertState{
initiatingVersion: cert.Version1,
privateKey: []byte{},
v1Cert: &dummyCert{version: cert.Version1},
v1HandshakeBytes: []byte{},
defaultVersion: cert.Version1,
privateKey: []byte{},
v1Cert: &dummyCert{version: cert.Version1},
v1HandshakeBytes: []byte{},
}
lh := newTestLighthouse()
@@ -126,10 +126,10 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
hostMap.preferredRanges.Store(&preferredRanges)
cs := &CertState{
initiatingVersion: cert.Version1,
privateKey: []byte{},
v1Cert: &dummyCert{version: cert.Version1},
v1HandshakeBytes: []byte{},
defaultVersion: cert.Version1,
privateKey: []byte{},
v1Cert: &dummyCert{version: cert.Version1},
v1HandshakeBytes: []byte{},
}
lh := newTestLighthouse()

View File

@@ -4,7 +4,6 @@ import (
"crypto/rand"
"encoding/json"
"fmt"
"sync"
"sync/atomic"
"github.com/flynn/noise"
@@ -24,7 +23,7 @@ type ConnectionState struct {
initiator bool
messageCounter atomic.Uint64
window *Bits
writeLock sync.Mutex
writeLock syncMutex
}
func NewConnectionState(l *logrus.Logger, cs *CertState, crt cert.Certificate, initiator bool, pattern noise.HandshakePattern) (*ConnectionState, error) {
@@ -76,6 +75,8 @@ func NewConnectionState(l *logrus.Logger, cs *CertState, crt cert.Certificate, i
initiator: initiator,
window: b,
myCert: crt,
writeLock: newSyncMutex("connection-state-write"),
}
// always start the counter from 2, as packet 1 and packet 2 are handshake packets.
ci.messageCounter.Add(2)

View File

@@ -131,7 +131,8 @@ func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
// GetCertByVpnIp returns the authenticated certificate of the given vpn IP, or nil if not found
func (c *Control) GetCertByVpnIp(vpnIp netip.Addr) cert.Certificate {
if c.f.myVpnAddrsTable.Contains(vpnIp) {
_, found := c.f.myVpnAddrsTable.Lookup(vpnIp)
if found {
// Only returning the default certificate since its impossible
// for any other host but ourselves to have more than 1
return c.f.pki.getCertState().GetDefaultCertificate().Copy()

View File

@@ -6,7 +6,6 @@ import (
"net/netip"
"strconv"
"strings"
"sync"
"github.com/gaissmai/bart"
"github.com/miekg/dns"
@@ -21,16 +20,17 @@ var dnsServer *dns.Server
var dnsAddr string
type dnsRecords struct {
sync.RWMutex
syncRWMutex
l *logrus.Logger
dnsMap4 map[string]netip.Addr
dnsMap6 map[string]netip.Addr
hostMap *HostMap
myVpnAddrsTable *bart.Lite
myVpnAddrsTable *bart.Table[struct{}]
}
func newDnsRecords(l *logrus.Logger, cs *CertState, hostMap *HostMap) *dnsRecords {
return &dnsRecords{
syncRWMutex: newSyncRWMutex("dns-records"),
l: l,
dnsMap4: make(map[string]netip.Addr),
dnsMap6: make(map[string]netip.Addr),
@@ -112,8 +112,8 @@ func (d *dnsRecords) isSelfNebulaOrLocalhost(addr string) bool {
return true
}
//if we found it in this table, it's good
return d.myVpnAddrsTable.Contains(b)
_, found := d.myVpnAddrsTable.Lookup(b)
return found //if we found it in this table, it's good
}
func (d *dnsRecords) parseQuery(m *dns.Msg, w dns.ResponseWriter) {

View File

@@ -13,11 +13,11 @@ pki:
# disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid.
#disconnect_invalid: true
# initiating_version controls which certificate version is used when initiating handshakes.
# default_version controls which certificate version is used in handshakes.
# This setting only applies if both a v1 and a v2 certificate are configured, in which case it will default to `1`.
# Once all hosts in the mesh are configured with both a v1 and v2 certificate then this should be changed to `2`.
# After all hosts in the mesh are using a v2 certificate then v1 certificates are no longer needed.
# initiating_version: 1
# default_version: 1
# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
@@ -275,10 +275,6 @@ tun:
# On linux only, set to true to manage unsafe routes directly on the system route table with gateway routes instead of
# in nebula configuration files. Default false, not reloadable.
#use_system_route_table: false
# Buffer size for reading routes updates. 0 means default system buffer size. (/proc/sys/net/core/rmem_default).
# If using massive routes updates, for example BGP, you may need to increase this value to avoid packet loss.
# SO_RCVBUFFORCE is used to avoid having to raise the system wide max
#use_system_route_table_buffer_size: 0
# Configure logging level
logging:

View File

@@ -5,12 +5,8 @@ import (
"fmt"
"log"
"net"
"os"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/overlay"
"github.com/slackhq/nebula/service"
)
@@ -63,16 +59,7 @@ pki:
if err := cfg.LoadString(configStr); err != nil {
return err
}
logger := logrus.New()
logger.Out = os.Stdout
ctrl, err := nebula.Main(&cfg, false, "custom-app", logger, overlay.NewUserDeviceFromConfig)
if err != nil {
return err
}
svc, err := service.New(ctrl)
svc, err := service.New(&cfg)
if err != nil {
return err
}

View File

@@ -10,7 +10,6 @@ import (
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/gaissmai/bart"
@@ -53,7 +52,7 @@ type Firewall struct {
// routableNetworks describes the vpn addresses as well as any unsafe networks issued to us in the certificate.
// The vpn addresses are a full bit match while the unsafe networks only match the prefix
routableNetworks *bart.Lite
routableNetworks *bart.Table[struct{}]
// assignedNetworks is a list of vpn networks assigned to us in the certificate.
assignedNetworks []netip.Prefix
@@ -76,7 +75,7 @@ type firewallMetrics struct {
}
type FirewallConntrack struct {
sync.Mutex
syncMutex
Conns map[firewall.Packet]*conn
TimerWheel *TimerWheel[firewall.Packet]
@@ -125,7 +124,7 @@ type firewallPort map[int32]*FirewallCA
type firewallLocalCIDR struct {
Any bool
LocalCIDR *bart.Lite
LocalCIDR *bart.Table[struct{}]
}
// NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts.
@@ -148,22 +147,23 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
tmax = defaultTimeout
}
routableNetworks := new(bart.Lite)
routableNetworks := new(bart.Table[struct{}])
var assignedNetworks []netip.Prefix
for _, network := range c.Networks() {
nprefix := netip.PrefixFrom(network.Addr(), network.Addr().BitLen())
routableNetworks.Insert(nprefix)
routableNetworks.Insert(nprefix, struct{}{})
assignedNetworks = append(assignedNetworks, network)
}
hasUnsafeNetworks := false
for _, n := range c.UnsafeNetworks() {
routableNetworks.Insert(n)
routableNetworks.Insert(n, struct{}{})
hasUnsafeNetworks = true
}
return &Firewall{
Conntrack: &FirewallConntrack{
syncMutex: newSyncMutex("firewall-conntrack"),
Conns: make(map[firewall.Packet]*conn),
TimerWheel: NewTimerWheel[firewall.Packet](tmin, tmax),
},
@@ -431,7 +431,8 @@ func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *
// Make sure remote address matches nebula certificate
if h.networks != nil {
if !h.networks.Contains(fp.RemoteAddr) {
_, ok := h.networks.Lookup(fp.RemoteAddr)
if !ok {
f.metrics(incoming).droppedRemoteAddr.Inc(1)
return ErrInvalidRemoteIP
}
@@ -444,7 +445,8 @@ func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *
}
// Make sure we are supposed to be handling this local ip address
if !f.routableNetworks.Contains(fp.LocalAddr) {
_, ok := f.routableNetworks.Lookup(fp.LocalAddr)
if !ok {
f.metrics(incoming).droppedLocalAddr.Inc(1)
return ErrInvalidLocalIP
}
@@ -750,7 +752,7 @@ func (fc *FirewallCA) match(p firewall.Packet, c *cert.CachedCertificate, caPool
func (fr *FirewallRule) addRule(f *Firewall, groups []string, host string, ip, localCIDR netip.Prefix) error {
flc := func() *firewallLocalCIDR {
return &firewallLocalCIDR{
LocalCIDR: new(bart.Lite),
LocalCIDR: new(bart.Table[struct{}]),
}
}
@@ -877,7 +879,7 @@ func (flc *firewallLocalCIDR) addRule(f *Firewall, localIp netip.Prefix) error {
}
for _, network := range f.assignedNetworks {
flc.LocalCIDR.Insert(network)
flc.LocalCIDR.Insert(network, struct{}{})
}
return nil
@@ -886,7 +888,7 @@ func (flc *firewallLocalCIDR) addRule(f *Firewall, localIp netip.Prefix) error {
return nil
}
flc.LocalCIDR.Insert(localIp)
flc.LocalCIDR.Insert(localIp, struct{}{})
return nil
}
@@ -899,7 +901,8 @@ func (flc *firewallLocalCIDR) match(p firewall.Packet, c *cert.CachedCertificate
return true
}
return flc.LocalCIDR.Contains(p.LocalAddr)
_, ok := flc.LocalCIDR.Lookup(p.LocalAddr)
return ok
}
type rule struct {

go.mod (21 lines changed)
View File

@@ -1,6 +1,6 @@
module github.com/slackhq/nebula
go 1.23.0
go 1.23.6
toolchain go1.24.1
@@ -8,28 +8,30 @@ require (
dario.cat/mergo v1.0.1
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
github.com/armon/go-radix v1.0.0
github.com/clarkmcc/go-dag v0.0.0-20220908000337-9c3ba5b365fc
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
github.com/flynn/noise v1.1.0
github.com/gaissmai/bart v0.20.4
github.com/gaissmai/bart v0.20.1
github.com/gogo/protobuf v1.3.2
github.com/google/gopacket v1.1.19
github.com/kardianos/service v1.2.2
github.com/miekg/dns v1.1.65
github.com/miekg/dns v1.1.64
github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
github.com/prometheus/client_golang v1.22.0
github.com/prometheus/client_golang v1.21.1
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/sirupsen/logrus v1.9.3
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6
github.com/stretchr/testify v1.10.0
github.com/timandy/routine v1.1.5
github.com/vishvananda/netlink v1.3.0
golang.org/x/crypto v0.37.0
golang.org/x/crypto v0.36.0
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
golang.org/x/net v0.39.0
golang.org/x/sync v0.13.0
golang.org/x/sys v0.32.0
golang.org/x/term v0.31.0
golang.org/x/net v0.38.0
golang.org/x/sync v0.12.0
golang.org/x/sys v0.31.0
golang.org/x/term v0.30.0
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b
golang.zx2c4.com/wireguard/windows v0.5.3
@@ -43,6 +45,7 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect

go.sum (46 lines changed)
View File

@@ -17,6 +17,8 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/clarkmcc/go-dag v0.0.0-20220908000337-9c3ba5b365fc h1:6e91sWiDE69Jl0WUsY/LvTCBPRBe6b2j8H7W96JGJ4s=
github.com/clarkmcc/go-dag v0.0.0-20220908000337-9c3ba5b365fc/go.mod h1:RGIcF96ORCYAsdz60Ou9mPBNa4+DjoQFS8nelPniFoY=
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps=
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -24,8 +26,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/gaissmai/bart v0.20.4 h1:Ik47r1fy3jRVU+1eYzKSW3ho2UgBVTVnUS8O993584U=
github.com/gaissmai/bart v0.20.4/go.mod h1:cEed+ge8dalcbpi8wtS9x9m2hn/fNJH5suhdGQOHnYk=
github.com/gaissmai/bart v0.20.1 h1:igNss0zDsSY8e+ophKgD9KJVPKBOo7uSVjyKCL7nIzo=
github.com/gaissmai/bart v0.20.1/go.mod h1:JJzMAhNF5Rjo4SF4jWBrANuJfqY+FvsFhW7t1UZJ+XY=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -53,8 +55,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
@@ -68,8 +70,8 @@ github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX
github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -83,8 +85,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.65 h1:0+tIPHzUW0GCge7IiK3guGP57VAw7hoPDfApjkMD1Fc=
github.com/miekg/dns v1.1.65/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
github.com/miekg/dns v1.1.64 h1:wuZgD9wwCE6XMT05UU/mlSko71eRSXEAm2EbjQXLKnQ=
github.com/miekg/dns v1.1.64/go.mod h1:Dzw9769uoKVaLuODMDZz9M6ynFU6Em65csPuoi8G0ck=
github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b h1:J/AzCvg5z0Hn1rqZUJjpbzALUmkKX0Zwbc/i4fw7Sfk=
github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -106,8 +108,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0=
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -145,6 +147,10 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/timandy/routine v1.1.1 h1:6/Z7qLFZj3GrzuRksBFzIG8YGUh8CLhjnnMePBQTrEI=
github.com/timandy/routine v1.1.1/go.mod h1:OZHPOKSvqL/ZvqXFkNZyit0xIVelERptYXdAHH00adQ=
github.com/timandy/routine v1.1.5 h1:LSpm7Iijwb9imIPlucl4krpr2EeCeAUvifiQ9Uf5X+M=
github.com/timandy/routine v1.1.5/go.mod h1:kXslgIosdY8LW0byTyPnenDgn4/azt2euufAq9rK51w=
github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
@@ -156,8 +162,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY=
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
@@ -176,8 +182,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -185,8 +191,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -204,11 +210,11 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=

View File

@@ -25,7 +25,7 @@ func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
// If we're connecting to a v6 address we must use a v2 cert
cs := f.pki.getCertState()
v := cs.initiatingVersion
v := cs.defaultVersion
for _, a := range hh.hostinfo.vpnAddrs {
if a.Is6() {
v = cert.Version2
@@ -101,7 +101,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
if crt == nil {
f.l.WithField("udpAddr", addr).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).
WithField("certVersion", cs.initiatingVersion).
WithField("certVersion", cs.defaultVersion).
Error("Unable to handshake with host because no certificate is available")
}
@@ -192,7 +192,8 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
for _, network := range remoteCert.Certificate.Networks() {
vpnAddr := network.Addr()
if f.myVpnAddrsTable.Contains(vpnAddr) {
_, found := f.myVpnAddrsTable.Lookup(vpnAddr)
if found {
f.l.WithField("vpnAddr", vpnAddr).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("certVersion", certVersion).
@@ -203,7 +204,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
}
// vpnAddrs outside our vpn networks are of no use to us, filter them out
if !f.myVpnNetworksTable.Contains(vpnAddr) {
if _, ok := f.myVpnNetworksTable.Lookup(vpnAddr); !ok {
continue
}
@@ -242,6 +243,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
}
hostinfo := &HostInfo{
syncRWMutex: newSyncRWMutex("hostinfo"),
ConnectionState: ci,
localIndexId: myIndex,
remoteIndexId: hs.Details.InitiatorIndex,
@@ -249,6 +251,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
HandshakePacket: make(map[uint8][]byte, 0),
lastHandshakeTime: hs.Details.Time,
relayState: RelayState{
syncRWMutex: newSyncRWMutex("relay-state"),
relays: map[netip.Addr]struct{}{},
relayForByAddr: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{},
@@ -578,7 +581,7 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
for _, network := range vpnNetworks {
// vpnAddrs outside our vpn networks are of no use to us, filter them out
vpnAddr := network.Addr()
if !f.myVpnNetworksTable.Contains(vpnAddr) {
if _, ok := f.myVpnNetworksTable.Lookup(vpnAddr); !ok {
continue
}

View File

@@ -8,7 +8,6 @@ import (
"errors"
"net/netip"
"slices"
"sync"
"time"
"github.com/rcrowley/go-metrics"
@@ -45,7 +44,7 @@ type HandshakeConfig struct {
type HandshakeManager struct {
// Mutex for interacting with the vpnIps and indexes maps
sync.RWMutex
syncRWMutex
vpnIps map[netip.Addr]*HandshakeHostInfo
indexes map[uint32]*HandshakeHostInfo
@@ -66,7 +65,7 @@ type HandshakeManager struct {
}
type HandshakeHostInfo struct {
sync.Mutex
syncMutex
startTime time.Time // Time that we first started trying with this handshake
ready bool // Is the handshake ready
@@ -104,6 +103,7 @@ func (hh *HandshakeHostInfo) cachePacket(l *logrus.Logger, t header.MessageType,
func NewHandshakeManager(l *logrus.Logger, mainHostMap *HostMap, lightHouse *LightHouse, outside udp.Conn, config HandshakeConfig) *HandshakeManager {
return &HandshakeManager{
syncRWMutex: newSyncRWMutex("handshake-manager"),
vpnIps: map[netip.Addr]*HandshakeHostInfo{},
indexes: map[uint32]*HandshakeHostInfo{},
mainHostMap: mainHostMap,
@@ -111,7 +111,7 @@ func NewHandshakeManager(l *logrus.Logger, mainHostMap *HostMap, lightHouse *Lig
outside: outside,
config: config,
trigger: make(chan netip.Addr, config.triggerBuffer),
OutboundHandshakeTimer: NewLockingTimerWheel[netip.Addr](config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
OutboundHandshakeTimer: NewLockingTimerWheel[netip.Addr]("handshake-manager-timer", config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
messageMetrics: config.messageMetrics,
metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil),
metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil),
@@ -274,7 +274,8 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
}
// Don't relay through the host I'm trying to connect to
if hm.f.myVpnAddrsTable.Contains(relay) {
_, found := hm.f.myVpnAddrsTable.Lookup(relay)
if found {
continue
}
@@ -447,9 +448,11 @@ func (hm *HandshakeManager) StartHandshake(vpnAddr netip.Addr, cacheCb func(*Han
}
hostinfo := &HostInfo{
syncRWMutex: newSyncRWMutex("hostinfo"),
vpnAddrs: []netip.Addr{vpnAddr},
HandshakePacket: make(map[uint8][]byte, 0),
relayState: RelayState{
syncRWMutex: newSyncRWMutex("relay-state"),
relays: map[netip.Addr]struct{}{},
relayForByAddr: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{},
@@ -457,6 +460,7 @@ func (hm *HandshakeManager) StartHandshake(vpnAddr netip.Addr, cacheCb func(*Han
}
hh := &HandshakeHostInfo{
syncMutex: newSyncMutex("handshake-hostinfo"),
hostinfo: hostinfo,
startTime: time.Now(),
}

View File

@@ -24,10 +24,10 @@ func Test_NewHandshakeManagerVpnIp(t *testing.T) {
lh := newTestLighthouse()
cs := &CertState{
initiatingVersion: cert.Version1,
privateKey: []byte{},
v1Cert: &dummyCert{version: cert.Version1},
v1HandshakeBytes: []byte{},
defaultVersion: cert.Version1,
privateKey: []byte{},
v1Cert: &dummyCert{version: cert.Version1},
v1HandshakeBytes: []byte{},
}
blah := NewHandshakeManager(l, mainHM, lh, &udp.NoopConn{}, defaultHandshakeConfig)
@@ -98,5 +98,5 @@ func (mw *mockEncWriter) GetHostInfo(_ netip.Addr) *HostInfo {
}
func (mw *mockEncWriter) GetCertState() *CertState {
return &CertState{initiatingVersion: cert.Version2}
return &CertState{defaultVersion: cert.Version2}
}

View File

@@ -4,7 +4,6 @@ import (
"errors"
"net"
"net/netip"
"sync"
"sync/atomic"
"time"
@@ -53,7 +52,7 @@ type Relay struct {
}
type HostMap struct {
sync.RWMutex //Because we concurrently read and write to our maps
syncRWMutex //Because we concurrently read and write to our maps
Indexes map[uint32]*HostInfo
Relays map[uint32]*HostInfo // Maps a Relay IDX to a Relay HostInfo object
RemoteIndexes map[uint32]*HostInfo
@@ -66,7 +65,7 @@ type HostMap struct {
// struct, make a copy of an existing value, edit the fileds in the copy, and
// then store a pointer to the new copy in both realyForBy* maps.
type RelayState struct {
sync.RWMutex
syncRWMutex
relays map[netip.Addr]struct{} // Set of vpnAddr's of Hosts to use as relays to access this peer
// For data race avoidance, the contents of a *Relay are treated immutably. To update a *Relay, copy the existing data,
@@ -209,6 +208,7 @@ func (rs *RelayState) InsertRelay(ip netip.Addr, idx uint32, r *Relay) {
}
type HostInfo struct {
syncRWMutex
remote netip.AddrPort
remotes *RemoteList
promoteCounter atomic.Uint32
@@ -223,7 +223,7 @@ type HostInfo struct {
recvError atomic.Uint32
// networks are both all vpn and unsafe networks assigned to this host
networks *bart.Lite
networks *bart.Table[struct{}]
relayState RelayState
// HandshakePacket records the packets used to create this hostinfo
@@ -288,6 +288,7 @@ func NewHostMapFromConfig(l *logrus.Logger, c *config.C) *HostMap {
func newHostMap(l *logrus.Logger) *HostMap {
return &HostMap{
syncRWMutex: newSyncRWMutex("hostmap"),
Indexes: map[uint32]*HostInfo{},
Relays: map[uint32]*HostInfo{},
RemoteIndexes: map[uint32]*HostInfo{},
@@ -732,13 +733,13 @@ func (i *HostInfo) buildNetworks(networks, unsafeNetworks []netip.Prefix) {
return
}
i.networks = new(bart.Lite)
i.networks = new(bart.Table[struct{}])
for _, network := range networks {
i.networks.Insert(network)
i.networks.Insert(network, struct{}{})
}
for _, network := range unsafeNetworks {
i.networks.Insert(network)
i.networks.Insert(network, struct{}{})
}
}

View File

@@ -22,12 +22,14 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
// Ignore local broadcast packets
if f.dropLocalBroadcast {
if f.myBroadcastAddrsTable.Contains(fwPacket.RemoteAddr) {
_, found := f.myBroadcastAddrsTable.Lookup(fwPacket.RemoteAddr)
if found {
return
}
}
if f.myVpnAddrsTable.Contains(fwPacket.RemoteAddr) {
_, found := f.myVpnAddrsTable.Lookup(fwPacket.RemoteAddr)
if found {
// Immediately forward packets from self to self.
// This should only happen on Darwin-based and FreeBSD hosts, which
// routes packets from the Nebula addr to the Nebula addr through the Nebula
@@ -128,7 +130,8 @@ func (f *Interface) Handshake(vpnAddr netip.Addr) {
// getOrHandshakeNoRouting returns nil if the vpnAddr is not routable.
// If the 2nd return var is false then the hostinfo is not ready to be used in a tunnel
func (f *Interface) getOrHandshakeNoRouting(vpnAddr netip.Addr, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) {
if f.myVpnNetworksTable.Contains(vpnAddr) {
_, found := f.myVpnNetworksTable.Lookup(vpnAddr)
if found {
return f.handshakeManager.GetOrHandshake(vpnAddr, cacheCallback)
}

View File

@@ -61,11 +61,11 @@ type Interface struct {
serveDns bool
createTime time.Time
lightHouse *LightHouse
myBroadcastAddrsTable *bart.Lite
myVpnAddrs []netip.Addr // A list of addresses assigned to us via our certificate
myVpnAddrsTable *bart.Lite
myVpnNetworks []netip.Prefix // A list of networks assigned to us via our certificate
myVpnNetworksTable *bart.Lite
myBroadcastAddrsTable *bart.Table[struct{}]
myVpnAddrs []netip.Addr // A list of addresses assigned to us via our certificate
myVpnAddrsTable *bart.Table[struct{}] // A table of addresses assigned to us via our certificate
myVpnNetworks []netip.Prefix // A list of networks assigned to us via our certificate
myVpnNetworksTable *bart.Table[struct{}] // A table of networks assigned to us via our certificate
dropLocalBroadcast bool
dropMulticast bool
routines int
@@ -410,7 +410,7 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
udpStats := udp.NewUDPStatsEmitter(f.writers)
certExpirationGauge := metrics.GetOrRegisterGauge("certificate.ttl_seconds", nil)
certInitiatingVersion := metrics.GetOrRegisterGauge("certificate.initiating_version", nil)
certDefaultVersion := metrics.GetOrRegisterGauge("certificate.default_version", nil)
certMaxVersion := metrics.GetOrRegisterGauge("certificate.max_version", nil)
for {
@@ -425,7 +425,7 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
certState := f.pki.getCertState()
defaultCrt := certState.GetDefaultCertificate()
certExpirationGauge.Update(int64(defaultCrt.NotAfter().Sub(time.Now()) / time.Second))
certInitiatingVersion.Update(int64(defaultCrt.Version()))
certDefaultVersion.Update(int64(defaultCrt.Version()))
// Report the max certificate version we are capable of using
if certState.v2Cert != nil {

View File

@@ -9,7 +9,6 @@ import (
"net/netip"
"slices"
"strconv"
"sync"
"sync/atomic"
"time"
@@ -27,12 +26,12 @@ var ErrHostNotKnown = errors.New("host not known")
type LightHouse struct {
//TODO: We need a timer wheel to kick out vpnAddrs that haven't reported in a long time
sync.RWMutex //Because we concurrently read and write to our maps
syncRWMutex //Because we concurrently read and write to our maps
ctx context.Context
amLighthouse bool
myVpnNetworks []netip.Prefix
myVpnNetworksTable *bart.Lite
myVpnNetworksTable *bart.Table[struct{}]
punchConn udp.Conn
punchy *Punchy
@@ -96,6 +95,7 @@ func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C,
}
h := LightHouse{
syncRWMutex: newSyncRWMutex("lighthouse"),
ctx: ctx,
amLighthouse: amLighthouse,
myVpnNetworks: cs.myVpnNetworks,
@@ -201,7 +201,8 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
//TODO: we could technically insert all returned addrs instead of just the first one if a dns lookup was used
addr := addrs[0].Unmap()
if lh.myVpnNetworksTable.Contains(addr) {
_, found := lh.myVpnNetworksTable.Lookup(addr)
if found {
lh.l.WithField("addr", rawAddr).WithField("entry", i+1).
Warn("Ignoring lighthouse.advertise_addrs report because it is within the nebula network range")
continue
@@ -358,7 +359,8 @@ func (lh *LightHouse) parseLighthouses(c *config.C, lhMap map[netip.Addr]struct{
return util.NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, err)
}
if !lh.myVpnNetworksTable.Contains(addr) {
_, found := lh.myVpnNetworksTable.Lookup(addr)
if !found {
return util.NewContextualError("lighthouse host is not in our networks, invalid", m{"vpnAddr": addr, "networks": lh.myVpnNetworks}, nil)
}
lhMap[addr] = struct{}{}
@@ -429,7 +431,8 @@ func (lh *LightHouse) loadStaticMap(c *config.C, staticList map[netip.Addr]struc
return util.NewContextualError("Unable to parse static_host_map entry", m{"host": k, "entry": i + 1}, err)
}
if !lh.myVpnNetworksTable.Contains(vpnAddr) {
_, found := lh.myVpnNetworksTable.Lookup(vpnAddr)
if !found {
return util.NewContextualError("static_host_map key is not in our network, invalid", m{"vpnAddr": vpnAddr, "networks": lh.myVpnNetworks, "entry": i + 1}, nil)
}
@@ -472,6 +475,7 @@ func (lh *LightHouse) QueryServer(vpnAddr netip.Addr) {
return
}
chanDebugSend("lighthouse-query-chan")
lh.queryChan <- vpnAddr
}
@@ -650,7 +654,8 @@ func (lh *LightHouse) shouldAdd(vpnAddr netip.Addr, to netip.Addr) bool {
return false
}
if lh.myVpnNetworksTable.Contains(to) {
_, found := lh.myVpnNetworksTable.Lookup(to)
if found {
return false
}
@@ -670,7 +675,8 @@ func (lh *LightHouse) unlockedShouldAddV4(vpnAddr netip.Addr, to *V4AddrPort) bo
return false
}
if lh.myVpnNetworksTable.Contains(udpAddr.Addr()) {
_, found := lh.myVpnNetworksTable.Lookup(udpAddr.Addr())
if found {
return false
}
@@ -690,7 +696,8 @@ func (lh *LightHouse) unlockedShouldAddV6(vpnAddr netip.Addr, to *V6AddrPort) bo
return false
}
if lh.myVpnNetworksTable.Contains(udpAddr.Addr()) {
_, found := lh.myVpnNetworksTable.Lookup(udpAddr.Addr())
if found {
return false
}
@@ -725,6 +732,8 @@ func (lh *LightHouse) startQueryWorker() {
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
chanDebugRecv("lighthouse-query-chan")
for {
select {
case <-lh.ctx.Done():
@@ -757,7 +766,7 @@ func (lh *LightHouse) innerQueryServer(addr netip.Addr, nb, out []byte) {
if hi != nil {
v = hi.ConnectionState.myCert.Version()
} else {
v = lh.ifce.GetCertState().initiatingVersion
v = lh.ifce.GetCertState().defaultVersion
}
if v == cert.Version1 {
@@ -850,7 +859,8 @@ func (lh *LightHouse) SendUpdate() {
lal := lh.GetLocalAllowList()
for _, e := range localAddrs(lh.l, lal) {
if lh.myVpnNetworksTable.Contains(e) {
_, found := lh.myVpnNetworksTable.Lookup(e)
if found {
continue
}
@@ -876,7 +886,7 @@ func (lh *LightHouse) SendUpdate() {
if hi != nil {
v = hi.ConnectionState.myCert.Version()
} else {
v = lh.ifce.GetCertState().initiatingVersion
v = lh.ifce.GetCertState().defaultVersion
}
if v == cert.Version1 {
if v1Update == nil {
@@ -1107,7 +1117,7 @@ func (lhh *LightHouseHandler) sendHostPunchNotification(n *NebulaMeta, fromVpnAd
targetHI := lhh.lh.ifce.GetHostInfo(punchNotifDest)
var useVersion cert.Version
if targetHI == nil {
useVersion = lhh.lh.ifce.GetCertState().initiatingVersion
useVersion = lhh.lh.ifce.GetCertState().defaultVersion
} else {
crt := targetHI.GetCert().Certificate
useVersion = crt.Version()
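The chanDebugSend/chanDebugRecv calls added above fold the lighthouse query channel into the same ordering checks as the mutexes. A rough sketch of the two sides, with interpretation added as comments (the comments are mine, not code from this change):

// Sender: before blocking on the channel, verify the goroutine holds nothing
// that is disallowed while grabbing "lighthouse-query-chan".
chanDebugSend("lighthouse-query-chan")
lh.queryChan <- vpnAddr

// Worker: record "lighthouse-query-chan" once in the goroutine's thread-local
// lock set, so later acquisitions (for example the hostmap lock, which lists
// "lighthouse-query-chan" as allowed) are checked against it.
chanDebugRecv("lighthouse-query-chan")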

View File

@@ -31,8 +31,8 @@ func TestOldIPv4Only(t *testing.T) {
func Test_lhStaticMapping(t *testing.T) {
l := test.NewLogger()
myVpnNet := netip.MustParsePrefix("10.128.0.1/16")
nt := new(bart.Lite)
nt.Insert(myVpnNet)
nt := new(bart.Table[struct{}])
nt.Insert(myVpnNet, struct{}{})
cs := &CertState{
myVpnNetworks: []netip.Prefix{myVpnNet},
myVpnNetworksTable: nt,
@@ -56,8 +56,8 @@ func Test_lhStaticMapping(t *testing.T) {
func TestReloadLighthouseInterval(t *testing.T) {
l := test.NewLogger()
myVpnNet := netip.MustParsePrefix("10.128.0.1/16")
nt := new(bart.Lite)
nt.Insert(myVpnNet)
nt := new(bart.Table[struct{}])
nt.Insert(myVpnNet, struct{}{})
cs := &CertState{
myVpnNetworks: []netip.Prefix{myVpnNet},
myVpnNetworksTable: nt,
@@ -91,8 +91,8 @@ func TestReloadLighthouseInterval(t *testing.T) {
func BenchmarkLighthouseHandleRequest(b *testing.B) {
l := test.NewLogger()
myVpnNet := netip.MustParsePrefix("10.128.0.1/0")
nt := new(bart.Lite)
nt.Insert(myVpnNet)
nt := new(bart.Table[struct{}])
nt.Insert(myVpnNet, struct{}{})
cs := &CertState{
myVpnNetworks: []netip.Prefix{myVpnNet},
myVpnNetworksTable: nt,
@@ -196,8 +196,8 @@ func TestLighthouse_Memory(t *testing.T) {
c.Settings["listen"] = map[string]any{"port": 4242}
myVpnNet := netip.MustParsePrefix("10.128.0.1/24")
nt := new(bart.Lite)
nt.Insert(myVpnNet)
nt := new(bart.Table[struct{}])
nt.Insert(myVpnNet, struct{}{})
cs := &CertState{
myVpnNetworks: []netip.Prefix{myVpnNet},
myVpnNetworksTable: nt,
@@ -281,8 +281,8 @@ func TestLighthouse_reload(t *testing.T) {
c.Settings["listen"] = map[string]any{"port": 4242}
myVpnNet := netip.MustParsePrefix("10.128.0.1/24")
nt := new(bart.Lite)
nt.Insert(myVpnNet)
nt := new(bart.Table[struct{}])
nt.Insert(myVpnNet, struct{}{})
cs := &CertState{
myVpnNetworks: []netip.Prefix{myVpnNet},
myVpnNetworksTable: nt,
@@ -417,7 +417,7 @@ func (tw *testEncWriter) GetHostInfo(vpnIp netip.Addr) *HostInfo {
}
func (tw *testEncWriter) GetCertState() *CertState {
return &CertState{initiatingVersion: tw.protocolVersion}
return &CertState{defaultVersion: tw.protocolVersion}
}
// assertIp4InArray asserts every address in want is at the same position in have and that the lengths match

mutex_debug.go (new file, 195 lines)
View File

@@ -0,0 +1,195 @@
//go:build mutex_debug
// +build mutex_debug
package nebula
import (
"fmt"
"runtime"
"sync"
"github.com/clarkmcc/go-dag"
"github.com/timandy/routine"
)
type mutexKey = string
// For each key in this map, the value is the list of lock types you may already hold
// when you want to grab that key. This ensures that locks are always acquired
// in the same order, to prevent deadlocks.
var allowedConcurrentLocks = map[mutexKey][]mutexKey{
"connection-manager-in": {"hostmap"},
"connection-manager-out": {"connection-manager-in", "handshake-hostinfo", "handshake-manager"},
"connection-manager-relay-used": {"handshake-hostinfo"},
"connection-manager-timer": {"connection-manager-out"},
// "connection-state-write": {"hostmap"},
"firewall-conntrack": {"handshake-hostinfo"},
"handshake-manager": {"handshake-hostinfo", "hostmap"},
"handshake-manager-timer": {"handshake-manager"},
"hostmap": {"lighthouse-query-chan", "handshake-hostinfo"},
"lighthouse": {"handshake-hostinfo"},
"relay-state": {"connection-manager-relay-used", "hostmap"},
"remote-list": {"lighthouse", "handshake-manager"},
"lighthouse-query-chan": {"handshake-hostinfo"},
}
type mutexValue struct {
file string
line int
}
func (m mutexValue) String() string {
return fmt.Sprintf("%s:%d", m.file, m.line)
}
var threadLocal = routine.NewThreadLocalWithInitial[map[mutexKey]mutexValue](func() map[mutexKey]mutexValue { return map[mutexKey]mutexValue{} })
var allowedDAG dag.AcyclicGraph
// We build a directed acyclic graph to assert that the locks can only be
// acquired in a consistent order. If there are cycles in the DAG, then we
// know that the locking order is not guaranteed.
func init() {
for k, v := range allowedConcurrentLocks {
allowedDAG.Add(k)
for _, t := range v {
allowedDAG.Add(t)
}
}
for k, v := range allowedConcurrentLocks {
for _, t := range v {
allowedDAG.Connect(dag.BasicEdge(k, t))
}
}
if cycles := allowedDAG.Cycles(); len(cycles) > 0 {
panic(fmt.Errorf("Cycles found in allowedConcurrentLocks: %v", cycles))
}
// Rebuild allowedConcurrentLocks as a flattened list of all possibilities
for k := range allowedConcurrentLocks {
ancestors, err := allowedDAG.Ancestors(k)
if err != nil {
panic(err)
}
var allowed []mutexKey
for t := range ancestors {
allowed = append(allowed, t.(mutexKey))
}
allowedConcurrentLocks[k] = allowed
}
}
type syncRWMutex struct {
sync.RWMutex
mutexKey
}
type syncMutex struct {
sync.Mutex
mutexKey
}
func newSyncRWMutex(key mutexKey) syncRWMutex {
return syncRWMutex{
mutexKey: key,
}
}
func newSyncMutex(key mutexKey) syncMutex {
return syncMutex{
mutexKey: key,
}
}
func alertMutex(err error) {
panic(err)
// NOTE: you could switch to this log line and remove the panic if you want
// to log all failures instead of panicking on the first one
//log.Print(err, string(debug.Stack()))
}
func checkMutex(state map[mutexKey]mutexValue, add mutexKey) {
if add == "" {
alertMutex(fmt.Errorf("mutex not initialized with mutexKey"))
}
allowedConcurrent := allowedConcurrentLocks[add]
for k, v := range state {
if add == k {
alertMutex(fmt.Errorf("re-entrant lock: %s. previous allocation: %s", add, v))
}
// TODO use slices.Contains, but requires go1.21
var found bool
for _, a := range allowedConcurrent {
if a == k {
found = true
break
}
}
if !found {
alertMutex(fmt.Errorf("grabbing %s lock and already have these locks: %s", add, state))
}
}
}
func chanDebugRecv(key mutexKey) {
m := threadLocal.Get()
checkMutex(m, key)
v := mutexValue{}
_, v.file, v.line, _ = runtime.Caller(1)
m[key] = v
}
func chanDebugSend(key mutexKey) {
m := threadLocal.Get()
checkMutex(m, key)
}
func (s *syncRWMutex) Lock() {
m := threadLocal.Get()
checkMutex(m, s.mutexKey)
v := mutexValue{}
_, v.file, v.line, _ = runtime.Caller(1)
m[s.mutexKey] = v
s.RWMutex.Lock()
}
func (s *syncRWMutex) Unlock() {
m := threadLocal.Get()
delete(m, s.mutexKey)
s.RWMutex.Unlock()
}
func (s *syncRWMutex) RLock() {
m := threadLocal.Get()
checkMutex(m, s.mutexKey)
v := mutexValue{}
_, v.file, v.line, _ = runtime.Caller(1)
m[s.mutexKey] = v
s.RWMutex.RLock()
}
func (s *syncRWMutex) RUnlock() {
m := threadLocal.Get()
delete(m, s.mutexKey)
s.RWMutex.RUnlock()
}
func (s *syncMutex) Lock() {
m := threadLocal.Get()
checkMutex(m, s.mutexKey)
v := mutexValue{}
_, v.file, v.line, _ = runtime.Caller(1)
m[s.mutexKey] = v
s.Mutex.Lock()
}
func (s *syncMutex) Unlock() {
m := threadLocal.Get()
delete(m, s.mutexKey)
s.Mutex.Unlock()
}
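A minimal sketch of how these wrappers behave when the mutex_debug tag is set; the variable names are illustrative and the keys come from allowedConcurrentLocks above:

hostinfoLock := newSyncRWMutex("handshake-hostinfo")
hostmapLock := newSyncRWMutex("hostmap")

// Allowed: "hostmap" lists "handshake-hostinfo" as a lock you may already hold,
// so this order passes checkMutex.
hostinfoLock.Lock()
hostmapLock.Lock()
hostmapLock.Unlock()
hostinfoLock.Unlock()

// Not allowed: the reverse order is not declared, so checkMutex calls
// alertMutex, which panics and reports the file:line where the already-held
// lock was taken.
hostmapLock.Lock()
hostinfoLock.Lock() // panics under mutex_debug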

mutex_nodebug.go (new file, 23 lines)
View File

@@ -0,0 +1,23 @@
//go:build !mutex_debug
// +build !mutex_debug
package nebula
import (
"sync"
)
type mutexKey = string
type syncRWMutex = sync.RWMutex
type syncMutex = sync.Mutex
func newSyncRWMutex(mutexKey) syncRWMutex {
return sync.RWMutex{}
}
func newSyncMutex(mutexKey) syncMutex {
return sync.Mutex{}
}
func chanDebugRecv(key mutexKey) {}
func chanDebugSend(key mutexKey) {}
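Because both files define the same names under mutually exclusive build tags, the checks cost nothing in a normal build: syncRWMutex and syncMutex are plain type aliases and the key argument is discarded. Enabling them is a build-time switch, for example go test -tags mutex_debug ./... (a standard Go build-tag invocation, shown for illustration rather than taken from this change).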

View File

@@ -31,7 +31,8 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
//l.Error("in packet ", header, packet[HeaderLen:])
if ip.IsValid() {
if f.myVpnNetworksTable.Contains(ip.Addr()) {
_, found := f.myVpnNetworksTable.Lookup(ip.Addr())
if found {
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("udpAddr", ip).Debug("Refusing to process double encrypted packet")
}

View File

@@ -34,11 +34,10 @@ type tun struct {
deviceIndex int
ioctlFd uintptr
Routes atomic.Pointer[[]Route]
routeTree atomic.Pointer[bart.Table[routing.Gateways]]
routeChan chan struct{}
useSystemRoutes bool
useSystemRoutesBufferSize int
Routes atomic.Pointer[[]Route]
routeTree atomic.Pointer[bart.Table[routing.Gateways]]
routeChan chan struct{}
useSystemRoutes bool
l *logrus.Logger
}
@@ -125,13 +124,12 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, multiqueu
func newTunGeneric(c *config.C, l *logrus.Logger, file *os.File, vpnNetworks []netip.Prefix) (*tun, error) {
t := &tun{
ReadWriteCloser: file,
fd: int(file.Fd()),
vpnNetworks: vpnNetworks,
TXQueueLen: c.GetInt("tun.tx_queue", 500),
useSystemRoutes: c.GetBool("tun.use_system_route_table", false),
useSystemRoutesBufferSize: c.GetInt("tun.use_system_route_table_buffer_size", 0),
l: l,
ReadWriteCloser: file,
fd: int(file.Fd()),
vpnNetworks: vpnNetworks,
TXQueueLen: c.GetInt("tun.tx_queue", 500),
useSystemRoutes: c.GetBool("tun.use_system_route_table", false),
l: l,
}
err := t.reload(c, true)
@@ -533,13 +531,7 @@ func (t *tun) watchRoutes() {
rch := make(chan netlink.RouteUpdate)
doneChan := make(chan struct{})
netlinkOptions := netlink.RouteSubscribeOptions{
ReceiveBufferSize: t.useSystemRoutesBufferSize,
ReceiveBufferForceSize: t.useSystemRoutesBufferSize != 0,
ErrorCallback: func(e error) { t.l.WithError(e).Errorf("netlink error") },
}
if err := netlink.RouteSubscribeWithOptions(rch, doneChan, netlinkOptions); err != nil {
if err := netlink.RouteSubscribe(rch, doneChan); err != nil {
t.l.WithError(err).Errorf("failed to subscribe to system route changes")
return
}
@@ -549,14 +541,8 @@ func (t *tun) watchRoutes() {
go func() {
for {
select {
case r, ok := <-rch:
if ok {
t.updateRoutes(r)
} else {
// maybe we should do something here, as
// netlink stops sending updates
return
}
case r := <-rch:
t.updateRoutes(r)
case <-doneChan:
// netlink.RouteSubscriber will close the rch for us
return

pki.go (58 changed lines)
View File

@@ -33,16 +33,16 @@ type CertState struct {
v2Cert cert.Certificate
v2HandshakeBytes []byte
initiatingVersion cert.Version
privateKey []byte
pkcs11Backed bool
cipher string
defaultVersion cert.Version
privateKey []byte
pkcs11Backed bool
cipher string
myVpnNetworks []netip.Prefix
myVpnNetworksTable *bart.Lite
myVpnNetworksTable *bart.Table[struct{}]
myVpnAddrs []netip.Addr
myVpnAddrsTable *bart.Lite
myVpnBroadcastAddrsTable *bart.Lite
myVpnAddrsTable *bart.Table[struct{}]
myVpnBroadcastAddrsTable *bart.Table[struct{}]
}
func NewPKIFromConfig(l *logrus.Logger, c *config.C) (*PKI, error) {
@@ -194,7 +194,7 @@ func (p *PKI) reloadCAPool(c *config.C) *util.ContextualError {
}
func (cs *CertState) GetDefaultCertificate() cert.Certificate {
c := cs.getCertificate(cs.initiatingVersion)
c := cs.getCertificate(cs.defaultVersion)
if c == nil {
panic("No default certificate found")
}
@@ -317,37 +317,37 @@ func newCertStateFromConfig(c *config.C) (*CertState, error) {
return nil, errors.New("no certificates found in pki.cert")
}
useInitiatingVersion := uint32(1)
useDefaultVersion := uint32(1)
if v1 == nil {
// The only condition that requires v2 as the default is if only a v2 certificate is present
// We do this to avoid having to configure it specifically in the config file
useInitiatingVersion = 2
useDefaultVersion = 2
}
rawInitiatingVersion := c.GetUint32("pki.initiating_version", useInitiatingVersion)
var initiatingVersion cert.Version
switch rawInitiatingVersion {
rawDefaultVersion := c.GetUint32("pki.default_version", useDefaultVersion)
var defaultVersion cert.Version
switch rawDefaultVersion {
case 1:
if v1 == nil {
return nil, fmt.Errorf("can not use pki.initiating_version 1 without a v1 certificate in pki.cert")
return nil, fmt.Errorf("can not use pki.default_version 1 without a v1 certificate in pki.cert")
}
initiatingVersion = cert.Version1
defaultVersion = cert.Version1
case 2:
initiatingVersion = cert.Version2
defaultVersion = cert.Version2
default:
return nil, fmt.Errorf("unknown pki.initiating_version: %v", rawInitiatingVersion)
return nil, fmt.Errorf("unknown pki.default_version: %v", rawDefaultVersion)
}
return newCertState(initiatingVersion, v1, v2, isPkcs11, curve, rawKey)
return newCertState(defaultVersion, v1, v2, isPkcs11, curve, rawKey)
}
func newCertState(dv cert.Version, v1, v2 cert.Certificate, pkcs11backed bool, privateKeyCurve cert.Curve, privateKey []byte) (*CertState, error) {
cs := CertState{
privateKey: privateKey,
pkcs11Backed: pkcs11backed,
myVpnNetworksTable: new(bart.Lite),
myVpnAddrsTable: new(bart.Lite),
myVpnBroadcastAddrsTable: new(bart.Lite),
myVpnNetworksTable: new(bart.Table[struct{}]),
myVpnAddrsTable: new(bart.Table[struct{}]),
myVpnBroadcastAddrsTable: new(bart.Table[struct{}]),
}
if v1 != nil && v2 != nil {
@@ -361,7 +361,7 @@ func newCertState(dv cert.Version, v1, v2 cert.Certificate, pkcs11backed bool, p
//TODO: CERT-V2 make sure v2 has v1s address
cs.initiatingVersion = dv
cs.defaultVersion = dv
}
if v1 != nil {
@@ -380,8 +380,8 @@ func newCertState(dv cert.Version, v1, v2 cert.Certificate, pkcs11backed bool, p
cs.v1Cert = v1
cs.v1HandshakeBytes = v1hs
if cs.initiatingVersion == 0 {
cs.initiatingVersion = cert.Version1
if cs.defaultVersion == 0 {
cs.defaultVersion = cert.Version1
}
}
@@ -401,8 +401,8 @@ func newCertState(dv cert.Version, v1, v2 cert.Certificate, pkcs11backed bool, p
cs.v2Cert = v2
cs.v2HandshakeBytes = v2hs
if cs.initiatingVersion == 0 {
cs.initiatingVersion = cert.Version2
if cs.defaultVersion == 0 {
cs.defaultVersion = cert.Version2
}
}
@@ -415,16 +415,16 @@ func newCertState(dv cert.Version, v1, v2 cert.Certificate, pkcs11backed bool, p
for _, network := range crt.Networks() {
cs.myVpnNetworks = append(cs.myVpnNetworks, network)
cs.myVpnNetworksTable.Insert(network)
cs.myVpnNetworksTable.Insert(network, struct{}{})
cs.myVpnAddrs = append(cs.myVpnAddrs, network.Addr())
cs.myVpnAddrsTable.Insert(netip.PrefixFrom(network.Addr(), network.Addr().BitLen()))
cs.myVpnAddrsTable.Insert(netip.PrefixFrom(network.Addr(), network.Addr().BitLen()), struct{}{})
if network.Addr().Is4() {
addr := network.Masked().Addr().As4()
mask := net.CIDRMask(network.Bits(), network.Addr().BitLen())
binary.BigEndian.PutUint32(addr[:], binary.BigEndian.Uint32(addr[:])|^binary.BigEndian.Uint32(mask))
cs.myVpnBroadcastAddrsTable.Insert(netip.PrefixFrom(netip.AddrFrom4(addr), network.Addr().BitLen()))
cs.myVpnBroadcastAddrsTable.Insert(netip.PrefixFrom(netip.AddrFrom4(addr), network.Addr().BitLen()), struct{}{})
}
}

View File

@@ -241,13 +241,15 @@ func (rm *relayManager) handleCreateRelayRequest(v cert.Version, h *HostInfo, f
logMsg.Info("handleCreateRelayRequest")
// Is the source of the relay me? This should never happen, but did happen due to
// an issue migrating relays over to newly re-handshaked host info objects.
if f.myVpnAddrsTable.Contains(from) {
_, found := f.myVpnAddrsTable.Lookup(from)
if found {
logMsg.WithField("myIP", from).Error("Discarding relay request from myself")
return
}
// Is the target of the relay me?
if f.myVpnAddrsTable.Contains(target) {
_, found = f.myVpnAddrsTable.Lookup(target)
if found {
existingRelay, ok := h.relayState.QueryRelayForByIp(from)
if ok {
switch existingRelay.State {

View File

@@ -7,7 +7,6 @@ import (
"slices"
"sort"
"strconv"
"sync"
"sync/atomic"
"time"
@@ -185,7 +184,7 @@ func (hr *hostnamesResults) GetAddrs() []netip.AddrPort {
// It serves as a local cache of query replies, host update notifications, and locally learned addresses
type RemoteList struct {
// Every interaction with internals requires a lock!
sync.RWMutex
syncRWMutex
// The full list of vpn addresses assigned to this host
vpnAddrs []netip.Addr
@@ -215,11 +214,12 @@ type RemoteList struct {
// NewRemoteList creates a new empty RemoteList
func NewRemoteList(vpnAddrs []netip.Addr, shouldAdd func(netip.Addr) bool) *RemoteList {
r := &RemoteList{
vpnAddrs: make([]netip.Addr, len(vpnAddrs)),
addrs: make([]netip.AddrPort, 0),
relays: make([]netip.Addr, 0),
cache: make(map[netip.Addr]*cache),
shouldAdd: shouldAdd,
syncRWMutex: newSyncRWMutex("remote-list"),
vpnAddrs: make([]netip.Addr, len(vpnAddrs)),
addrs: make([]netip.AddrPort, 0),
relays: make([]netip.Addr, 0),
cache: make(map[netip.Addr]*cache),
shouldAdd: shouldAdd,
}
copy(r.vpnAddrs, vpnAddrs)
return r

View File

@@ -9,10 +9,13 @@ import (
"math"
"net"
"net/netip"
"os"
"strings"
"sync"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/overlay"
"golang.org/x/sync/errgroup"
"gvisor.dev/gvisor/pkg/buffer"
@@ -43,7 +46,14 @@ type Service struct {
}
}
func New(control *nebula.Control) (*Service, error) {
func New(config *config.C) (*Service, error) {
logger := logrus.New()
logger.Out = os.Stdout
control, err := nebula.Main(config, false, "custom-app", logger, overlay.NewUserDeviceFromConfig)
if err != nil {
return nil, err
}
control.Start()
ctx := control.Context()

View File

@@ -5,17 +5,13 @@ import (
"context"
"errors"
"net/netip"
"os"
"testing"
"time"
"dario.cat/mergo"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/cert_test"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/overlay"
"golang.org/x/sync/errgroup"
"gopkg.in/yaml.v3"
)
@@ -75,15 +71,7 @@ func newSimpleService(caCrt cert.Certificate, caKey []byte, name string, udpIp n
panic(err)
}
logger := logrus.New()
logger.Out = os.Stdout
control, err := nebula.Main(&c, false, "custom-app", logger, overlay.NewUserDeviceFromConfig)
if err != nil {
panic(err)
}
s, err := New(control)
s, err := New(&c)
if err != nil {
panic(err)
}

View File

@@ -1,7 +1,6 @@
package nebula
import (
"sync"
"time"
)
@@ -34,7 +33,7 @@ type TimerWheel[T any] struct {
}
type LockingTimerWheel[T any] struct {
m sync.Mutex
m syncMutex
t *TimerWheel[T]
}
@@ -81,8 +80,9 @@ func NewTimerWheel[T any](min, max time.Duration) *TimerWheel[T] {
}
// NewLockingTimerWheel is a version of TimerWheel that is safe for concurrent use, with a small performance penalty
func NewLockingTimerWheel[T any](min, max time.Duration) *LockingTimerWheel[T] {
func NewLockingTimerWheel[T any](name string, min, max time.Duration) *LockingTimerWheel[T] {
return &LockingTimerWheel[T]{
m: newSyncMutex(name),
t: NewTimerWheel[T](min, max),
}
}
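With the new name parameter, callers tag the wheel's internal mutex so it takes part in the ordering checks. A hypothetical call shape (the key and durations are illustrative, not taken from this change):

hsTimer := NewLockingTimerWheel[uint32]("handshake-manager-timer", 500*time.Millisecond, time.Minute) // internal mutex keyed "handshake-manager-timer"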