Compare commits

...

18 Commits

Author SHA1 Message Date
Wade Simmons
b418a081a8 cleanup 2025-07-25 14:57:49 -04:00
Wade Simmons
fd3fa57e79 comments 2025-07-25 14:42:54 -04:00
Wade Simmons
0eb92dcab4 WIP 2025-07-25 14:32:37 -04:00
Wade Simmons
f6b206d96c cleanup 2025-07-25 10:38:52 -04:00
Wade Simmons
31cc3a4169 Merge remote-tracking branch 'origin/master' into fips140 2025-07-24 13:57:12 -04:00
Wade Simmons
6da314aa6b WIP 2025-07-24 13:56:42 -04:00
Wade Simmons
3da3d41fb5 log if fips140 in use 2025-07-24 12:37:33 -04:00
brad-defined
91eff03418 Update slack OSS invite link (#1435) 2025-07-15 16:05:28 -04:00
Nate Brown
52623820c2 Drop inactive tunnels (#1427) 2025-07-03 09:58:37 -05:00
Nate Brown
c2420642a0 Darwin udp fix (#1428) 2025-07-02 15:50:22 -05:00
brad-defined
b3a1f7b0a3 Disable UDP receive error returns due to ICMP messages on Windows. (#1412) (#1415) 2025-07-02 11:37:41 -04:00
brad-defined
94142aded5 Fix relay migration panic by covering every possible relay state (#1414) 2025-07-02 08:48:02 -04:00
brad-defined
b158eb0c4c Use a list for relay IPs instead of a map (#1423)
* Use a list for relay IPs instead of a map

* linter
2025-07-02 08:47:05 -04:00
dependabot[bot]
e4b7dbcfb0 Bump dario.cat/mergo from 1.0.1 to 1.0.2 (#1408)
Bumps [dario.cat/mergo](https://github.com/imdario/mergo) from 1.0.1 to 1.0.2.
- [Release notes](https://github.com/imdario/mergo/releases)
- [Commits](https://github.com/imdario/mergo/compare/v1.0.1...v1.0.2)

---
updated-dependencies:
- dependency-name: dario.cat/mergo
  dependency-version: 1.0.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-01 23:30:40 -05:00
dependabot[bot]
882edf11d7 Bump github.com/vishvananda/netlink from 1.3.0 to 1.3.1 (#1407)
Bumps [github.com/vishvananda/netlink](https://github.com/vishvananda/netlink) from 1.3.0 to 1.3.1.
- [Release notes](https://github.com/vishvananda/netlink/releases)
- [Commits](https://github.com/vishvananda/netlink/compare/v1.3.0...v1.3.1)

---
updated-dependencies:
- dependency-name: github.com/vishvananda/netlink
  dependency-version: 1.3.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-01 23:29:15 -05:00
dependabot[bot]
d34c2b8e06 Bump golangci/golangci-lint-action from 7 to 8 (#1400)
* Bump golangci/golangci-lint-action from 7 to 8

Bumps [golangci/golangci-lint-action](https://github.com/golangci/golangci-lint-action) from 7 to 8.
- [Release notes](https://github.com/golangci/golangci-lint-action/releases)
- [Commits](https://github.com/golangci/golangci-lint-action/compare/v7...v8)

---
updated-dependencies:
- dependency-name: golangci/golangci-lint-action
  dependency-version: '8'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

* bump golangci-lint version

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Wade Simmons <wsimmons@slack-corp.com>
2025-07-01 23:25:24 -05:00
brad-defined
442a52879b Fix off by one error in IPv6 packet parser (#1419) 2025-06-11 15:15:15 -04:00
Wade Simmons
4485c47641 WIP support new Go fips140 module
This will replace boring crypto at some point.

We should modify our protocol a bit and instead change to
NewGCMWithRandomNonce.
2025-03-31 12:08:58 -04:00
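For context on that last commit message: Go 1.24's `crypto/cipher` package added `NewGCMWithRandomNonce`, an AES-GCM construction whose nonce handling is designed to satisfy FIPS 140-3 requirements. A minimal sketch of its semantics (illustrative only; Nebula's actual transport manages nonces explicitly, which is why the message talks about changing the protocol):

```go
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"fmt"
)

func main() {
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}

	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}

	// The returned AEAD draws a fresh random nonce on every Seal and prepends
	// it to the ciphertext; NonceSize() is 0, so the caller passes an empty
	// nonce and never manages counters itself.
	aead, err := cipher.NewGCMWithRandomNonce(block)
	if err != nil {
		panic(err)
	}

	ct := aead.Seal(nil, nil, []byte("hello nebula"), nil)
	pt, err := aead.Open(nil, nil, ct, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", pt)
}
```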
34 changed files with 992 additions and 370 deletions

View File

@@ -17,5 +17,5 @@ contact_links:
     about: 'The documentation is the best place to start if you are new to Nebula.'
   - name: 💁 Support/Chat
-    url: https://join.slack.com/t/nebulaoss/shared_invite/zt-2xqe6e7vn-k_KGi8s13nsr7cvHVvHvuQ
+    url: https://join.slack.com/t/nebulaoss/shared_invite/zt-39pk4xopc-CUKlGcb5Z39dQ0cK1v7ehA
     about: 'For faster support, join us on Slack for assistance!'

View File

@@ -52,4 +52,12 @@ jobs:
        working-directory: ./.github/workflows/smoke
        run: NAME="smoke-p256" ./smoke.sh

+      - name: setup docker image for fips140
+        working-directory: ./.github/workflows/smoke
+        run: NAME="smoke-fips140" CURVE=P256 GOFIPS140=v1.0.0 LDFLAGS=-checklinkname=0 ./build.sh
+
+      - name: run smoke-fips140
+        working-directory: ./.github/workflows/smoke
+        run: NAME="smoke-fips140" ./smoke.sh
+
      timeout-minutes: 10

View File

@@ -32,9 +32,9 @@ jobs:
       run: make vet

     - name: golangci-lint
-      uses: golangci/golangci-lint-action@v7
+      uses: golangci/golangci-lint-action@v8
       with:
-        version: v2.0
+        version: v2.1

     - name: Test
       run: make test
@@ -115,9 +115,9 @@ jobs:
       run: make vet

     - name: golangci-lint
-      uses: golangci/golangci-lint-action@v7
+      uses: golangci/golangci-lint-action@v8
       with:
-        version: v2.0
+        version: v2.1

     - name: Test
       run: make test

View File

@@ -121,12 +121,12 @@ bin-pkcs11: CGO_ENABLED = 1
 bin-pkcs11: bin

 bin:
-	go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula${NEBULA_CMD_SUFFIX} ${NEBULA_CMD_PATH}
-	go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula-cert${NEBULA_CMD_SUFFIX} ./cmd/nebula-cert
+	$(GOENV) go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula${NEBULA_CMD_SUFFIX} ${NEBULA_CMD_PATH}
+	$(GOENV) go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula-cert${NEBULA_CMD_SUFFIX} ./cmd/nebula-cert

 install:
-	go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH}
-	go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ./cmd/nebula-cert
+	$(GOENV) go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH}
+	$(GOENV) go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ./cmd/nebula-cert

 build/linux-arm-%: GOENV += GOARM=$(word 3, $(subst -, ,$*))
 build/linux-mips-%: GOENV += GOMIPS=$(word 3, $(subst -, ,$*))
@@ -215,6 +215,14 @@ ifeq ($(words $(MAKECMDGOALS)),1)
 	@$(MAKE) service ${.DEFAULT_GOAL} --no-print-directory
 endif

+fips140:
+	@echo > $(NULL_FILE)
+	$(eval GOENV += GOFIPS140=v1.0.0)
+	$(eval LDFLAGS += -checklinkname=0)
+ifeq ($(words $(MAKECMDGOALS)),1)
+	@$(MAKE) fips140 ${.DEFAULT_GOAL} --no-print-directory
+endif
+
 bin-docker: bin build/linux-amd64/nebula build/linux-amd64/nebula-cert

 smoke-docker: bin-docker
@@ -236,5 +244,5 @@ smoke-vagrant/%: bin-docker build/%/nebula
 	cd .github/workflows/smoke/ && ./smoke-vagrant.sh $*

 .FORCE:
-.PHONY: bench bench-cpu bench-cpu-long bin build-test-mobile e2e e2ev e2evv e2evvv e2evvvv proto release service smoke-docker smoke-docker-race test test-cov-html smoke-vagrant/%
+.PHONY: bench bench-cpu bench-cpu-long bin build-test-mobile e2e e2ev e2evv e2evvv e2evvvv fips140 proto release service smoke-docker smoke-docker-race test test-cov-html smoke-vagrant/%
 .DEFAULT_GOAL := bin

View File

@@ -12,7 +12,7 @@ Further documentation can be found [here](https://nebula.defined.net/docs/).

 You can read more about Nebula [here](https://medium.com/p/884110a5579).

-You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/zt-2xqe6e7vn-k_KGi8s13nsr7cvHVvHvuQ).
+You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/zt-39pk4xopc-CUKlGcb5Z39dQ0cK1v7ehA).

 ## Supported Platforms

@@ -143,17 +143,24 @@ To build nebula for a specific platform (ex, Windows):

 See the [Makefile](Makefile) for more details on build targets

-## Curve P256 and BoringCrypto
+## Curve P256, BoringCrypto and FIPS 140-3 mode

 The default curve used for cryptographic handshakes and signatures is Curve25519. This is the recommended setting for most users. If your deployment has certain compliance requirements, you have the option of creating your CA using `nebula-cert ca -curve P256` to use NIST Curve P256. The CA will then sign certificates using ECDSA P256, and any hosts using these certificates will use P256 for ECDH handshakes.

-In addition, Nebula can be built using the [BoringCrypto GOEXPERIMENT](https://github.com/golang/go/blob/go1.20/src/crypto/internal/boring/README.md) by running either of the following make targets:
+Nebula can be built using the [BoringCrypto GOEXPERIMENT](https://github.com/golang/go/blob/go1.20/src/crypto/internal/boring/README.md) by running either of the following make targets:

 ```sh
 make bin-boringcrypto
 make release-boringcrypto
 ```

+Nebula can also be built using the [FIPS 140-3](https://go.dev/doc/security/fips140) mode of Go by running either of the following make targets:
+
+```sh
+make fips140
+make fips140 release
+```
+
 This is not the recommended default deployment, but may be useful based on your compliance requirements.

 ## Credits
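The `log if fips140 in use` commit in the list above suggests runtime detection of this mode; Go 1.24 exposes `crypto/fips140.Enabled()` for exactly that. A hedged sketch of what such logging could look like (the log field name is illustrative, not necessarily what Nebula emits):

```go
package main

import (
	"crypto/fips140"

	"github.com/sirupsen/logrus"
)

func main() {
	l := logrus.New()

	// fips140.Enabled reports whether the Go FIPS 140-3 module is in effect,
	// e.g. in a binary built with GOFIPS140=v1.0.0 as the fips140 make target does.
	l.WithField("fips140", fips140.Enabled()).Info("Crypto mode")
}
```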

View File

@@ -4,13 +4,16 @@ import (
 	"bytes"
 	"context"
 	"encoding/binary"
+	"fmt"
 	"net/netip"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/rcrowley/go-metrics"
 	"github.com/sirupsen/logrus"
 	"github.com/slackhq/nebula/cert"
+	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/header"
 )
@@ -27,130 +30,124 @@
 )

 type connectionManager struct {
-	in      map[uint32]struct{}
-	inLock  *sync.RWMutex
-	out     map[uint32]struct{}
-	outLock *sync.RWMutex
 	// relayUsed holds which relay localIndexs are in use
 	relayUsed     map[uint32]struct{}
 	relayUsedLock *sync.RWMutex

 	hostMap      *HostMap
 	trafficTimer *LockingTimerWheel[uint32]
 	intf         *Interface
-	pendingDeletion map[uint32]struct{}
 	punchy       *Punchy

+	// Configuration settings
 	checkInterval           time.Duration
 	pendingDeletionInterval time.Duration
-	metricsTxPunchy metrics.Counter
+	inactivityTimeout       atomic.Int64
+	dropInactive            atomic.Bool
+
+	metricsTxPunchy metrics.Counter

 	l *logrus.Logger
 }

-func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager {
-	var max time.Duration
-	if checkInterval < pendingDeletionInterval {
-		max = pendingDeletionInterval
-	} else {
-		max = checkInterval
-	}
-
-	nc := &connectionManager{
-		hostMap:                 intf.hostMap,
-		in:                      make(map[uint32]struct{}),
-		inLock:                  &sync.RWMutex{},
-		out:                     make(map[uint32]struct{}),
-		outLock:                 &sync.RWMutex{},
-		relayUsed:               make(map[uint32]struct{}),
-		relayUsedLock:           &sync.RWMutex{},
-		trafficTimer:            NewLockingTimerWheel[uint32](time.Millisecond*500, max),
-		intf:                    intf,
-		pendingDeletion:         make(map[uint32]struct{}),
-		checkInterval:           checkInterval,
-		pendingDeletionInterval: pendingDeletionInterval,
-		punchy:                  punchy,
-		metricsTxPunchy:         metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
-		l:                       l,
-	}
-
-	nc.Start(ctx)
-	return nc
-}
+func newConnectionManagerFromConfig(l *logrus.Logger, c *config.C, hm *HostMap, p *Punchy) *connectionManager {
+	cm := &connectionManager{
+		hostMap:         hm,
+		l:               l,
+		punchy:          p,
+		relayUsed:       make(map[uint32]struct{}),
+		relayUsedLock:   &sync.RWMutex{},
+		metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
+	}
+
+	cm.reload(c, true)
+	c.RegisterReloadCallback(func(c *config.C) {
+		cm.reload(c, false)
+	})
+
+	return cm
+}

-func (n *connectionManager) In(localIndex uint32) {
-	n.inLock.RLock()
-	// If this already exists, return
-	if _, ok := n.in[localIndex]; ok {
-		n.inLock.RUnlock()
-		return
-	}
-	n.inLock.RUnlock()
-
-	n.inLock.Lock()
-	n.in[localIndex] = struct{}{}
-	n.inLock.Unlock()
-}
-
-func (n *connectionManager) Out(localIndex uint32) {
-	n.outLock.RLock()
-	// If this already exists, return
-	if _, ok := n.out[localIndex]; ok {
-		n.outLock.RUnlock()
-		return
-	}
-	n.outLock.RUnlock()
-
-	n.outLock.Lock()
-	n.out[localIndex] = struct{}{}
-	n.outLock.Unlock()
-}
+func (cm *connectionManager) reload(c *config.C, initial bool) {
+	if initial {
+		cm.checkInterval = time.Duration(c.GetInt("timers.connection_alive_interval", 5)) * time.Second
+		cm.pendingDeletionInterval = time.Duration(c.GetInt("timers.pending_deletion_interval", 10)) * time.Second
+
+		// We want at least a minimum resolution of 500ms per tick so that we can hit these intervals
+		// pretty close to their configured duration.
+		// The inactivity duration is checked each time a hostinfo ticks through so we don't need the wheel to contain it.
+		minDuration := min(time.Millisecond*500, cm.checkInterval, cm.pendingDeletionInterval)
+		maxDuration := max(cm.checkInterval, cm.pendingDeletionInterval)
+		cm.trafficTimer = NewLockingTimerWheel[uint32](minDuration, maxDuration)
+	}
+
+	if initial || c.HasChanged("tunnels.inactivity_timeout") {
+		old := cm.getInactivityTimeout()
+		cm.inactivityTimeout.Store((int64)(c.GetDuration("tunnels.inactivity_timeout", 10*time.Minute)))
+		if !initial {
+			cm.l.WithField("oldDuration", old).
+				WithField("newDuration", cm.getInactivityTimeout()).
+				Info("Inactivity timeout has changed")
+		}
+	}
+
+	if initial || c.HasChanged("tunnels.drop_inactive") {
+		old := cm.dropInactive.Load()
+		cm.dropInactive.Store(c.GetBool("tunnels.drop_inactive", false))
+		if !initial {
+			cm.l.WithField("oldBool", old).
+				WithField("newBool", cm.dropInactive.Load()).
+				Info("Drop inactive setting has changed")
+		}
+	}
+}
+
+func (cm *connectionManager) getInactivityTimeout() time.Duration {
+	return (time.Duration)(cm.inactivityTimeout.Load())
+}
+
+func (cm *connectionManager) In(h *HostInfo) {
+	h.in.Store(true)
+}
+
+func (cm *connectionManager) Out(h *HostInfo) {
+	h.out.Store(true)
+}

-func (n *connectionManager) RelayUsed(localIndex uint32) {
-	n.relayUsedLock.RLock()
+func (cm *connectionManager) RelayUsed(localIndex uint32) {
+	cm.relayUsedLock.RLock()
 	// If this already exists, return
-	if _, ok := n.relayUsed[localIndex]; ok {
-		n.relayUsedLock.RUnlock()
+	if _, ok := cm.relayUsed[localIndex]; ok {
+		cm.relayUsedLock.RUnlock()
 		return
 	}
-	n.relayUsedLock.RUnlock()
-
-	n.relayUsedLock.Lock()
-	n.relayUsed[localIndex] = struct{}{}
-	n.relayUsedLock.Unlock()
+	cm.relayUsedLock.RUnlock()
+
+	cm.relayUsedLock.Lock()
+	cm.relayUsed[localIndex] = struct{}{}
+	cm.relayUsedLock.Unlock()
 }
 // getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and
 // resets the state for this local index
-func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) {
-	n.inLock.Lock()
-	n.outLock.Lock()
-	_, in := n.in[localIndex]
-	_, out := n.out[localIndex]
-	delete(n.in, localIndex)
-	delete(n.out, localIndex)
-	n.inLock.Unlock()
-	n.outLock.Unlock()
+func (cm *connectionManager) getAndResetTrafficCheck(h *HostInfo, now time.Time) (bool, bool) {
+	in := h.in.Swap(false)
+	out := h.out.Swap(false)
+	if in || out {
+		h.lastUsed = now
+	}
 	return in, out
 }

-func (n *connectionManager) AddTrafficWatch(localIndex uint32) {
-	// Use a write lock directly because it should be incredibly rare that we are ever already tracking this index
-	n.outLock.Lock()
-	if _, ok := n.out[localIndex]; ok {
-		n.outLock.Unlock()
-		return
-	}
-	n.out[localIndex] = struct{}{}
-	n.trafficTimer.Add(localIndex, n.checkInterval)
-	n.outLock.Unlock()
-}
+// AddTrafficWatch must be called for every new HostInfo.
+// We will continue to monitor the HostInfo until the tunnel is dropped.
+func (cm *connectionManager) AddTrafficWatch(h *HostInfo) {
+	if h.out.Swap(true) == false {
+		cm.trafficTimer.Add(h.localIndexId, cm.checkInterval)
+	}
+}

-func (n *connectionManager) Start(ctx context.Context) {
-	go n.Run(ctx)
-}
-
-func (n *connectionManager) Run(ctx context.Context) {
-	//TODO: this tick should be based on the min wheel tick? Check firewall
-	clockSource := time.NewTicker(500 * time.Millisecond)
+func (cm *connectionManager) Start(ctx context.Context) {
+	clockSource := time.NewTicker(cm.trafficTimer.t.tickDuration)
 	defer clockSource.Stop()

 	p := []byte("")

@@ -163,61 +160,61 @@ func (n *connectionManager) Run(ctx context.Context) {
 			return
 		case now := <-clockSource.C:
-			n.trafficTimer.Advance(now)
+			cm.trafficTimer.Advance(now)
 			for {
-				localIndex, has := n.trafficTimer.Purge()
+				localIndex, has := cm.trafficTimer.Purge()
 				if !has {
 					break
 				}

-				n.doTrafficCheck(localIndex, p, nb, out, now)
+				cm.doTrafficCheck(localIndex, p, nb, out, now)
 			}
 		}
 	}
 }
-func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
-	decision, hostinfo, primary := n.makeTrafficDecision(localIndex, now)
+func (cm *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
+	decision, hostinfo, primary := cm.makeTrafficDecision(localIndex, now)

 	switch decision {
 	case deleteTunnel:
-		if n.hostMap.DeleteHostInfo(hostinfo) {
+		if cm.hostMap.DeleteHostInfo(hostinfo) {
 			// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
-			n.intf.lightHouse.DeleteVpnAddrs(hostinfo.vpnAddrs)
+			cm.intf.lightHouse.DeleteVpnAddrs(hostinfo.vpnAddrs)
 		}

 	case closeTunnel:
-		n.intf.sendCloseTunnel(hostinfo)
-		n.intf.closeTunnel(hostinfo)
+		cm.intf.sendCloseTunnel(hostinfo)
+		cm.intf.closeTunnel(hostinfo)

 	case swapPrimary:
-		n.swapPrimary(hostinfo, primary)
+		cm.swapPrimary(hostinfo, primary)

 	case migrateRelays:
-		n.migrateRelayUsed(hostinfo, primary)
+		cm.migrateRelayUsed(hostinfo, primary)

 	case tryRehandshake:
-		n.tryRehandshake(hostinfo)
+		cm.tryRehandshake(hostinfo)

 	case sendTestPacket:
-		n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
+		cm.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
 	}

-	n.resetRelayTrafficCheck(hostinfo)
+	cm.resetRelayTrafficCheck(hostinfo)
 }

-func (n *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
+func (cm *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
 	if hostinfo != nil {
-		n.relayUsedLock.Lock()
-		defer n.relayUsedLock.Unlock()
+		cm.relayUsedLock.Lock()
+		defer cm.relayUsedLock.Unlock()
 		// No need to migrate any relays, delete usage info now.
 		for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
-			delete(n.relayUsed, idx)
+			delete(cm.relayUsed, idx)
 		}
 	}
 }

-func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
+func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
 	relayFor := oldhostinfo.relayState.CopyAllRelayFor()
 	for _, r := range relayFor {
@@ -227,46 +224,51 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
 		var relayFrom netip.Addr
 		var relayTo netip.Addr
 		switch {
-		case ok && existing.State == Established:
-			// This relay already exists in newhostinfo, then do nothing.
-			continue
-		case ok && existing.State == Requested:
-			// The relay exists in a Requested state; re-send the request
-			index = existing.LocalIndex
-			switch r.Type {
-			case TerminalType:
-				relayFrom = n.intf.myVpnAddrs[0]
-				relayTo = existing.PeerAddr
-			case ForwardingType:
-				relayFrom = existing.PeerAddr
-				relayTo = newhostinfo.vpnAddrs[0]
-			default:
-				// should never happen
+		case ok:
+			switch existing.State {
+			case Established, PeerRequested, Disestablished:
+				// This relay already exists in newhostinfo, then do nothing.
+				continue
+			case Requested:
+				// The relay exists in a Requested state; re-send the request
+				index = existing.LocalIndex
+				switch r.Type {
+				case TerminalType:
+					relayFrom = cm.intf.myVpnAddrs[0]
+					relayTo = existing.PeerAddr
+				case ForwardingType:
+					relayFrom = existing.PeerAddr
+					relayTo = newhostinfo.vpnAddrs[0]
+				default:
+					// should never happen
+					panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
+				}
 			}
 		case !ok:
-			n.relayUsedLock.RLock()
-			if _, relayUsed := n.relayUsed[r.LocalIndex]; !relayUsed {
+			cm.relayUsedLock.RLock()
+			if _, relayUsed := cm.relayUsed[r.LocalIndex]; !relayUsed {
 				// The relay hasn't been used; don't migrate it.
-				n.relayUsedLock.RUnlock()
+				cm.relayUsedLock.RUnlock()
 				continue
 			}
-			n.relayUsedLock.RUnlock()
+			cm.relayUsedLock.RUnlock()
 			// The relay doesn't exist at all; create some relay state and send the request.
 			var err error
-			index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerAddr, nil, r.Type, Requested)
+			index, err = AddRelay(cm.l, newhostinfo, cm.hostMap, r.PeerAddr, nil, r.Type, Requested)
 			if err != nil {
-				n.l.WithError(err).Error("failed to migrate relay to new hostinfo")
+				cm.l.WithError(err).Error("failed to migrate relay to new hostinfo")
 				continue
 			}
 			switch r.Type {
 			case TerminalType:
-				relayFrom = n.intf.myVpnAddrs[0]
+				relayFrom = cm.intf.myVpnAddrs[0]
 				relayTo = r.PeerAddr
 			case ForwardingType:
 				relayFrom = r.PeerAddr
 				relayTo = newhostinfo.vpnAddrs[0]
 			default:
 				// should never happen
+				panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
 			}
 		}
@@ -279,12 +281,12 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
 		switch newhostinfo.GetCert().Certificate.Version() {
 		case cert.Version1:
 			if !relayFrom.Is4() {
-				n.l.Error("can not migrate v1 relay with a v6 network because the relay is not running a current nebula version")
+				cm.l.Error("can not migrate v1 relay with a v6 network because the relay is not running a current nebula version")
 				continue
 			}
 			if !relayTo.Is4() {
-				n.l.Error("can not migrate v1 relay with a v6 remote network because the relay is not running a current nebula version")
+				cm.l.Error("can not migrate v1 relay with a v6 remote network because the relay is not running a current nebula version")
 				continue
 			}

@@ -296,16 +298,16 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
 			req.RelayFromAddr = netAddrToProtoAddr(relayFrom)
 			req.RelayToAddr = netAddrToProtoAddr(relayTo)
 		default:
-			newhostinfo.logger(n.l).Error("Unknown certificate version found while attempting to migrate relay")
+			newhostinfo.logger(cm.l).Error("Unknown certificate version found while attempting to migrate relay")
 			continue
 		}

 		msg, err := req.Marshal()
 		if err != nil {
-			n.l.WithError(err).Error("failed to marshal Control message to migrate relay")
+			cm.l.WithError(err).Error("failed to marshal Control message to migrate relay")
 		} else {
-			n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
-			n.l.WithFields(logrus.Fields{
+			cm.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
+			cm.l.WithFields(logrus.Fields{
 				"relayFrom":           req.RelayFromAddr,
 				"relayTo":             req.RelayToAddr,
 				"initiatorRelayIndex": req.InitiatorRelayIndex,
@@ -316,46 +318,45 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
 	}
 }

-func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
-	n.hostMap.RLock()
-	defer n.hostMap.RUnlock()
+func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
+	// Read lock the main hostmap to order decisions based on tunnels being the primary tunnel
+	cm.hostMap.RLock()
+	defer cm.hostMap.RUnlock()

-	hostinfo := n.hostMap.Indexes[localIndex]
+	hostinfo := cm.hostMap.Indexes[localIndex]
 	if hostinfo == nil {
-		n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
-		delete(n.pendingDeletion, localIndex)
+		cm.l.WithField("localIndex", localIndex).Debugln("Not found in hostmap")
 		return doNothing, nil, nil
 	}

-	if n.isInvalidCertificate(now, hostinfo) {
-		delete(n.pendingDeletion, hostinfo.localIndexId)
+	if cm.isInvalidCertificate(now, hostinfo) {
 		return closeTunnel, hostinfo, nil
 	}

-	primary := n.hostMap.Hosts[hostinfo.vpnAddrs[0]]
+	primary := cm.hostMap.Hosts[hostinfo.vpnAddrs[0]]
 	mainHostInfo := true
 	if primary != nil && primary != hostinfo {
 		mainHostInfo = false
 	}

 	// Check for traffic on this hostinfo
-	inTraffic, outTraffic := n.getAndResetTrafficCheck(localIndex)
+	inTraffic, outTraffic := cm.getAndResetTrafficCheck(hostinfo, now)

 	// A hostinfo is determined alive if there is incoming traffic
 	if inTraffic {
 		decision := doNothing
-		if n.l.Level >= logrus.DebugLevel {
-			hostinfo.logger(n.l).
+		if cm.l.Level >= logrus.DebugLevel {
+			hostinfo.logger(cm.l).
 				WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
 				Debug("Tunnel status")
 		}
-		delete(n.pendingDeletion, hostinfo.localIndexId)
+		hostinfo.pendingDeletion.Store(false)

 		if mainHostInfo {
 			decision = tryRehandshake

 		} else {
-			if n.shouldSwapPrimary(hostinfo, primary) {
+			if cm.shouldSwapPrimary(hostinfo, primary) {
 				decision = swapPrimary
 			} else {
 				// migrate the relays to the primary, if in use.
@@ -363,46 +364,55 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time
 			}
 		}

-		n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
+		cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)

 		if !outTraffic {
 			// Send a punch packet to keep the NAT state alive
-			n.sendPunch(hostinfo)
+			cm.sendPunch(hostinfo)
 		}

 		return decision, hostinfo, primary
 	}

-	if _, ok := n.pendingDeletion[hostinfo.localIndexId]; ok {
+	if hostinfo.pendingDeletion.Load() {
 		// We have already sent a test packet and nothing was returned, this hostinfo is dead
-		hostinfo.logger(n.l).
+		hostinfo.logger(cm.l).
 			WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
 			Info("Tunnel status")

-		delete(n.pendingDeletion, hostinfo.localIndexId)
 		return deleteTunnel, hostinfo, nil
 	}

 	decision := doNothing
 	if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
 		if !outTraffic {
+			inactiveFor, isInactive := cm.isInactive(hostinfo, now)
+			if isInactive {
+				// Tunnel is inactive, tear it down
+				hostinfo.logger(cm.l).
+					WithField("inactiveDuration", inactiveFor).
+					WithField("primary", mainHostInfo).
+					Info("Dropping tunnel due to inactivity")
+
+				return closeTunnel, hostinfo, primary
+			}
+
 			// If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel.
 			// Just maintain NAT state if configured to do so.
-			n.sendPunch(hostinfo)
-			n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
+			cm.sendPunch(hostinfo)
+			cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)
 			return doNothing, nil, nil
 		}

-		if n.punchy.GetTargetEverything() {
+		if cm.punchy.GetTargetEverything() {
 			// This is similar to the old punchy behavior with a slight optimization.
 			// We aren't receiving traffic but we are sending it, punch on all known
 			// ips in case we need to re-prime NAT state
-			n.sendPunch(hostinfo)
+			cm.sendPunch(hostinfo)
 		}

-		if n.l.Level >= logrus.DebugLevel {
-			hostinfo.logger(n.l).
+		if cm.l.Level >= logrus.DebugLevel {
+			hostinfo.logger(cm.l).
 				WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
 				Debug("Tunnel status")
 		}
@@ -411,17 +421,33 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time
 		decision = sendTestPacket

 	} else {
-		if n.l.Level >= logrus.DebugLevel {
-			hostinfo.logger(n.l).Debugf("Hostinfo sadness")
+		if cm.l.Level >= logrus.DebugLevel {
+			hostinfo.logger(cm.l).Debugf("Hostinfo sadness")
 		}
 	}

-	n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
-	n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
+	hostinfo.pendingDeletion.Store(true)
+	cm.trafficTimer.Add(hostinfo.localIndexId, cm.pendingDeletionInterval)
 	return decision, hostinfo, nil
 }

-func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
+func (cm *connectionManager) isInactive(hostinfo *HostInfo, now time.Time) (time.Duration, bool) {
+	if cm.dropInactive.Load() == false {
+		// We aren't configured to drop inactive tunnels
+		return 0, false
+	}
+
+	inactiveDuration := now.Sub(hostinfo.lastUsed)
+	if inactiveDuration < cm.getInactivityTimeout() {
+		// It's not considered inactive
+		return inactiveDuration, false
+	}
+
+	// The tunnel is inactive
+	return inactiveDuration, true
+}
+
+func (cm *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
 	// The primary tunnel is the most recent handshake to complete locally and should work entirely fine.
 	// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
 	// Let's sort this out.
@@ -429,73 +455,80 @@ func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
 	// Only one side should swap because if both swap then we may never resolve to a single tunnel.
 	// vpn addr is static across all tunnels for this host pair so lets
 	// use that to determine if we should consider swapping.
-	if current.vpnAddrs[0].Compare(n.intf.myVpnAddrs[0]) < 0 {
+	if current.vpnAddrs[0].Compare(cm.intf.myVpnAddrs[0]) < 0 {
 		// Their primary vpn addr is less than mine. Do not swap.
 		return false
 	}

-	crt := n.intf.pki.getCertState().getCertificate(current.ConnectionState.myCert.Version())
+	crt := cm.intf.pki.getCertState().getCertificate(current.ConnectionState.myCert.Version())
 	// If this tunnel is using the latest certificate then we should swap it to primary for a bit and see if things
 	// settle down.
 	return bytes.Equal(current.ConnectionState.myCert.Signature(), crt.Signature())
 }

-func (n *connectionManager) swapPrimary(current, primary *HostInfo) {
-	n.hostMap.Lock()
+func (cm *connectionManager) swapPrimary(current, primary *HostInfo) {
+	cm.hostMap.Lock()
 	// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
-	if n.hostMap.Hosts[current.vpnAddrs[0]] == primary {
-		n.hostMap.unlockedMakePrimary(current)
+	if cm.hostMap.Hosts[current.vpnAddrs[0]] == primary {
+		cm.hostMap.unlockedMakePrimary(current)
 	}
-	n.hostMap.Unlock()
+	cm.hostMap.Unlock()
 }

 // isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and
 // the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid
 // check and return true.
-func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
+func (cm *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
 	remoteCert := hostinfo.GetCert()
 	if remoteCert == nil {
 		return false
 	}

-	caPool := n.intf.pki.GetCAPool()
+	caPool := cm.intf.pki.GetCAPool()
 	err := caPool.VerifyCachedCertificate(now, remoteCert)
 	if err == nil {
 		return false
 	}

-	if !n.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
+	if !cm.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
 		// Block listed certificates should always be disconnected
 		return false
 	}

-	hostinfo.logger(n.l).WithError(err).
+	hostinfo.logger(cm.l).WithError(err).
 		WithField("fingerprint", remoteCert.Fingerprint).
 		Info("Remote certificate is no longer valid, tearing down the tunnel")

 	return true
 }

-func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
-	if !n.punchy.GetPunch() {
+func (cm *connectionManager) sendPunch(hostinfo *HostInfo) {
+	if !cm.punchy.GetPunch() {
 		// Punching is disabled
 		return
 	}

-	if n.punchy.GetTargetEverything() {
-		hostinfo.remotes.ForEach(n.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
-			n.metricsTxPunchy.Inc(1)
-			n.intf.outside.WriteTo([]byte{1}, addr)
+	if cm.intf.lightHouse.IsAnyLighthouseAddr(hostinfo.vpnAddrs) {
+		// Do not punch to lighthouses, we assume our lighthouse update interval is good enough.
+		// In the event the update interval is not sufficient to maintain NAT state then a publicly available lighthouse
+		// would lose the ability to notify us and punchy.respond would become unreliable.
+		return
+	}
+
+	if cm.punchy.GetTargetEverything() {
+		hostinfo.remotes.ForEach(cm.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
+			cm.metricsTxPunchy.Inc(1)
+			cm.intf.outside.WriteTo([]byte{1}, addr)
 		})

 	} else if hostinfo.remote.IsValid() {
-		n.metricsTxPunchy.Inc(1)
-		n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
+		cm.metricsTxPunchy.Inc(1)
+		cm.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
 	}
 }

-func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
-	cs := n.intf.pki.getCertState()
+func (cm *connectionManager) tryRehandshake(hostinfo *HostInfo) {
+	cs := cm.intf.pki.getCertState()
 	curCrt := hostinfo.ConnectionState.myCert
 	myCrt := cs.getCertificate(curCrt.Version())
 	if curCrt.Version() >= cs.initiatingVersion && bytes.Equal(curCrt.Signature(), myCrt.Signature()) == true {
@@ -503,9 +536,9 @@ func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
 		return
 	}

-	n.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
+	cm.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
 		WithField("reason", "local certificate is not current").
 		Info("Re-handshaking with remote")

-	n.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil)
+	cm.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil)
 }
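The recurring theme in the diff above is replacing the shared `in`/`out` maps and their RWMutexes with per-HostInfo atomics, plus a `lastUsed` timestamp that feeds the new inactivity check. A stripped-down, self-contained illustration of that pattern (type and field names simplified from the real `HostInfo`):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// tunnel stands in for the per-tunnel state: traffic is recorded lock-free
// on the hot path and read-and-cleared once per traffic-check tick.
type tunnel struct {
	in, out  atomic.Bool
	lastUsed time.Time
}

// seenTraffic is the hot-path write, replacing map inserts under a mutex.
func (t *tunnel) seenTraffic(inbound bool) {
	if inbound {
		t.in.Store(true)
	} else {
		t.out.Store(true)
	}
}

// getAndReset mirrors getAndResetTrafficCheck: Swap(false) atomically reads
// and clears each flag, and any traffic refreshes lastUsed so an inactivity
// duration can later be computed as now.Sub(lastUsed).
func (t *tunnel) getAndReset(now time.Time) (in, out bool) {
	in = t.in.Swap(false)
	out = t.out.Swap(false)
	if in || out {
		t.lastUsed = now
	}
	return in, out
}

func main() {
	t := &tunnel{lastUsed: time.Now()}
	t.seenTraffic(true)

	in, out := t.getAndReset(time.Now())
	fmt.Println(in, out) // true false

	in, out = t.getAndReset(time.Now())
	fmt.Println(in, out) // false false
}
```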

View File

@@ -1,7 +1,6 @@
 package nebula

 import (
-	"context"
 	"crypto/ed25519"
 	"crypto/rand"
 	"net/netip"
@@ -64,10 +63,10 @@ func Test_NewConnectionManagerTest(t *testing.T) {
 	ifce.pki.cs.Store(cs)

 	// Create manager
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	punchy := NewPunchyFromConfig(l, config.NewC(l))
-	nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
+	conf := config.NewC(l)
+	punchy := NewPunchyFromConfig(l, conf)
+	nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
+	nc.intf = ifce
 	p := []byte("")
 	nb := make([]byte, 12, 12)
 	out := make([]byte, mtu)
@@ -85,32 +84,33 @@ func Test_NewConnectionManagerTest(t *testing.T) {
 	nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

 	// We saw traffic out to vpnIp
-	nc.Out(hostinfo.localIndexId)
-	nc.In(hostinfo.localIndexId)
-	assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
+	nc.Out(hostinfo)
+	nc.In(hostinfo)
+	assert.False(t, hostinfo.pendingDeletion.Load())
 	assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
 	assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
-	assert.Contains(t, nc.out, hostinfo.localIndexId)
+	assert.True(t, hostinfo.out.Load())
+	assert.True(t, hostinfo.in.Load())

 	// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
 	nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-	assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
-	assert.NotContains(t, nc.out, hostinfo.localIndexId)
-	assert.NotContains(t, nc.in, hostinfo.localIndexId)
+	assert.False(t, hostinfo.pendingDeletion.Load())
+	assert.False(t, hostinfo.out.Load())
+	assert.False(t, hostinfo.in.Load())

 	// Do another traffic check tick, this host should be pending deletion now
-	nc.Out(hostinfo.localIndexId)
+	nc.Out(hostinfo)
+	assert.True(t, hostinfo.out.Load())
 	nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-	assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
-	assert.NotContains(t, nc.out, hostinfo.localIndexId)
-	assert.NotContains(t, nc.in, hostinfo.localIndexId)
+	assert.True(t, hostinfo.pendingDeletion.Load())
+	assert.False(t, hostinfo.out.Load())
+	assert.False(t, hostinfo.in.Load())
 	assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
 	assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])

 	// Do a final traffic check tick, the host should now be removed
 	nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-	assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
-	assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
+	assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs)
 	assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
 }
@@ -146,10 +146,10 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
 	ifce.pki.cs.Store(cs)

 	// Create manager
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	punchy := NewPunchyFromConfig(l, config.NewC(l))
-	nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
+	conf := config.NewC(l)
+	punchy := NewPunchyFromConfig(l, conf)
+	nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
+	nc.intf = ifce
 	p := []byte("")
 	nb := make([]byte, 12, 12)
 	out := make([]byte, mtu)
@@ -167,33 +167,129 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
 	nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

 	// We saw traffic out to vpnIp
-	nc.Out(hostinfo.localIndexId)
-	nc.In(hostinfo.localIndexId)
-	assert.NotContains(t, nc.pendingDeletion, hostinfo.vpnAddrs[0])
+	nc.Out(hostinfo)
+	nc.In(hostinfo)
+	assert.True(t, hostinfo.in.Load())
+	assert.True(t, hostinfo.out.Load())
+	assert.False(t, hostinfo.pendingDeletion.Load())
 	assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
 	assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)

 	// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
 	nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-	assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
-	assert.NotContains(t, nc.out, hostinfo.localIndexId)
-	assert.NotContains(t, nc.in, hostinfo.localIndexId)
+	assert.False(t, hostinfo.pendingDeletion.Load())
+	assert.False(t, hostinfo.out.Load())
+	assert.False(t, hostinfo.in.Load())

 	// Do another traffic check tick, this host should be pending deletion now
-	nc.Out(hostinfo.localIndexId)
+	nc.Out(hostinfo)
 	nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-	assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
-	assert.NotContains(t, nc.out, hostinfo.localIndexId)
-	assert.NotContains(t, nc.in, hostinfo.localIndexId)
+	assert.True(t, hostinfo.pendingDeletion.Load())
+	assert.False(t, hostinfo.out.Load())
+	assert.False(t, hostinfo.in.Load())
 	assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
 	assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])

 	// We saw traffic, should no longer be pending deletion
-	nc.In(hostinfo.localIndexId)
+	nc.In(hostinfo)
 	nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-	assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
-	assert.NotContains(t, nc.out, hostinfo.localIndexId)
-	assert.NotContains(t, nc.in, hostinfo.localIndexId)
+	assert.False(t, hostinfo.pendingDeletion.Load())
+	assert.False(t, hostinfo.out.Load())
+	assert.False(t, hostinfo.in.Load())
+	assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
+	assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
+}
+
+func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {
+	l := test.NewLogger()
+	localrange := netip.MustParsePrefix("10.1.1.1/24")
+	vpnAddrs := []netip.Addr{netip.MustParseAddr("172.1.1.2")}
+	preferredRanges := []netip.Prefix{localrange}
+
+	// Very incomplete mock objects
+	hostMap := newHostMap(l)
+	hostMap.preferredRanges.Store(&preferredRanges)
+
+	cs := &CertState{
+		initiatingVersion: cert.Version1,
+		privateKey:        []byte{},
+		v1Cert:            &dummyCert{version: cert.Version1},
+		v1HandshakeBytes:  []byte{},
+	}
+
+	lh := newTestLighthouse()
+	ifce := &Interface{
+		hostMap:          hostMap,
+		inside:           &test.NoopTun{},
+		outside:          &udp.NoopConn{},
+		firewall:         &Firewall{},
+		lightHouse:       lh,
+		pki:              &PKI{},
+		handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
+		l:                l,
+	}
+	ifce.pki.cs.Store(cs)
+
+	// Create manager
+	conf := config.NewC(l)
+	conf.Settings["tunnels"] = map[string]any{
+		"drop_inactive": true,
+	}
+	punchy := NewPunchyFromConfig(l, conf)
+	nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
+	assert.True(t, nc.dropInactive.Load())
+	nc.intf = ifce
+
+	// Add an ip we have established a connection w/ to hostmap
+	hostinfo := &HostInfo{
+		vpnAddrs:      vpnAddrs,
+		localIndexId:  1099,
+		remoteIndexId: 9901,
+	}
+	hostinfo.ConnectionState = &ConnectionState{
+		myCert: &dummyCert{version: cert.Version1},
+		H:      &noise.HandshakeState{},
+	}
+	nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
+
+	// Do a traffic check tick, in and out should be cleared but should not be pending deletion
+	nc.Out(hostinfo)
+	nc.In(hostinfo)
+	assert.True(t, hostinfo.out.Load())
+	assert.True(t, hostinfo.in.Load())
+
+	now := time.Now()
+	decision, _, _ := nc.makeTrafficDecision(hostinfo.localIndexId, now)
+	assert.Equal(t, tryRehandshake, decision)
+	assert.Equal(t, now, hostinfo.lastUsed)
+	assert.False(t, hostinfo.pendingDeletion.Load())
+	assert.False(t, hostinfo.out.Load())
+	assert.False(t, hostinfo.in.Load())
+
+	decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*5))
+	assert.Equal(t, doNothing, decision)
+	assert.Equal(t, now, hostinfo.lastUsed)
+	assert.False(t, hostinfo.pendingDeletion.Load())
+	assert.False(t, hostinfo.out.Load())
+	assert.False(t, hostinfo.in.Load())
+
+	// Do another traffic check tick, should still not be pending deletion
+	decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*10))
+	assert.Equal(t, doNothing, decision)
+	assert.Equal(t, now, hostinfo.lastUsed)
+	assert.False(t, hostinfo.pendingDeletion.Load())
+	assert.False(t, hostinfo.out.Load())
+	assert.False(t, hostinfo.in.Load())
+	assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
+	assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
+
+	// Finally advance beyond the inactivity timeout
+	decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Minute*10))
+	assert.Equal(t, closeTunnel, decision)
+	assert.Equal(t, now, hostinfo.lastUsed)
+	assert.False(t, hostinfo.pendingDeletion.Load())
+	assert.False(t, hostinfo.out.Load())
+	assert.False(t, hostinfo.in.Load())
 	assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
 	assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
 }
@@ -264,10 +360,10 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
 	ifce.disconnectInvalid.Store(true)

 	// Create manager
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	punchy := NewPunchyFromConfig(l, config.NewC(l))
-	nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
+	conf := config.NewC(l)
+	punchy := NewPunchyFromConfig(l, conf)
+	nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
+	nc.intf = ifce
 	ifce.connectionManager = nc

 	hostinfo := &HostInfo{

View File

@@ -26,14 +26,15 @@ type controlHostLister interface {
 }

 type Control struct {
 	f                      *Interface
 	l                      *logrus.Logger
 	ctx                    context.Context
 	cancel                 context.CancelFunc
 	sshStart               func()
 	statsStart             func()
 	dnsStart               func()
 	lighthouseStart        func()
+	connectionManagerStart func(context.Context)
 }

 type ControlHostInfo struct {
@@ -63,6 +64,9 @@ func (c *Control) Start() {
 	if c.dnsStart != nil {
 		go c.dnsStart()
 	}
+	if c.connectionManagerStart != nil {
+		go c.connectionManagerStart(c.ctx)
+	}
 	if c.lighthouseStart != nil {
 		c.lighthouseStart()
 	}

View File

@@ -53,7 +53,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
 		localIndexId: 201,
 		vpnAddrs:     []netip.Addr{vpnIp},
 		relayState: RelayState{
-			relays:         map[netip.Addr]struct{}{},
+			relays:         nil,
 			relayForByAddr: map[netip.Addr]*Relay{},
 			relayForByIdx:  map[uint32]*Relay{},
 		},
@@ -72,7 +72,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
 		localIndexId: 201,
 		vpnAddrs:     []netip.Addr{vpnIp2},
 		relayState: RelayState{
-			relays:         map[netip.Addr]struct{}{},
+			relays:         nil,
 			relayForByAddr: map[netip.Addr]*Relay{},
 			relayForByIdx:  map[uint32]*Relay{},
 		},

View File

@@ -506,7 +506,7 @@ func TestReestablishRelays(t *testing.T) {
 	curIndexes := len(myControl.GetHostmap().Indexes)
 	for curIndexes >= start {
 		curIndexes = len(myControl.GetHostmap().Indexes)
-		r.Logf("Wait for the dead index to go away:start=%v indexes, currnet=%v indexes", start, curIndexes)
+		r.Logf("Wait for the dead index to go away:start=%v indexes, current=%v indexes", start, curIndexes)
 		myControl.InjectTunUDPPacket(theirVpnIpNet[0].Addr(), 80, myVpnIpNet[0].Addr(), 80, []byte("Hi from me should fail"))

 		r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
@@ -1052,6 +1052,9 @@ func TestRehandshakingLoser(t *testing.T) {
 	t.Log("Stand up a tunnel between me and them")
 	assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)

+	myControl.GetHostInfoByVpnAddr(theirVpnIpNet[0].Addr(), false)
+	theirControl.GetHostInfoByVpnAddr(myVpnIpNet[0].Addr(), false)
+
 	r.RenderHostmaps("Starting hostmaps", myControl, theirControl)

 	r.Log("Renew their certificate and spin until mine sees it")

View File

@@ -700,6 +700,7 @@ func (r *R) FlushAll() {
 			r.Unlock()
 			panic("Can't FlushAll for host: " + p.To.String())
 		}
+		receiver.InjectUDPPacket(p)
 		r.Unlock()
 	}
 }

e2e/tunnels_test.go (new file, 57 lines)
View File

@@ -0,0 +1,57 @@
//go:build e2e_testing
// +build e2e_testing
package e2e
import (
"testing"
"time"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/cert_test"
"github.com/slackhq/nebula/e2e/router"
)
func TestDropInactiveTunnels(t *testing.T) {
// The goal of this test is to ensure the shortest inactivity timeout will close the tunnel on both sides
// under ideal conditions
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version1, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(cert.Version1, ca, caKey, "me", "10.128.0.1/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "5s"}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(cert.Version1, ca, caKey, "them", "10.128.0.2/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "10m"}})
// Share our underlay information
myControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet[0].Addr(), myUdpAddr)
// Start the servers
myControl.Start()
theirControl.Start()
r := router.NewR(t, myControl, theirControl)
r.Log("Assert the tunnel between me and them works")
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
r.Log("Go inactive and wait for the tunnels to get dropped")
waitStart := time.Now()
for {
myIndexes := len(myControl.GetHostmap().Indexes)
theirIndexes := len(theirControl.GetHostmap().Indexes)
if myIndexes == 0 && theirIndexes == 0 {
break
}
since := time.Since(waitStart)
r.Logf("my tunnels: %v; their tunnels: %v; duration: %v", myIndexes, theirIndexes, since)
if since > time.Second*30 {
t.Fatal("Tunnel should have been declared inactive after 5 seconds and before 30 seconds")
}
time.Sleep(1 * time.Second)
r.FlushAll()
}
r.Logf("Inactive tunnels were dropped within %v", time.Since(waitStart))
myControl.Stop()
theirControl.Stop()
}

View File

@@ -338,6 +338,18 @@ logging:
   # after receiving the response for lighthouse queries
   #trigger_buffer: 64

+# Tunnel manager settings
+#tunnels:
+  # drop_inactive controls whether inactive tunnels are maintained or dropped after the inactive_timeout period has
+  # elapsed.
+  # In general, it is a good idea to enable this setting. It will be enabled by default in a future release.
+  # This setting is reloadable
+  #drop_inactive: false
+
+  # inactivity_timeout controls how long a tunnel MUST NOT see any inbound or outbound traffic before being considered
+  # inactive and eligible to be dropped.
+  # This setting is reloadable
+  #inactivity_timeout: 10m

 # Nebula security group configuration
 firewall:

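Editor's note: these keys are read with the same config getters used elsewhere in this change (GetBool, GetDuration). A minimal sketch of consuming them, with defaults assumed rather than confirmed by this diff:

package nebula

import (
	"time"

	"github.com/slackhq/nebula/config"
)

// readTunnelSettings sketches how the tunnels.* keys might be consumed; the
// real defaults live in the connection manager, which this compare view does not show.
func readTunnelSettings(c *config.C) (dropInactive bool, inactivityTimeout time.Duration) {
	dropInactive = c.GetBool("tunnels.drop_inactive", false)
	inactivityTimeout = c.GetDuration("tunnels.inactivity_timeout", 10*time.Minute)
	return dropInactive, inactivityTimeout
}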
go.mod (8 changes)

@@ -1,11 +1,11 @@
 module github.com/slackhq/nebula

-go 1.23.0
+go 1.24.0

 toolchain go1.24.1

 require (
-	dario.cat/mergo v1.0.1
+	dario.cat/mergo v1.0.2
 	github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
 	github.com/armon/go-radix v1.0.0
 	github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
@@ -23,7 +23,7 @@ require (
 	github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
 	github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6
 	github.com/stretchr/testify v1.10.0
-	github.com/vishvananda/netlink v1.3.0
+	github.com/vishvananda/netlink v1.3.1
 	golang.org/x/crypto v0.37.0
 	golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
 	golang.org/x/net v0.39.0
@@ -48,7 +48,7 @@ require (
 	github.com/prometheus/client_model v0.6.1 // indirect
 	github.com/prometheus/common v0.62.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
-	github.com/vishvananda/netns v0.0.4 // indirect
+	github.com/vishvananda/netns v0.0.5 // indirect
 	golang.org/x/mod v0.23.0 // indirect
 	golang.org/x/time v0.5.0 // indirect
 	golang.org/x/tools v0.30.0 // indirect

go.sum (12 changes)

@@ -1,6 +1,6 @@
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
-dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
+dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -145,10 +145,10 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
 github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
-github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
+github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0=
+github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4=
-github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
-github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
+github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
+github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
 github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=


@@ -249,7 +249,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
 		HandshakePacket:   make(map[uint8][]byte, 0),
 		lastHandshakeTime: hs.Details.Time,
 		relayState: RelayState{
-			relays:         map[netip.Addr]struct{}{},
+			relays:         nil,
 			relayForByAddr: map[netip.Addr]*Relay{},
 			relayForByIdx:  map[uint32]*Relay{},
 		},
@@ -457,7 +457,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
 			Info("Handshake message sent")
 	}

-	f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
+	f.connectionManager.AddTrafficWatch(hostinfo)

 	hostinfo.remotes.ResetBlockedRemotes()
@@ -652,7 +652,7 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
 	// Complete our handshake and update metrics, this will replace any existing tunnels for the vpnAddrs here
 	f.handshakeManager.Complete(hostinfo, f)
-	f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
+	f.connectionManager.AddTrafficWatch(hostinfo)

 	if f.l.Level >= logrus.DebugLevel {
 		hostinfo.logger(f.l).Debugf("Sending %d stored packets", len(hh.packetStore))


@@ -450,7 +450,7 @@ func (hm *HandshakeManager) StartHandshake(vpnAddr netip.Addr, cacheCb func(*Han
 		vpnAddrs:        []netip.Addr{vpnAddr},
 		HandshakePacket: make(map[uint8][]byte, 0),
 		relayState: RelayState{
-			relays:         map[netip.Addr]struct{}{},
+			relays:         nil,
 			relayForByAddr: map[netip.Addr]*Relay{},
 			relayForByIdx:  map[uint32]*Relay{},
 		},


@@ -4,6 +4,7 @@ import (
 	"errors"
 	"net"
 	"net/netip"
+	"slices"
 	"sync"
 	"sync/atomic"
 	"time"
@@ -68,7 +69,7 @@ type HostMap struct {
 type RelayState struct {
 	sync.RWMutex

-	relays map[netip.Addr]struct{} // Set of vpnAddr's of Hosts to use as relays to access this peer
+	relays []netip.Addr // Ordered set of VpnAddrs of Hosts to use as relays to access this peer
 	// For data race avoidance, the contents of a *Relay are treated immutably. To update a *Relay, copy the existing data,
 	// modify what needs to be updated, and store the new modified copy in the relayForByIp and relayForByIdx maps (with
 	// the RelayState Lock held)
@@ -79,7 +80,12 @@ type RelayState struct {
 func (rs *RelayState) DeleteRelay(ip netip.Addr) {
 	rs.Lock()
 	defer rs.Unlock()
-	delete(rs.relays, ip)
+	for idx, val := range rs.relays {
+		if val == ip {
+			rs.relays = append(rs.relays[:idx], rs.relays[idx+1:]...)
+			return
+		}
+	}
 }

 func (rs *RelayState) UpdateRelayForByIpState(vpnIp netip.Addr, state int) {
@@ -124,16 +130,16 @@ func (rs *RelayState) GetRelayForByAddr(addr netip.Addr) (*Relay, bool) {
 func (rs *RelayState) InsertRelayTo(ip netip.Addr) {
 	rs.Lock()
 	defer rs.Unlock()
-	rs.relays[ip] = struct{}{}
+	if !slices.Contains(rs.relays, ip) {
+		rs.relays = append(rs.relays, ip)
+	}
 }

 func (rs *RelayState) CopyRelayIps() []netip.Addr {
+	ret := make([]netip.Addr, len(rs.relays))
 	rs.RLock()
 	defer rs.RUnlock()
-	ret := make([]netip.Addr, 0, len(rs.relays))
-	for ip := range rs.relays {
-		ret = append(ret, ip)
-	}
+	copy(ret, rs.relays)
 	return ret
 }
@@ -250,6 +256,14 @@ type HostInfo struct {
 	// Used to track other hostinfos for this vpn ip since only 1 can be primary
 	// Synchronised via hostmap lock and not the hostinfo lock.
 	next, prev *HostInfo
+
+	//TODO: in, out, and others might benefit from being an atomic.Int32. We could collapse connectionManager pendingDeletion, relayUsed, and in/out into this 1 thing
+	in, out, pendingDeletion atomic.Bool
+
+	// lastUsed tracks the last time ConnectionManager checked the tunnel and it was in use.
+	// This value will be behind against actual tunnel utilization in the hot path.
+	// This should only be used by the ConnectionManagers ticker routine.
+	lastUsed time.Time
 }

 type ViaSender struct {

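Editor's note: a hedged sketch of how the connection manager's ticker might consume these new fields (function and parameter names hypothetical; the real logic lives in the connection manager, which this compare view does not include). Assumes package nebula with "time" imported:

// checkActivity sketches the tick-side use of in/out/lastUsed described above.
// It reports whether the tunnel should be dropped on this tick.
func checkActivity(hi *HostInfo, now time.Time, dropInactive bool, timeout time.Duration) bool {
	if hi.in.Swap(false) || hi.out.Swap(false) {
		hi.lastUsed = now // traffic was seen since the last tick
		return false
	}
	// No traffic since the last tick; drop once the timeout has elapsed.
	return dropInactive && now.Sub(hi.lastUsed) > timeout
}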

@@ -7,6 +7,7 @@ import (
 	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/test"
 	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )

 func TestHostMap_MakePrimary(t *testing.T) {
@@ -215,3 +216,31 @@ func TestHostMap_reload(t *testing.T) {
 	c.ReloadConfigString("preferred_ranges: [1.1.1.1/32]")
 	assert.Equal(t, []string{"1.1.1.1/32"}, toS(hm.GetPreferredRanges()))
 }
+
+func TestHostMap_RelayState(t *testing.T) {
+	h1 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 1}
+	a1 := netip.MustParseAddr("::1")
+	a2 := netip.MustParseAddr("2001::1")
+
+	h1.relayState.InsertRelayTo(a1)
+	assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)
+	h1.relayState.InsertRelayTo(a2)
+	assert.Equal(t, []netip.Addr{a1, a2}, h1.relayState.relays)
+	// Ensure that the first relay added is the first one returned in the copy
+	currentRelays := h1.relayState.CopyRelayIps()
+	require.Len(t, currentRelays, 2)
+	assert.Equal(t, a1, currentRelays[0])
+
+	// Deleting the last one in the list works ok
+	h1.relayState.DeleteRelay(a2)
+	assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)
+
+	// Deleting an element not in the list works ok
+	h1.relayState.DeleteRelay(a2)
+	assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)
+
+	// Deleting the only element in the list works ok
+	h1.relayState.DeleteRelay(a1)
+	assert.Equal(t, []netip.Addr{}, h1.relayState.relays)
+}


@@ -288,7 +288,7 @@ func (f *Interface) SendVia(via *HostInfo,
 	c := via.ConnectionState.messageCounter.Add(1)

 	out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c)
-	f.connectionManager.Out(via.localIndexId)
+	f.connectionManager.Out(via)

 	// Authenticate the header and payload, but do not encrypt for this message type.
 	// The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload.
@@ -356,7 +356,7 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
 	//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
 	out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c)
-	f.connectionManager.Out(hostinfo.localIndexId)
+	f.connectionManager.Out(hostinfo)

 	// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
 	// all our addrs and enable a faster roaming.


@@ -2,6 +2,7 @@ package nebula

 import (
 	"context"
+	"crypto/fips140"
 	"errors"
 	"fmt"
 	"io"
@@ -24,23 +25,23 @@ import (
 const mtu = 9001

 type InterfaceConfig struct {
 	HostMap *HostMap
 	Outside udp.Conn
 	Inside overlay.Device
 	pki *PKI
-	Firewall *Firewall
-	ServeDns bool
-	HandshakeManager *HandshakeManager
-	lightHouse *LightHouse
-	checkInterval time.Duration
-	pendingDeletionInterval time.Duration
+	Cipher string
+	Firewall *Firewall
+	ServeDns bool
+	HandshakeManager *HandshakeManager
+	lightHouse *LightHouse
+	connectionManager *connectionManager
 	DropLocalBroadcast bool
 	DropMulticast bool
 	routines int
 	MessageMetrics *MessageMetrics
 	version string
 	relayManager *relayManager
 	punchy *Punchy
 	tryPromoteEvery uint32
 	reQueryEvery uint32
@@ -157,6 +158,9 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
 	if c.Firewall == nil {
 		return nil, errors.New("no firewall rules")
 	}
+	if c.connectionManager == nil {
+		return nil, errors.New("no connection manager")
+	}

 	cs := c.pki.getCertState()
 	ifce := &Interface{
@@ -181,7 +185,7 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
 		myVpnAddrsTable: cs.myVpnAddrsTable,
 		myBroadcastAddrsTable: cs.myVpnBroadcastAddrsTable,
 		relayManager: c.relayManager,
+		connectionManager: c.connectionManager,
 		conntrackCacheTimeout: c.ConntrackCacheTimeout,
 		metricHandshakes: metrics.GetOrRegisterHistogram("handshakes", nil, metrics.NewExpDecaySample(1028, 0.015)),
@@ -198,7 +202,7 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
 	ifce.reQueryEvery.Store(c.reQueryEvery)
 	ifce.reQueryWait.Store(int64(c.reQueryWait))

-	ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval, c.punchy)
+	ifce.connectionManager.intf = ifce

 	return ifce, nil
 }
@@ -217,6 +221,7 @@ func (f *Interface) activate() {
 	f.l.WithField("interface", f.inside.Name()).WithField("networks", f.myVpnNetworks).
 		WithField("build", f.version).WithField("udpAddr", addr).
 		WithField("boringcrypto", boringEnabled()).
+		WithField("fips140", fips140.Enabled()).
 		Info("Nebula interface is active")

 	metrics.GetOrRegisterGauge("routines", nil).Update(int64(f.routines))

main.go (45 changes)

@@ -185,6 +185,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 	hostMap := NewHostMapFromConfig(l, c)
 	punchy := NewPunchyFromConfig(l, c)
+	connManager := newConnectionManagerFromConfig(l, c, hostMap, punchy)
 	lightHouse, err := NewLightHouseFromConfig(ctx, l, c, pki.getCertState(), udpConns[0], punchy)
 	if err != nil {
 		return nil, util.ContextualizeIfNeeded("Failed to initialize lighthouse handler", err)
@@ -220,31 +221,26 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 		}
 	}

-	checkInterval := c.GetInt("timers.connection_alive_interval", 5)
-	pendingDeletionInterval := c.GetInt("timers.pending_deletion_interval", 10)
-
 	ifConfig := &InterfaceConfig{
 		HostMap: hostMap,
 		Inside: tun,
 		Outside: udpConns[0],
 		pki: pki,
 		Firewall: fw,
 		ServeDns: serveDns,
 		HandshakeManager: handshakeManager,
-		lightHouse: lightHouse,
-		checkInterval: time.Second * time.Duration(checkInterval),
-		pendingDeletionInterval: time.Second * time.Duration(pendingDeletionInterval),
+		connectionManager: connManager,
+		lightHouse: lightHouse,
 		tryPromoteEvery: c.GetUint32("counters.try_promote", defaultPromoteEvery),
 		reQueryEvery: c.GetUint32("counters.requery_every_packets", defaultReQueryEvery),
 		reQueryWait: c.GetDuration("timers.requery_wait_duration", defaultReQueryWait),
 		DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
 		DropMulticast: c.GetBool("tun.drop_multicast", false),
 		routines: routines,
 		MessageMetrics: messageMetrics,
 		version: buildVersion,
 		relayManager: NewRelayManager(ctx, l, hostMap, c),
 		punchy: punchy,
 		ConntrackCacheTimeout: conntrackCacheTimeout,
 		l: l,
 	}
@@ -296,5 +292,6 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
 		statsStart,
 		dnsStart,
 		lightHouse.StartUpdateWorker,
+		connManager.Start,
 	}, nil
 }


@@ -25,6 +25,11 @@ func NewNebulaCipherState(s *noise.CipherState) *NebulaCipherState {
 }

+type cipherAEADDanger interface {
+	EncryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error)
+	DecryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error)
+}
+
 // EncryptDanger encrypts and authenticates a given payload.
 //
 // out is a destination slice to hold the output of the EncryptDanger operation.
@@ -35,20 +40,25 @@ func NewNebulaCipherState(s *noise.CipherState) *NebulaCipherState {
 // be re-used by callers to minimize garbage collection.
 func (s *NebulaCipherState) EncryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error) {
 	if s != nil {
-		// TODO: Is this okay now that we have made messageCounter atomic?
-		// Alternative may be to split the counter space into ranges
-		//if n <= s.n {
-		//	return nil, errors.New("CRITICAL: a duplicate counter value was used")
-		//}
-		//s.n = n
-		nb[0] = 0
-		nb[1] = 0
-		nb[2] = 0
-		nb[3] = 0
-		noiseEndianness.PutUint64(nb[4:], n)
-		out = s.c.(cipher.AEAD).Seal(out, nb, plaintext, ad)
-		//l.Debugf("Encryption: outlen: %d, nonce: %d, ad: %s, plainlen %d", len(out), n, ad, len(plaintext))
-		return out, nil
+		switch ce := s.c.(type) {
+		case cipherAEADDanger:
+			return ce.EncryptDanger(out, ad, plaintext, n, nb)
+		default:
+			// TODO: Is this okay now that we have made messageCounter atomic?
+			// Alternative may be to split the counter space into ranges
+			//if n <= s.n {
+			//	return nil, errors.New("CRITICAL: a duplicate counter value was used")
+			//}
+			//s.n = n
+			nb[0] = 0
+			nb[1] = 0
+			nb[2] = 0
+			nb[3] = 0
+			noiseEndianness.PutUint64(nb[4:], n)
+			out = s.c.(cipher.AEAD).Seal(out, nb, plaintext, ad)
+			//l.Debugf("Encryption: outlen: %d, nonce: %d, ad: %s, plainlen %d", len(out), n, ad, len(plaintext))
+			return out, nil
+		}
 	} else {
 		return nil, errors.New("no cipher state available to encrypt")
 	}
@@ -56,12 +66,17 @@ func (s *NebulaCipherState) EncryptDanger(out, ad, plaintext []byte, n uint64, n

 func (s *NebulaCipherState) DecryptDanger(out, ad, ciphertext []byte, n uint64, nb []byte) ([]byte, error) {
 	if s != nil {
-		nb[0] = 0
-		nb[1] = 0
-		nb[2] = 0
-		nb[3] = 0
-		noiseEndianness.PutUint64(nb[4:], n)
-		return s.c.(cipher.AEAD).Open(out, nb, ciphertext, ad)
+		switch ce := s.c.(type) {
+		case cipherAEADDanger:
+			return ce.DecryptDanger(out, ad, ciphertext, n, nb)
+		default:
+			nb[0] = 0
+			nb[1] = 0
+			nb[2] = 0
+			nb[3] = 0
+			noiseEndianness.PutUint64(nb[4:], n)
+			return s.c.(cipher.AEAD).Open(out, nb, ciphertext, ad)
+		}
 	} else {
 		return []byte{}, nil
 	}

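Editor's note: the fips140 file below satisfies this interface through its EncryptDanger/DecryptDanger methods. A compile-time assertion (not part of this change; the interface is restated locally because cipherAEADDanger is unexported to that package) would make the contract explicit:

// Hypothetical compile-time check for noiseutil's aeadCipher:
var _ interface {
	EncryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error)
	DecryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error)
} = aeadCipher{}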
noiseutil/fips140.go (new file, 78 lines)

@@ -0,0 +1,78 @@
+//go:build fips140v1.0
+// +build fips140v1.0
+
+package noiseutil
+
+import (
+	"crypto/cipher"
+	"encoding/binary"
+
+	// unsafe needed for go:linkname
+	_ "unsafe"
+
+	"github.com/flynn/noise"
+)
+
+// EncryptLockNeeded indicates if calls to Encrypt need a lock
+// This is true for fips140 because the Seal function verifies that the
+// nonce is strictly increasing.
+const EncryptLockNeeded = true
+
+// TODO: Use NewGCMWithCounterNonce once available:
+// - https://github.com/golang/go/issues/73110
+// Using tls.aeadAESGCM gives us the TLS 1.2 GCM, which also verifies
+// that the nonce is strictly increasing.
+//
+//go:linkname aeadAESGCM crypto/tls.aeadAESGCM
+func aeadAESGCM(key, noncePrefix []byte) cipher.AEAD
+
+type cipherFn struct {
+	fn   func([32]byte) noise.Cipher
+	name string
+}
+
+func (c cipherFn) Cipher(k [32]byte) noise.Cipher { return c.fn(k) }
+func (c cipherFn) CipherName() string             { return c.name }
+
+// CipherAESGCM is the AES256-GCM AEAD cipher (using aeadAESGCM when fips140 is enabled)
+var CipherAESGCM noise.CipherFunc = cipherFn{cipherAESGCM, "AESGCM"}
+
+// tls.aeadAESGCM uses a 4 byte static prefix and an 8 byte nonce
+var emptyPrefix = []byte{0, 0, 0, 0}
+
+func cipherAESGCM(k [32]byte) noise.Cipher {
+	gcm := aeadAESGCM(k[:], emptyPrefix)
+	return aeadCipher{
+		gcm,
+		func(n uint64) []byte {
+			// tls.aeadAESGCM uses a 4 byte static prefix and an 8 byte nonce
+			var nonce [8]byte
+			binary.BigEndian.PutUint64(nonce[:], n)
+			return nonce[:]
+		},
+	}
+}
+
+type aeadCipher struct {
+	cipher.AEAD
+	nonce func(uint64) []byte
+}
+
+func (c aeadCipher) Encrypt(out []byte, n uint64, ad, plaintext []byte) []byte {
+	return c.Seal(out, c.nonce(n), plaintext, ad)
+}
+
+func (c aeadCipher) Decrypt(out []byte, n uint64, ad, ciphertext []byte) ([]byte, error) {
+	return c.Open(out, c.nonce(n), ciphertext, ad)
+}
+
+func (c aeadCipher) EncryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error) {
+	binary.BigEndian.PutUint64(nb[4:], n)
+	out = c.Seal(out, nb[4:], plaintext, ad)
+	return out, nil
+}
+
+func (c aeadCipher) DecryptDanger(out, ad, ciphertext []byte, n uint64, nb []byte) ([]byte, error) {
+	binary.BigEndian.PutUint64(nb[4:], n)
+	return c.Open(out, nb[4:], ciphertext, ad)
+}

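Editor's note: for context on how a noise.CipherFunc like CipherAESGCM is used, a hedged sketch against the flynn/noise API (whether Nebula selects exactly this suite is an assumption, not something this compare view shows):

package noiseutil

import "github.com/flynn/noise"

// sketchSuite shows CipherAESGCM plugged into a Noise cipher suite; the DH and
// hash choices here are illustrative only.
var sketchSuite = noise.NewCipherSuite(noise.DH25519, CipherAESGCM, noise.HashSHA256)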
noiseutil/fips140_test.go (new file, 42 lines)

@@ -0,0 +1,42 @@
+//go:build fips140v1.0
+// +build fips140v1.0
+
+package noiseutil
+
+import (
+	"crypto/fips140"
+	"encoding/hex"
+	"log"
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestEncryptLockNeeded(t *testing.T) {
+	assert.True(t, EncryptLockNeeded)
+}
+
+// Ensure NewAESGCM validates the nonce is non-repeating
+func TestNewAESGCM(t *testing.T) {
+	assert.True(t, fips140.Enabled())
+
+	key, _ := hex.DecodeString("feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308")
+	iv, _ := hex.DecodeString("00000000facedbaddecaf888")
+	plaintext, _ := hex.DecodeString("d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39")
+	aad, _ := hex.DecodeString("feedfacedeadbeeffeedfacedeadbeefabaddad2")
+	expected, _ := hex.DecodeString("72ce2ea385f88c20d856e9d1248c2ca08562bbe8a61459ffae06ec393540518e9b6b4c40a146053f26a3df83c5384a48d273148b15aba64d970107432b2892741359275676441c1572c3fa9e")
+
+	var keyArray [32]byte
+	copy(keyArray[:], key)
+	c := CipherAESGCM.Cipher(keyArray)
+	aead := c.(aeadCipher).AEAD
+
+	dst := aead.Seal([]byte{}, iv, plaintext, aad)
+	log.Printf("%x", dst)
+	assert.Equal(t, expected, dst)
+
+	// We expect this to fail since we are re-encrypting with a repeat IV
+	assert.PanicsWithValue(t, "crypto/cipher: counter decreased", func() {
+		dst = aead.Seal([]byte{}, iv, plaintext, aad)
+	})
+}


@@ -1,5 +1,5 @@
-//go:build !boringcrypto
-// +build !boringcrypto
+//go:build !boringcrypto && !fips140v1.0
+// +build !boringcrypto,!fips140v1.0

 package noiseutil


@@ -1,5 +1,5 @@
-//go:build !boringcrypto
-// +build !boringcrypto
+//go:build !boringcrypto && !fips140v1.0
+// +build !boringcrypto,!fips140v1.0

 package noiseutil


@@ -81,7 +81,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
 		// Pull the Roaming parts up here, and return in all call paths.
 		f.handleHostRoaming(hostinfo, ip)
 		// Track usage of both the HostInfo and the Relay for the received & authenticated packet
-		f.connectionManager.In(hostinfo.localIndexId)
+		f.connectionManager.In(hostinfo)
 		f.connectionManager.RelayUsed(h.RemoteIndex)

 		relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex)
@@ -213,7 +213,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
 	f.handleHostRoaming(hostinfo, ip)

-	f.connectionManager.In(hostinfo.localIndexId)
+	f.connectionManager.In(hostinfo)
 }

 // closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote
@@ -312,12 +312,11 @@ func parseV6(data []byte, incoming bool, fp *firewall.Packet) error {
 	offset := ipv6.HeaderLen // Start at the end of the ipv6 header
 	next := 0
 	for {
-		if dataLen < offset {
+		if protoAt >= dataLen {
 			break
 		}

 		proto := layers.IPProtocol(data[protoAt])
-		//fmt.Println(proto, protoAt)
 		switch proto {
 		case layers.IPProtocolICMPv6, layers.IPProtocolESP, layers.IPProtocolNoNextHeader:
 			fp.Protocol = uint8(proto)
@@ -365,7 +364,7 @@ func parseV6(data []byte, incoming bool, fp *firewall.Packet) error {
 		case layers.IPProtocolAH:
 			// Auth headers, used by IPSec, have a different meaning for header length
-			if dataLen < offset+1 {
+			if dataLen <= offset+1 {
 				break
 			}
@@ -373,7 +372,7 @@ func parseV6(data []byte, incoming bool, fp *firewall.Packet) error {
 		default:
 			// Normal ipv6 header length processing
-			if dataLen < offset+1 {
+			if dataLen <= offset+1 {
 				break
 			}
@@ -499,7 +498,7 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
 		return false
 	}

-	f.connectionManager.In(hostinfo.localIndexId)
+	f.connectionManager.In(hostinfo)
 	_, err = f.readers[q].Write(out)
 	if err != nil {
 		f.l.WithError(err).Error("Failed to write to tun")

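Editor's note: the bounds fixes above move the loop guard onto protoAt (the byte actually read next) and make the length-byte checks exclusive. A simplified, hypothetical sketch of the extension-header walk, standalone and without the gopacket types, to illustrate where the guards sit:

package main

import "errors"

var errCouldNotFindPayload = errors.New("could not find payload")

// findPayloadProto is a simplified sketch of parseV6's loop: protoAt points at
// the NextHeader byte of the current header, offset at the next header's start.
func findPayloadProto(data []byte) (byte, error) {
	dataLen := len(data)
	protoAt := 6 // NextHeader field of the fixed IPv6 header
	offset := 40 // ipv6.HeaderLen
	for i := 0; i < 24; i++ { // cap the extension chain to avoid spinning
		if protoAt >= dataLen { // the fixed guard: the byte we read must exist
			return 0, errCouldNotFindPayload
		}
		switch proto := data[protoAt]; proto {
		case 0, 43, 60: // hop-by-hop, routing, destination options
			if dataLen <= offset+1 { // need the length byte at offset+1
				return 0, errCouldNotFindPayload
			}
			protoAt = offset                        // next NextHeader byte
			offset += (int(data[offset+1]) + 1) * 8 // length is in 8-octet units, excluding the first 8
		default:
			return proto, nil // found the payload protocol
		}
	}
	return 0, errCouldNotFindPayload
}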

@@ -117,6 +117,45 @@ func Test_newPacket_v6(t *testing.T) {
 	err = newPacket(buffer.Bytes(), true, p)
 	require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)

+	// A v6 packet with a hop-by-hop extension
+	// ICMPv6 Payload (Echo Request)
+	icmpLayer := layers.ICMPv6{
+		TypeCode: layers.ICMPv6TypeEchoRequest,
+	}
+	// Hop-by-Hop Extension Header
+	hopOption := layers.IPv6HopByHopOption{}
+	hopOption.OptionData = []byte{0, 0, 0, 0}
+	hopByHop := layers.IPv6HopByHop{}
+	hopByHop.Options = append(hopByHop.Options, &hopOption)
+
+	ip = layers.IPv6{
+		Version:    6,
+		HopLimit:   128,
+		NextHeader: layers.IPProtocolIPv6Destination,
+		SrcIP:      net.IPv6linklocalallrouters,
+		DstIP:      net.IPv6linklocalallnodes,
+	}
+
+	buffer.Clear()
+	err = gopacket.SerializeLayers(buffer, gopacket.SerializeOptions{
+		ComputeChecksums: false,
+		FixLengths:       true,
+	}, &ip, &hopByHop, &icmpLayer)
+	if err != nil {
+		panic(err)
+	}
+
+	// Ensure buffer length checks during parsing with the next 2 tests.
+
+	// A full IPv6 header and 1 byte in the first extension, but missing
+	// the length byte.
+	err = newPacket(buffer.Bytes()[:41], true, p)
+	require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)
+
+	// A full IPv6 header plus 1 full extension, but only 1 byte of the
+	// next layer, missing length byte
+	err = newPacket(buffer.Bytes()[:49], true, p)
+	require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)
+
 	// A good ICMP packet
 	ip = layers.IPv6{
 		Version: 6,
@@ -288,6 +327,10 @@ func Test_newPacket_v6(t *testing.T) {
 	assert.Equal(t, uint16(22), p.LocalPort)
 	assert.False(t, p.Fragment)

+	// Ensure buffer bounds checking during processing
+	err = newPacket(b[:41], true, p)
+	require.ErrorIs(t, err, ErrIPv6PacketTooShort)
+
 	// Invalid AH header
 	b = buffer.Bytes()
 	err = newPacket(b, true, p)

udp/errors.go (new file, 5 lines)

@@ -0,0 +1,5 @@
+package udp
+
+import "errors"
+
+var ErrInvalidIPv6RemoteForSocket = errors.New("listener is IPv4, but writing to IPv6 remote")


@@ -3,20 +3,62 @@

 package udp

-// Darwin support is primarily implemented in udp_generic, besides NewListenConfig
-
 import (
+	"context"
+	"encoding/binary"
+	"errors"
 	"fmt"
 	"net"
 	"net/netip"
 	"syscall"
+	"unsafe"

 	"github.com/sirupsen/logrus"
+	"github.com/slackhq/nebula/config"
 	"golang.org/x/sys/unix"
 )

+type StdConn struct {
+	*net.UDPConn
+	isV4  bool
+	sysFd uintptr
+	l     *logrus.Logger
+}
+
+var _ Conn = &StdConn{}
+
 func NewListener(l *logrus.Logger, ip netip.Addr, port int, multi bool, batch int) (Conn, error) {
-	return NewGenericListener(l, ip, port, multi, batch)
+	lc := NewListenConfig(multi)
+	pc, err := lc.ListenPacket(context.TODO(), "udp", net.JoinHostPort(ip.String(), fmt.Sprintf("%v", port)))
+	if err != nil {
+		return nil, err
+	}
+
+	if uc, ok := pc.(*net.UDPConn); ok {
+		c := &StdConn{UDPConn: uc, l: l}
+
+		rc, err := uc.SyscallConn()
+		if err != nil {
+			return nil, fmt.Errorf("failed to open udp socket: %w", err)
+		}
+
+		err = rc.Control(func(fd uintptr) {
+			c.sysFd = fd
+		})
+		if err != nil {
+			return nil, fmt.Errorf("failed to get udp fd: %w", err)
+		}
+
+		la, err := c.LocalAddr()
+		if err != nil {
+			return nil, err
+		}
+		c.isV4 = la.Addr().Is4()
+
+		return c, nil
+	}
+
+	return nil, fmt.Errorf("unexpected PacketConn: %T %#v", pc, pc)
 }

 func NewListenConfig(multi bool) net.ListenConfig {
@@ -43,16 +85,116 @@ func NewListenConfig(multi bool) net.ListenConfig {
 	}
 }

-func (u *GenericConn) Rebind() error {
-	rc, err := u.UDPConn.SyscallConn()
-	if err != nil {
-		return err
-	}
-
-	return rc.Control(func(fd uintptr) {
-		err := syscall.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_BOUND_IF, 0)
-		if err != nil {
-			u.l.WithError(err).Error("Failed to rebind udp socket")
-		}
-	})
+//go:linkname sendto golang.org/x/sys/unix.sendto
+//go:noescape
+func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen int32) (err error)
+
+func (u *StdConn) WriteTo(b []byte, ap netip.AddrPort) error {
+	var sa unsafe.Pointer
+	var addrLen int32
+
+	if u.isV4 {
+		if ap.Addr().Is6() {
+			return ErrInvalidIPv6RemoteForSocket
+		}
+
+		var rsa unix.RawSockaddrInet4
+		rsa.Family = unix.AF_INET
+		rsa.Addr = ap.Addr().As4()
+		binary.BigEndian.PutUint16((*[2]byte)(unsafe.Pointer(&rsa.Port))[:], ap.Port())
+		sa = unsafe.Pointer(&rsa)
+		addrLen = syscall.SizeofSockaddrInet4
+	} else {
+		var rsa unix.RawSockaddrInet6
+		rsa.Family = unix.AF_INET6
+		rsa.Addr = ap.Addr().As16()
+		binary.BigEndian.PutUint16((*[2]byte)(unsafe.Pointer(&rsa.Port))[:], ap.Port())
+		sa = unsafe.Pointer(&rsa)
+		addrLen = syscall.SizeofSockaddrInet6
+	}
+
+	// Golang stdlib doesn't handle EAGAIN correctly in some situations so we do writes ourselves
+	// See https://github.com/golang/go/issues/73919
+	for {
+		//_, _, err := unix.Syscall6(unix.SYS_SENDTO, u.sysFd, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0, sa, addrLen)
+		err := sendto(int(u.sysFd), b, 0, sa, addrLen)
+		if err == nil {
+			// Written, get out before the error handling
+			return nil
+		}
+
+		if errors.Is(err, syscall.EINTR) {
+			// Write was interrupted, retry
+			continue
+		}
+
+		if errors.Is(err, syscall.EAGAIN) {
+			return &net.OpError{Op: "sendto", Err: unix.EWOULDBLOCK}
+		}
+
+		if errors.Is(err, syscall.EBADF) {
+			return net.ErrClosed
+		}
+
+		return &net.OpError{Op: "sendto", Err: err}
+	}
+}
+
+func (u *StdConn) LocalAddr() (netip.AddrPort, error) {
+	a := u.UDPConn.LocalAddr()
+
+	switch v := a.(type) {
+	case *net.UDPAddr:
+		addr, ok := netip.AddrFromSlice(v.IP)
+		if !ok {
+			return netip.AddrPort{}, fmt.Errorf("LocalAddr returned invalid IP address: %s", v.IP)
+		}
+		return netip.AddrPortFrom(addr, uint16(v.Port)), nil
+	default:
+		return netip.AddrPort{}, fmt.Errorf("LocalAddr returned: %#v", a)
+	}
+}
+
+func (u *StdConn) ReloadConfig(c *config.C) {
+	// TODO
+}
+
+func NewUDPStatsEmitter(udpConns []Conn) func() {
+	// No UDP stats for non-linux
+	return func() {}
+}
+
+func (u *StdConn) ListenOut(r EncReader) {
+	buffer := make([]byte, MTU)
+
+	for {
+		// Just read one packet at a time
+		n, rua, err := u.ReadFromUDPAddrPort(buffer)
+		if err != nil {
+			if errors.Is(err, net.ErrClosed) {
+				u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
+				return
+			}
+
+			u.l.WithError(err).Error("unexpected udp socket receive error")
+		}
+
+		r(netip.AddrPortFrom(rua.Addr().Unmap(), rua.Port()), buffer[:n])
+	}
+}
+
+func (u *StdConn) Rebind() error {
+	var err error
+	if u.isV4 {
+		err = syscall.SetsockoptInt(int(u.sysFd), syscall.IPPROTO_IP, syscall.IP_BOUND_IF, 0)
+	} else {
+		err = syscall.SetsockoptInt(int(u.sysFd), syscall.IPPROTO_IPV6, syscall.IPV6_BOUND_IF, 0)
+	}

+	if err != nil {
+		u.l.WithError(err).Error("Failed to rebind udp socket")
+	}
+
+	return nil
 }

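Editor's note: WriteTo above surfaces a full send buffer as a *net.OpError wrapping unix.EWOULDBLOCK rather than blocking. A hedged sketch of how a caller might distinguish that case (hypothetical helper, not part of this change; assumes the errors, net, netip, and unix imports already in this file):

// writeOrDrop reports whether the packet was written, treating a full socket
// buffer (EWOULDBLOCK) as a drop rather than a hard error.
func writeOrDrop(u *StdConn, b []byte, remote netip.AddrPort) (bool, error) {
	err := u.WriteTo(b, remote)
	if err == nil {
		return true, nil
	}
	var op *net.OpError
	if errors.As(err, &op) && errors.Is(op.Err, unix.EWOULDBLOCK) {
		return false, nil // buffer full; the caller may retry later
	}
	return false, err
}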

@@ -1,6 +1,7 @@
-//go:build (!linux || android) && !e2e_testing
+//go:build (!linux || android) && !e2e_testing && !darwin
 // +build !linux android
 // +build !e2e_testing
+// +build !darwin

 // udp_generic implements the nebula UDP interface in pure Go stdlib. This
 // means it can be used on platforms like Darwin and Windows.


@@ -221,7 +221,7 @@ func (u *StdConn) writeTo6(b []byte, ip netip.AddrPort) error {
 func (u *StdConn) writeTo4(b []byte, ip netip.AddrPort) error {
 	if !ip.Addr().Is4() {
-		return fmt.Errorf("Listener is IPv4, but writing to IPv6 remote")
+		return ErrInvalidIPv6RemoteForSocket
 	}

 	var rsa unix.RawSockaddrInet4


@@ -92,6 +92,25 @@ func (u *RIOConn) bind(sa windows.Sockaddr) error {
 	// Enable v4 for this socket
 	syscall.SetsockoptInt(syscall.Handle(u.sock), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)

+	// Disable reporting of PORT_UNREACHABLE and NET_UNREACHABLE errors from the UDP socket receive call.
+	// These errors are returned on Windows during UDP receives based on the receipt of ICMP packets. Disable
+	// the UDP receive error returns with these ioctl calls.
+	ret := uint32(0)
+	flag := uint32(0)
+	size := uint32(unsafe.Sizeof(flag))
+	err = syscall.WSAIoctl(syscall.Handle(u.sock), syscall.SIO_UDP_CONNRESET, (*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0)
+	if err != nil {
+		return err
+	}
+
+	ret = 0
+	flag = 0
+	size = uint32(unsafe.Sizeof(flag))
+	SIO_UDP_NETRESET := uint32(syscall.IOC_IN | syscall.IOC_VENDOR | 15)
+	err = syscall.WSAIoctl(syscall.Handle(u.sock), SIO_UDP_NETRESET, (*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0)
+	if err != nil {
+		return err
+	}
+
 	err = u.rx.Open()
 	if err != nil {
 		return err
@@ -122,8 +141,12 @@ func (u *RIOConn) ListenOut(r EncReader) {
 		// Just read one packet at a time
 		n, rua, err := u.receive(buffer)
 		if err != nil {
-			u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
-			return
+			if errors.Is(err, net.ErrClosed) {
+				u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
+				return
+			}
+
+			u.l.WithError(err).Error("unexpected udp socket receive error")
+			continue
 		}

 		r(netip.AddrPortFrom(netip.AddrFrom16(rua.Addr).Unmap(), (rua.Port>>8)|((rua.Port&0xff)<<8)), buffer[:n])
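Editor's note: both ioctls above come from the same Winsock _WSAIOW(IOC_VENDOR, n) family. A hedged sketch of how the two codes relate (the value 12 for SIO_UDP_CONNRESET is per the Windows SDK headers, an assumption not shown in this diff):

// syscall.SIO_UDP_CONNRESET is exported by the stdlib; SIO_UDP_NETRESET is not,
// which is why the change above derives it inline. Windows-only.
const (
	sioUDPConnReset = syscall.IOC_IN | syscall.IOC_VENDOR | 12 // assumed equal to syscall.SIO_UDP_CONNRESET
	sioUDPNetReset  = syscall.IOC_IN | syscall.IOC_VENDOR | 15
)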