Compare commits


1 Commit

Author: Jay Wren
SHA1: 2400e2392b
Message: lint (reduce staticcheck warnings)
Date: 2025-04-14 14:33:46 -04:00
78 changed files with 690 additions and 1403 deletions
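
Most of the Go-level changes in this comparison are mechanical cleanups of the kind staticcheck reports. A condensed, self-contained sketch of the patterns touched in the diffs below (illustrative only; the variable names are hypothetical and not taken from the repository):

```go
package main

import "fmt"

func main() {
	// Redundant capacity argument: make([]bool, n, n) becomes make([]bool, n).
	bits := make([]bool, 8)

	// Explicit comparisons with booleans: `x == false` becomes `!x`.
	firstSeen := false
	if !firstSeen {
		fmt.Println("first packet not seen yet")
	}

	// Unused second loop variable: `for i, _ := range xs` becomes `for i := range xs`.
	for i := range bits {
		bits[i] = true
	}

	// Results that are never read are assigned to the blank identifier.
	_, err := fmt.Println("done")
	if err != nil {
		panic(err)
	}
}
```

The same pass also replaces `t.Fatal(fmt.Sprintf(...))` with `t.Fatalf(...)` in tests and drops imports that become unused as a result.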

View File

@@ -1,21 +1,13 @@
blank_issues_enabled: true blank_issues_enabled: true
contact_links: contact_links:
- name: 💨 Performance Issues
url: https://github.com/slackhq/nebula/discussions/new/choose
about: 'We ask that you create a discussion instead of an issue for performance-related questions. This allows us to have a more open conversation about the issue and helps us to better understand the problem.'
- name: 📄 Documentation Issues
url: https://github.com/definednet/nebula-docs
about: "If you've found an issue with the website documentation, please file it in the nebula-docs repository."
- name: 📱 Mobile Nebula Issues
url: https://github.com/definednet/mobile_nebula
about: "If you're using the mobile Nebula app and have found an issue, please file it in the mobile_nebula repository."
- name: 📘 Documentation - name: 📘 Documentation
url: https://nebula.defined.net/docs/ url: https://nebula.defined.net/docs/
about: 'The documentation is the best place to start if you are new to Nebula.' about: Review documentation.
- name: 💁 Support/Chat - name: 💁 Support/Chat
url: https://join.slack.com/t/nebulaoss/shared_invite/zt-39pk4xopc-CUKlGcb5Z39dQ0cK1v7ehA url: https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU
about: 'For faster support, join us on Slack for assistance!' about: 'This issue tracker is not for support questions. Join us on Slack for assistance!'
- name: 📱 Mobile Nebula
url: https://github.com/definednet/mobile_nebula
about: 'This issue tracker is not for mobile support. Try the Mobile Nebula repo instead!'

View File

@@ -1,11 +0,0 @@
<!--
Thank you for taking the time to submit a pull request!
Please be sure to provide a clear description of what you're trying to achieve with the change.
- If you're submitting a new feature, please explain how to use it and document any new config options in the example config.
- If you're submitting a bugfix, please link the related issue or describe the circumstances surrounding the issue.
- If you're changing a default, explain why you believe the new default is appropriate for most users.
P.S. If you're only updating the README or other docs, please file a pull request here instead: https://github.com/DefinedNet/nebula-docs
-->

View File

@@ -52,12 +52,4 @@ jobs:
working-directory: ./.github/workflows/smoke working-directory: ./.github/workflows/smoke
run: NAME="smoke-p256" ./smoke.sh run: NAME="smoke-p256" ./smoke.sh
- name: setup docker image for fips140
working-directory: ./.github/workflows/smoke
run: NAME="smoke-fips140" CURVE=P256 GOFIPS140=v1.0.0 LDFLAGS=-checklinkname=0 ./build.sh
- name: run smoke-fips140
working-directory: ./.github/workflows/smoke
run: NAME="smoke-fips140" ./smoke.sh
timeout-minutes: 10 timeout-minutes: 10

View File

@@ -32,9 +32,9 @@ jobs:
run: make vet run: make vet
- name: golangci-lint - name: golangci-lint
uses: golangci/golangci-lint-action@v8 uses: golangci/golangci-lint-action@v7
with: with:
version: v2.1 version: v2.0
- name: Test - name: Test
run: make test run: make test
@@ -115,9 +115,9 @@ jobs:
run: make vet run: make vet
- name: golangci-lint - name: golangci-lint
uses: golangci/golangci-lint-action@v8 uses: golangci/golangci-lint-action@v7
with: with:
version: v2.1 version: v2.0
- name: Test - name: Test
run: make test run: make test

View File

@@ -121,12 +121,12 @@ bin-pkcs11: CGO_ENABLED = 1
bin-pkcs11: bin bin-pkcs11: bin
bin: bin:
$(GOENV) go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula${NEBULA_CMD_SUFFIX} ${NEBULA_CMD_PATH} go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula${NEBULA_CMD_SUFFIX} ${NEBULA_CMD_PATH}
$(GOENV) go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula-cert${NEBULA_CMD_SUFFIX} ./cmd/nebula-cert go build $(BUILD_ARGS) -ldflags "$(LDFLAGS)" -o ./nebula-cert${NEBULA_CMD_SUFFIX} ./cmd/nebula-cert
install: install:
$(GOENV) go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH} go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ${NEBULA_CMD_PATH}
$(GOENV) go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ./cmd/nebula-cert go install $(BUILD_ARGS) -ldflags "$(LDFLAGS)" ./cmd/nebula-cert
build/linux-arm-%: GOENV += GOARM=$(word 3, $(subst -, ,$*)) build/linux-arm-%: GOENV += GOARM=$(word 3, $(subst -, ,$*))
build/linux-mips-%: GOENV += GOMIPS=$(word 3, $(subst -, ,$*)) build/linux-mips-%: GOENV += GOMIPS=$(word 3, $(subst -, ,$*))
@@ -215,14 +215,6 @@ ifeq ($(words $(MAKECMDGOALS)),1)
@$(MAKE) service ${.DEFAULT_GOAL} --no-print-directory @$(MAKE) service ${.DEFAULT_GOAL} --no-print-directory
endif endif
fips140:
@echo > $(NULL_FILE)
$(eval GOENV += GOFIPS140=v1.0.0)
$(eval LDFLAGS += -checklinkname=0)
ifeq ($(words $(MAKECMDGOALS)),1)
@$(MAKE) fips140 ${.DEFAULT_GOAL} --no-print-directory
endif
bin-docker: bin build/linux-amd64/nebula build/linux-amd64/nebula-cert bin-docker: bin build/linux-amd64/nebula build/linux-amd64/nebula-cert
smoke-docker: bin-docker smoke-docker: bin-docker
@@ -244,5 +236,5 @@ smoke-vagrant/%: bin-docker build/%/nebula
cd .github/workflows/smoke/ && ./smoke-vagrant.sh $* cd .github/workflows/smoke/ && ./smoke-vagrant.sh $*
.FORCE: .FORCE:
.PHONY: bench bench-cpu bench-cpu-long bin build-test-mobile e2e e2ev e2evv e2evvv e2evvvv fips140 proto release service smoke-docker smoke-docker-race test test-cov-html smoke-vagrant/% .PHONY: bench bench-cpu bench-cpu-long bin build-test-mobile e2e e2ev e2evv e2evvv e2evvvv proto release service smoke-docker smoke-docker-race test test-cov-html smoke-vagrant/%
.DEFAULT_GOAL := bin .DEFAULT_GOAL := bin

View File

@@ -4,7 +4,7 @@ It lets you seamlessly connect computers anywhere in the world. Nebula is portab
It can be used to connect a small number of computers, but is also able to connect tens of thousands of computers. It can be used to connect a small number of computers, but is also able to connect tens of thousands of computers.
Nebula incorporates a number of existing concepts like encryption, security groups, certificates, Nebula incorporates a number of existing concepts like encryption, security groups, certificates,
and tunneling. and tunneling, and each of those individual pieces existed before Nebula in various forms.
What makes Nebula different to existing offerings is that it brings all of these ideas together, What makes Nebula different to existing offerings is that it brings all of these ideas together,
resulting in a sum that is greater than its individual parts. resulting in a sum that is greater than its individual parts.
@@ -12,7 +12,7 @@ Further documentation can be found [here](https://nebula.defined.net/docs/).
You can read more about Nebula [here](https://medium.com/p/884110a5579). You can read more about Nebula [here](https://medium.com/p/884110a5579).
You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/zt-39pk4xopc-CUKlGcb5Z39dQ0cK1v7ehA). You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/zt-2xqe6e7vn-k_KGi8s13nsr7cvHVvHvuQ).
## Supported Platforms ## Supported Platforms
@@ -28,33 +28,33 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for
#### Distribution Packages #### Distribution Packages
- [Arch Linux](https://archlinux.org/packages/extra/x86_64/nebula/) - [Arch Linux](https://archlinux.org/packages/extra/x86_64/nebula/)
```sh ```
sudo pacman -S nebula $ sudo pacman -S nebula
``` ```
- [Fedora Linux](https://src.fedoraproject.org/rpms/nebula) - [Fedora Linux](https://src.fedoraproject.org/rpms/nebula)
```sh ```
sudo dnf install nebula $ sudo dnf install nebula
``` ```
- [Debian Linux](https://packages.debian.org/source/stable/nebula) - [Debian Linux](https://packages.debian.org/source/stable/nebula)
```sh ```
sudo apt install nebula $ sudo apt install nebula
``` ```
- [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=nebula) - [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=nebula)
```sh ```
sudo apk add nebula $ sudo apk add nebula
``` ```
- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/n/nebula.rb) - [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/n/nebula.rb)
```sh ```
brew install nebula $ brew install nebula
``` ```
- [Docker](https://hub.docker.com/r/nebulaoss/nebula) - [Docker](https://hub.docker.com/r/nebulaoss/nebula)
```sh ```
docker pull nebulaoss/nebula $ docker pull nebulaoss/nebula
``` ```
#### Mobile #### Mobile
@@ -64,10 +64,10 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for
## Technical Overview ## Technical Overview
Nebula is a mutually authenticated peer-to-peer software-defined network based on the [Noise Protocol Framework](https://noiseprotocol.org/). Nebula is a mutually authenticated peer-to-peer software defined network based on the [Noise Protocol Framework](https://noiseprotocol.org/).
Nebula uses certificates to assert a node's IP address, name, and membership within user-defined groups. Nebula uses certificates to assert a node's IP address, name, and membership within user-defined groups.
Nebula's user-defined groups allow for provider agnostic traffic filtering between nodes. Nebula's user-defined groups allow for provider agnostic traffic filtering between nodes.
Discovery nodes (aka lighthouses) allow individual peers to find each other and optionally use UDP hole punching to establish connections from behind most firewalls or NATs. Discovery nodes allow individual peers to find each other and optionally use UDP hole punching to establish connections from behind most firewalls or NATs.
Users can move data between nodes in any number of cloud service providers, datacenters, and endpoints, without needing to maintain a particular addressing scheme. Users can move data between nodes in any number of cloud service providers, datacenters, and endpoints, without needing to maintain a particular addressing scheme.
Nebula uses Elliptic-curve Diffie-Hellman (`ECDH`) key exchange and `AES-256-GCM` in its default configuration. Nebula uses Elliptic-curve Diffie-Hellman (`ECDH`) key exchange and `AES-256-GCM` in its default configuration.
@@ -82,34 +82,28 @@ To set up a Nebula network, you'll need:
#### 2. (Optional, but you really should..) At least one discovery node with a routable IP address, which we call a lighthouse. #### 2. (Optional, but you really should..) At least one discovery node with a routable IP address, which we call a lighthouse.
Nebula lighthouses allow nodes to find each other, anywhere in the world. A lighthouse is the only node in a Nebula network whose IP should not change. Running a lighthouse requires very few compute resources, and you can easily use the least expensive option from a cloud hosting provider. If you're not sure which provider to use, a number of us have used $6/mo [DigitalOcean](https://digitalocean.com) droplets as lighthouses. Nebula lighthouses allow nodes to find each other, anywhere in the world. A lighthouse is the only node in a Nebula network whose IP should not change. Running a lighthouse requires very few compute resources, and you can easily use the least expensive option from a cloud hosting provider. If you're not sure which provider to use, a number of us have used $5/mo [DigitalOcean](https://digitalocean.com) droplets as lighthouses.
Once you have launched an instance, ensure that Nebula udp traffic (default port udp/4242) can reach it over the internet.
#### 3. A Nebula certificate authority, which will be the root of trust for a particular Nebula network. #### 3. A Nebula certificate authority, which will be the root of trust for a particular Nebula network.
```sh ```
./nebula-cert ca -name "Myorganization, Inc" ./nebula-cert ca -name "Myorganization, Inc"
``` ```
This will create files named `ca.key` and `ca.cert` in the current directory. The `ca.key` file is the most sensitive file you'll create, because it is the key used to sign the certificates for individual nebula nodes/hosts. Please store this file somewhere safe, preferably with strong encryption.
**Be aware!** By default, certificate authorities have a 1-year lifetime before expiration. See [this guide](https://nebula.defined.net/docs/guides/rotating-certificate-authority/) for details on rotating a CA.
#### 4. Nebula host keys and certificates generated from that certificate authority #### 4. Nebula host keys and certificates generated from that certificate authority
This assumes you have four nodes, named lighthouse1, laptop, server1, host3. You can name the nodes any way you'd like, including FQDN. You'll also need to choose IP addresses and the associated subnet. In this example, we are creating a nebula network that will use 192.168.100.x/24 as its network range. This example also demonstrates nebula groups, which can later be used to define traffic rules in a nebula network. This assumes you have four nodes, named lighthouse1, laptop, server1, host3. You can name the nodes any way you'd like, including FQDN. You'll also need to choose IP addresses and the associated subnet. In this example, we are creating a nebula network that will use 192.168.100.x/24 as its network range. This example also demonstrates nebula groups, which can later be used to define traffic rules in a nebula network.
```sh ```
./nebula-cert sign -name "lighthouse1" -ip "192.168.100.1/24" ./nebula-cert sign -name "lighthouse1" -ip "192.168.100.1/24"
./nebula-cert sign -name "laptop" -ip "192.168.100.2/24" -groups "laptop,home,ssh" ./nebula-cert sign -name "laptop" -ip "192.168.100.2/24" -groups "laptop,home,ssh"
./nebula-cert sign -name "server1" -ip "192.168.100.9/24" -groups "servers" ./nebula-cert sign -name "server1" -ip "192.168.100.9/24" -groups "servers"
./nebula-cert sign -name "host3" -ip "192.168.100.10/24" ./nebula-cert sign -name "host3" -ip "192.168.100.10/24"
``` ```
By default, host certificates will expire 1 second before the CA expires. Use the `-duration` flag to specify a shorter lifetime.
#### 5. Configuration files for each host #### 5. Configuration files for each host
Download a copy of the nebula [example configuration](https://github.com/slackhq/nebula/blob/master/examples/config.yml). Download a copy of the nebula [example configuration](https://github.com/slackhq/nebula/blob/master/examples/config.yml).
* On the lighthouse node, you'll need to ensure `am_lighthouse: true` is set. * On the lighthouse node, you'll need to ensure `am_lighthouse: true` is set.
@@ -124,13 +118,10 @@ For each host, copy the nebula binary to the host, along with `config.yml` from
**DO NOT COPY `ca.key` TO INDIVIDUAL NODES.** **DO NOT COPY `ca.key` TO INDIVIDUAL NODES.**
#### 7. Run nebula on each host #### 7. Run nebula on each host
```
```sh
./nebula -config /path/to/config.yml ./nebula -config /path/to/config.yml
``` ```
For more detailed instructions, [find the full documentation here](https://nebula.defined.net/docs/).
## Building Nebula from source ## Building Nebula from source
Make sure you have [go](https://go.dev/doc/install) installed and clone this repo. Change to the nebula directory. Make sure you have [go](https://go.dev/doc/install) installed and clone this repo. Change to the nebula directory.
@@ -143,23 +134,14 @@ To build nebula for a specific platform (ex, Windows):
See the [Makefile](Makefile) for more details on build targets See the [Makefile](Makefile) for more details on build targets
## Curve P256, BoringCrypto and FIPS 140-3 mode ## Curve P256 and BoringCrypto
The default curve used for cryptographic handshakes and signatures is Curve25519. This is the recommended setting for most users. If your deployment has certain compliance requirements, you have the option of creating your CA using `nebula-cert ca -curve P256` to use NIST Curve P256. The CA will then sign certificates using ECDSA P256, and any hosts using these certificates will use P256 for ECDH handshakes. The default curve used for cryptographic handshakes and signatures is Curve25519. This is the recommended setting for most users. If your deployment has certain compliance requirements, you have the option of creating your CA using `nebula-cert ca -curve P256` to use NIST Curve P256. The CA will then sign certificates using ECDSA P256, and any hosts using these certificates will use P256 for ECDH handshakes.
Nebula can be built using the [BoringCrypto GOEXPERIMENT](https://github.com/golang/go/blob/go1.20/src/crypto/internal/boring/README.md) by running either of the following make targets: In addition, Nebula can be built using the [BoringCrypto GOEXPERIMENT](https://github.com/golang/go/blob/go1.20/src/crypto/internal/boring/README.md) by running either of the following make targets:
```sh make bin-boringcrypto
make bin-boringcrypto make release-boringcrypto
make release-boringcrypto
```
Nebula can also be built using the [FIPS 140-3](https://go.dev/doc/security/fips140) mode of Go by running either of the following make targets:
```sh
make fips140
make fips140 release
```
This is not the recommended default deployment, but may be useful based on your compliance requirements. This is not the recommended default deployment, but may be useful based on your compliance requirements.
@@ -167,3 +149,5 @@ This is not the recommended default deployment, but may be useful based on your
Nebula was created at Slack Technologies, Inc by Nate Brown and Ryan Huber, with contributions from Oliver Fross, Alan Lam, Wade Simmons, and Lining Wang. Nebula was created at Slack Technologies, Inc by Nate Brown and Ryan Huber, with contributions from Oliver Fross, Alan Lam, Wade Simmons, and Lining Wang.

View File

@@ -25,14 +25,14 @@ func TestNewAllowListFromConfig(t *testing.T) {
c.Settings["allowlist"] = map[string]any{ c.Settings["allowlist"] = map[string]any{
"192.168.0.0/16": "abc", "192.168.0.0/16": "abc",
} }
r, err = newAllowListFromConfig(c, "allowlist", nil) _, err = newAllowListFromConfig(c, "allowlist", nil)
require.EqualError(t, err, "config `allowlist` has invalid value (type string): abc") require.EqualError(t, err, "config `allowlist` has invalid value (type string): abc")
c.Settings["allowlist"] = map[string]any{ c.Settings["allowlist"] = map[string]any{
"192.168.0.0/16": true, "192.168.0.0/16": true,
"10.0.0.0/8": false, "10.0.0.0/8": false,
} }
r, err = newAllowListFromConfig(c, "allowlist", nil) _, err = newAllowListFromConfig(c, "allowlist", nil)
require.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for 0.0.0.0/0") require.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for 0.0.0.0/0")
c.Settings["allowlist"] = map[string]any{ c.Settings["allowlist"] = map[string]any{
@@ -42,7 +42,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
"fd00::/8": true, "fd00::/8": true,
"fd00:fd00::/16": false, "fd00:fd00::/16": false,
} }
r, err = newAllowListFromConfig(c, "allowlist", nil) _, err = newAllowListFromConfig(c, "allowlist", nil)
require.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for ::/0") require.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for ::/0")
c.Settings["allowlist"] = map[string]any{ c.Settings["allowlist"] = map[string]any{
@@ -75,7 +75,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
`docker.*`: "foo", `docker.*`: "foo",
}, },
} }
lr, err := NewLocalAllowListFromConfig(c, "allowlist") _, err = NewLocalAllowListFromConfig(c, "allowlist")
require.EqualError(t, err, "config `allowlist.interfaces` has invalid value (type string): foo") require.EqualError(t, err, "config `allowlist.interfaces` has invalid value (type string): foo")
c.Settings["allowlist"] = map[string]any{ c.Settings["allowlist"] = map[string]any{
@@ -84,7 +84,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
`eth.*`: true, `eth.*`: true,
}, },
} }
lr, err = NewLocalAllowListFromConfig(c, "allowlist") _, err = NewLocalAllowListFromConfig(c, "allowlist")
require.EqualError(t, err, "config `allowlist.interfaces` values must all be the same true/false value") require.EqualError(t, err, "config `allowlist.interfaces` values must all be the same true/false value")
c.Settings["allowlist"] = map[string]any{ c.Settings["allowlist"] = map[string]any{
@@ -92,7 +92,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
`docker.*`: false, `docker.*`: false,
}, },
} }
lr, err = NewLocalAllowListFromConfig(c, "allowlist") lr, err := NewLocalAllowListFromConfig(c, "allowlist")
if assert.NoError(t, err) { if assert.NoError(t, err) {
assert.NotNil(t, lr) assert.NotNil(t, lr)
} }

bits.go
View File

@@ -18,7 +18,7 @@ type Bits struct {
func NewBits(bits uint64) *Bits { func NewBits(bits uint64) *Bits {
return &Bits{ return &Bits{
length: bits, length: bits,
bits: make([]bool, bits, bits), bits: make([]bool, bits),
current: 0, current: 0,
lostCounter: metrics.GetOrRegisterCounter("network.packets.lost", nil), lostCounter: metrics.GetOrRegisterCounter("network.packets.lost", nil),
dupeCounter: metrics.GetOrRegisterCounter("network.packets.duplicate", nil), dupeCounter: metrics.GetOrRegisterCounter("network.packets.duplicate", nil),
@@ -28,7 +28,7 @@ func NewBits(bits uint64) *Bits {
func (b *Bits) Check(l logrus.FieldLogger, i uint64) bool { func (b *Bits) Check(l logrus.FieldLogger, i uint64) bool {
// If i is the next number, return true. // If i is the next number, return true.
if i > b.current || (i == 0 && b.firstSeen == false && b.current < b.length) { if i > b.current || (i == 0 && !b.firstSeen && b.current < b.length) {
return true return true
} }
@@ -51,7 +51,7 @@ func (b *Bits) Update(l *logrus.Logger, i uint64) bool {
// If i is the next number, return true and update current. // If i is the next number, return true and update current.
if i == b.current+1 { if i == b.current+1 {
// Report missed packets, we can only understand what was missed after the first window has been gone through // Report missed packets, we can only understand what was missed after the first window has been gone through
if i > b.length && b.bits[i%b.length] == false { if i > b.length && !b.bits[i%b.length] {
b.lostCounter.Inc(1) b.lostCounter.Inc(1)
} }
b.bits[i%b.length] = true b.bits[i%b.length] = true
@@ -104,7 +104,7 @@ func (b *Bits) Update(l *logrus.Logger, i uint64) bool {
} }
// Allow for the 0 packet to come in within the first window // Allow for the 0 packet to come in within the first window
if i == 0 && b.firstSeen == false && b.current < b.length { if i == 0 && !b.firstSeen && b.current < b.length {
b.firstSeen = true b.firstSeen = true
b.bits[i%b.length] = true b.bits[i%b.length] = true
return true return true
@@ -122,7 +122,7 @@ func (b *Bits) Update(l *logrus.Logger, i uint64) bool {
return false return false
} }
if b.bits[i%b.length] == true { if b.bits[i%b.length] {
if l.Level >= logrus.DebugLevel { if l.Level >= logrus.DebugLevel {
l.WithField("receiveWindow", m{"accepted": false, "currentCounter": b.current, "incomingCounter": i, "reason": "old duplicate"}). l.WithField("receiveWindow", m{"accepted": false, "currentCounter": b.current, "incomingCounter": i, "reason": "old duplicate"}).
Debug("Receive window") Debug("Receive window")

View File

@@ -20,8 +20,6 @@ import (
"google.golang.org/protobuf/proto" "google.golang.org/protobuf/proto"
) )
const publicKeyLen = 32
type certificateV1 struct { type certificateV1 struct {
details detailsV1 details detailsV1
signature []byte signature []byte

View File

@@ -113,14 +113,14 @@ func TestCertificateV2_MarshalJSON(t *testing.T) {
signature: []byte("1234567890abcedf1234567890abcedf1234567890abcedf1234567890abcedf"), signature: []byte("1234567890abcedf1234567890abcedf1234567890abcedf1234567890abcedf"),
} }
b, err := nc.MarshalJSON() _, err := nc.MarshalJSON()
require.ErrorIs(t, err, ErrMissingDetails) require.ErrorIs(t, err, ErrMissingDetails)
rd, err := nc.details.Marshal() rd, err := nc.details.Marshal()
require.NoError(t, err) require.NoError(t, err)
nc.rawDetails = rd nc.rawDetails = rd
b, err = nc.MarshalJSON() b, err := nc.MarshalJSON()
require.NoError(t, err) require.NoError(t, err)
assert.JSONEq( assert.JSONEq(
t, t,
@@ -174,8 +174,9 @@ func TestCertificateV2_VerifyPrivateKey(t *testing.T) {
require.ErrorIs(t, err, ErrInvalidPrivateKey) require.ErrorIs(t, err, ErrInvalidPrivateKey)
c, _, priv, _ = NewTestCert(Version2, Curve_P256, ca2, caKey2, "test", time.Time{}, time.Time{}, nil, nil, nil) c, _, priv, _ = NewTestCert(Version2, Curve_P256, ca2, caKey2, "test", time.Time{}, time.Time{}, nil, nil, nil)
rawPriv, b, curve, err = UnmarshalPrivateKeyFromPEM(priv) _, _, curve, err = UnmarshalPrivateKeyFromPEM(priv)
assert.Equal(t, err, nil)
assert.Equal(t, curve, Curve_P256)
err = c.VerifyPrivateKey(Curve_P256, priv[:16]) err = c.VerifyPrivateKey(Curve_P256, priv[:16])
require.ErrorIs(t, err, ErrInvalidPrivateKey) require.ErrorIs(t, err, ErrInvalidPrivateKey)
@@ -261,6 +262,7 @@ func TestCertificateV2_marshalForSigningStability(t *testing.T) {
assert.Equal(t, expectedRawDetails, db) assert.Equal(t, expectedRawDetails, db)
expectedForSigning, err := hex.DecodeString(expectedRawDetailsStr + "00313233343536373839306162636564666768696a313233343536373839306162") expectedForSigning, err := hex.DecodeString(expectedRawDetailsStr + "00313233343536373839306162636564666768696a313233343536373839306162")
require.NoError(t, err)
b, err := nc.marshalForSigning() b, err := nc.marshalForSigning()
require.NoError(t, err) require.NoError(t, err)
assert.Equal(t, expectedForSigning, b) assert.Equal(t, expectedForSigning, b)

View File

@@ -227,6 +227,9 @@ func UnmarshalNebulaEncryptedData(b []byte) (*NebulaEncryptedData, error) {
} }
func unmarshalArgon2Parameters(params *RawNebulaArgon2Parameters) (*Argon2Parameters, error) { func unmarshalArgon2Parameters(params *RawNebulaArgon2Parameters) (*Argon2Parameters, error) {
// Are we testing the compiler's types here?
// No value of int32 is less than math.MinInt32.
// By definition these checks can never be true.
if params.Version < math.MinInt32 || params.Version > math.MaxInt32 { if params.Version < math.MinInt32 || params.Version > math.MaxInt32 {
return nil, fmt.Errorf("Argon2Parameters Version must be at least %d and no more than %d", math.MinInt32, math.MaxInt32) return nil, fmt.Errorf("Argon2Parameters Version must be at least %d and no more than %d", math.MinInt32, math.MaxInt32)
} }
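
The comment added above points out that these bounds checks are tautological. A minimal, standalone illustration of why, assuming the field is declared as int32 (the variable name here is hypothetical, not code from the repository):

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	// Stand-in for a Version field declared as int32.
	var version int32 = math.MinInt32

	// Neither comparison can ever be true for an int32 value,
	// so an error branch guarded by them is unreachable.
	fmt.Println(version < math.MinInt32) // always false
	fmt.Println(version > math.MaxInt32) // always false
}
```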

View File

@@ -26,21 +26,21 @@ func TestNewArgon2Parameters(t *testing.T) {
} }
func TestDecryptAndUnmarshalSigningPrivateKey(t *testing.T) { func TestDecryptAndUnmarshalSigningPrivateKey(t *testing.T) {
passphrase := []byte("DO NOT USE") passphrase := []byte("DO NOT USE THIS KEY")
privKey := []byte(`# A good key privKey := []byte(`# A good key
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY----- -----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
CjsKC0FFUy0yNTYtR0NNEiwIExCAgAQYAyAEKiCPoDfGQiosxNPTbPn5EsMlc2MI CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCognnjujd67Vsv99p22wfAjQaDT
c0Bt4oz6gTrFQhX3aBJcimhHKeAuhyTGvllD0Z19fe+DFPcLH3h5VrdjVfIAajg0 oCMW1mdjkU3gACKNW4MSXOWR9Sts4C81yk1RUku2gvGKs3TB9LYoklLsIizSYOLl
KrbV3n9UHif/Au5skWmquNJzoW1E4MTdRbvpti6o+WdQ49DxjBFhx0YH8LBqrbPU +Vs//O1T0I1Xbml2XBAROsb/VSoDln/6LMqR4B6fn6B3GOsLBBqRI8daDl9lRMPB
0BGkUHmIO7daP24= qrlJ69wer3ZUHFXA
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY----- -----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
`) `)
shortKey := []byte(`# A key which, once decrypted, is too short shortKey := []byte(`# A key which, once decrypted, is too short
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY----- -----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
CjsKC0FFUy0yNTYtR0NNEiwIExCAgAQYAyAEKiAVJwdfl3r+eqi/vF6S7OMdpjfo CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCoga5h8owMEBWRSMMJKzuUvWce7
hAzmTCRnr58Su4AqmBJbCv3zleYCEKYJP6UI3S8ekLMGISsgO4hm5leukCCyqT0Z k0qlBkQmCxiuLh80MuASW70YcKt8jeEIS2axo2V6zAKA9TSMcCsJW1kDDXEtL/xe
cQ76yrberpzkJKoPLGisX8f+xdy4aXSZl7oEYWQte1+vqbtl/eY9PGZhxUQdcyq7 GLF5T7sDl5COp4LU3pGxpV+KoeQ/S3gQCAAcnaOtnJQX+aSDnbO3jCHyP7U9CHbs
hqzIyrRqfUgVuA== rQr3bdH3Oy/WiYU=
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY----- -----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
`) `)
invalidBanner := []byte(`# Invalid banner (not encrypted) invalidBanner := []byte(`# Invalid banner (not encrypted)
@@ -72,12 +72,14 @@ qrlJ69wer3ZUHFXA
require.EqualError(t, err, "key was not 64 bytes, is invalid ed25519 private key") require.EqualError(t, err, "key was not 64 bytes, is invalid ed25519 private key")
assert.Nil(t, k) assert.Nil(t, k)
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem)) assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
assert.Equal(t, curve, Curve_CURVE25519)
// Fail due to invalid banner // Fail due to invalid banner
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest) curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey(passphrase, rest)
require.EqualError(t, err, "bytes did not contain a proper nebula encrypted Ed25519/ECDSA private key banner") require.EqualError(t, err, "bytes did not contain a proper nebula encrypted Ed25519/ECDSA private key banner")
assert.Nil(t, k) assert.Nil(t, k)
assert.Equal(t, rest, invalidPem) assert.Equal(t, rest, invalidPem)
assert.Equal(t, curve, Curve_CURVE25519)
// Fail due to ivalid PEM format, because // Fail due to ivalid PEM format, because
// it's missing the requisite pre-encapsulation boundary. // it's missing the requisite pre-encapsulation boundary.
@@ -85,12 +87,14 @@ qrlJ69wer3ZUHFXA
require.EqualError(t, err, "input did not contain a valid PEM encoded block") require.EqualError(t, err, "input did not contain a valid PEM encoded block")
assert.Nil(t, k) assert.Nil(t, k)
assert.Equal(t, rest, invalidPem) assert.Equal(t, rest, invalidPem)
assert.Equal(t, curve, Curve_CURVE25519)
// Fail due to invalid passphrase // Fail due to invalid passphrase
curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey([]byte("invalid passphrase"), privKey) curve, k, rest, err = DecryptAndUnmarshalSigningPrivateKey([]byte("invalid passphrase"), privKey)
require.EqualError(t, err, "invalid passphrase or corrupt private key") require.EqualError(t, err, "invalid passphrase or corrupt private key")
assert.Nil(t, k) assert.Nil(t, k)
assert.Equal(t, []byte{}, rest) assert.Equal(t, []byte{}, rest)
assert.Equal(t, curve, Curve_CURVE25519)
} }
func TestEncryptAndMarshalSigningPrivateKey(t *testing.T) { func TestEncryptAndMarshalSigningPrivateKey(t *testing.T) {

View File

@@ -21,6 +21,9 @@ func NewTestCaCert(version Version, curve Curve, before, after time.Time, networ
switch curve { switch curve {
case Curve_CURVE25519: case Curve_CURVE25519:
pub, priv, err = ed25519.GenerateKey(rand.Reader) pub, priv, err = ed25519.GenerateKey(rand.Reader)
if err != nil {
panic(err)
}
case Curve_P256: case Curve_P256:
privk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) privk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil { if err != nil {

View File

@@ -97,12 +97,14 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
// Fail due to short key // Fail due to short key
k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest) k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest)
assert.Nil(t, k) assert.Nil(t, k)
assert.Equal(t, Curve_CURVE25519, curve)
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem)) assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
require.EqualError(t, err, "key was not 64 bytes, is invalid Ed25519 private key") require.EqualError(t, err, "key was not 64 bytes, is invalid Ed25519 private key")
// Fail due to invalid banner // Fail due to invalid banner
k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest) k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest)
assert.Nil(t, k) assert.Nil(t, k)
assert.Equal(t, Curve_CURVE25519, curve)
assert.Equal(t, rest, invalidPem) assert.Equal(t, rest, invalidPem)
require.EqualError(t, err, "bytes did not contain a proper Ed25519/ECDSA private key banner") require.EqualError(t, err, "bytes did not contain a proper Ed25519/ECDSA private key banner")
@@ -110,6 +112,7 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
// it's missing the requisite pre-encapsulation boundary. // it's missing the requisite pre-encapsulation boundary.
k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest) k, rest, curve, err = UnmarshalSigningPrivateKeyFromPEM(rest)
assert.Nil(t, k) assert.Nil(t, k)
assert.Equal(t, Curve_CURVE25519, curve)
assert.Equal(t, rest, invalidPem) assert.Equal(t, rest, invalidPem)
require.EqualError(t, err, "input did not contain a valid PEM encoded block") require.EqualError(t, err, "input did not contain a valid PEM encoded block")
} }
@@ -159,12 +162,14 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
// Fail due to short key // Fail due to short key
k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest) k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest)
assert.Nil(t, k) assert.Nil(t, k)
assert.Equal(t, Curve_CURVE25519, curve)
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem)) assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
require.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 private key") require.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 private key")
// Fail due to invalid banner // Fail due to invalid banner
k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest) k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest)
assert.Nil(t, k) assert.Nil(t, k)
assert.Equal(t, Curve_CURVE25519, curve)
assert.Equal(t, rest, invalidPem) assert.Equal(t, rest, invalidPem)
require.EqualError(t, err, "bytes did not contain a proper private key banner") require.EqualError(t, err, "bytes did not contain a proper private key banner")
@@ -172,6 +177,7 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
// it's missing the requisite pre-encapsulation boundary. // it's missing the requisite pre-encapsulation boundary.
k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest) k, rest, curve, err = UnmarshalPrivateKeyFromPEM(rest)
assert.Nil(t, k) assert.Nil(t, k)
assert.Equal(t, Curve_CURVE25519, curve)
assert.Equal(t, rest, invalidPem) assert.Equal(t, rest, invalidPem)
require.EqualError(t, err, "input did not contain a valid PEM encoded block") require.EqualError(t, err, "input did not contain a valid PEM encoded block")
} }
@@ -275,12 +281,14 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
// Fail due to short key // Fail due to short key
k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest) k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
assert.Nil(t, k) assert.Nil(t, k)
assert.Equal(t, Curve_CURVE25519, curve)
assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem)) assert.Equal(t, rest, appendByteSlices(invalidBanner, invalidPem))
require.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 public key") require.EqualError(t, err, "key was not 32 bytes, is invalid CURVE25519 public key")
// Fail due to invalid banner // Fail due to invalid banner
k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest) k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
assert.Nil(t, k) assert.Nil(t, k)
assert.Equal(t, Curve_CURVE25519, curve)
require.EqualError(t, err, "bytes did not contain a proper public key banner") require.EqualError(t, err, "bytes did not contain a proper public key banner")
assert.Equal(t, rest, invalidPem) assert.Equal(t, rest, invalidPem)
@@ -288,6 +296,7 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
// it's missing the requisite pre-encapsulation boundary. // it's missing the requisite pre-encapsulation boundary.
k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest) k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
assert.Nil(t, k) assert.Nil(t, k)
assert.Equal(t, Curve_CURVE25519, curve)
assert.Equal(t, rest, invalidPem) assert.Equal(t, rest, invalidPem)
require.EqualError(t, err, "input did not contain a valid PEM encoded block") require.EqualError(t, err, "input did not contain a valid PEM encoded block")
} }

View File

@@ -37,6 +37,7 @@ func TestCertificateV1_Sign(t *testing.T) {
} }
pub, priv, err := ed25519.GenerateKey(rand.Reader) pub, priv, err := ed25519.GenerateKey(rand.Reader)
require.NoError(t, err)
c, err := tbs.Sign(&certificateV1{details: detailsV1{notBefore: before, notAfter: after}}, Curve_CURVE25519, priv) c, err := tbs.Sign(&certificateV1{details: detailsV1{notBefore: before, notAfter: after}}, Curve_CURVE25519, priv)
require.NoError(t, err) require.NoError(t, err)
assert.NotNil(t, c) assert.NotNil(t, c)

View File

@@ -22,6 +22,9 @@ func NewTestCaCert(version cert.Version, curve cert.Curve, before, after time.Ti
switch curve { switch curve {
case cert.Curve_CURVE25519: case cert.Curve_CURVE25519:
pub, priv, err = ed25519.GenerateKey(rand.Reader) pub, priv, err = ed25519.GenerateKey(rand.Reader)
if err != nil {
panic(err)
}
case cert.Curve_P256: case cert.Curve_P256:
privk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) privk, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
if err != nil { if err != nil {

View File

@@ -81,7 +81,7 @@ func parseArgonParameters(memory uint, parallelism uint, iterations uint) (*cert
return cert.NewArgon2Parameters(uint32(memory), uint8(parallelism), uint32(iterations)), nil return cert.NewArgon2Parameters(uint32(memory), uint8(parallelism), uint32(iterations)), nil
} }
func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error { func ca(args []string, out io.Writer, _ io.Writer, pr PasswordReader) error {
cf := newCaFlags() cf := newCaFlags()
err := cf.set.Parse(args) err := cf.set.Parse(args)
if err != nil { if err != nil {

View File

@@ -29,7 +29,7 @@ func newKeygenFlags() *keygenFlags {
return &cf return &cf
} }
func keygen(args []string, out io.Writer, errOut io.Writer) error { func keygen(args []string, _ io.Writer, _ io.Writer) error {
cf := newKeygenFlags() cf := newKeygenFlags()
err := cf.set.Parse(args) err := cf.set.Parse(args)
if err != nil { if err != nil {

View File

@@ -3,7 +3,6 @@ package main
import ( import (
"bytes" "bytes"
"errors" "errors"
"fmt"
"io" "io"
"os" "os"
"testing" "testing"
@@ -77,7 +76,7 @@ func assertHelpError(t *testing.T, err error, msg string) {
case *helpError: case *helpError:
// good // good
default: default:
t.Fatal(fmt.Sprintf("err was not a helpError: %q, expected %q", err, msg)) t.Fatalf("err was not a helpError: %q, expected %q", err, msg)
} }
require.EqualError(t, err, msg) require.EqualError(t, err, msg)

View File

@@ -10,7 +10,7 @@ func p11Supported() bool {
return false return false
} }
func p11Flag(set *flag.FlagSet) *string { func p11Flag(_ *flag.FlagSet) *string {
var ret = "" var ret = ""
return &ret return &ret
} }

View File

@@ -1,12 +1,12 @@
package main package main
import ( import (
"bytes"
"encoding/json" "encoding/json"
"flag" "flag"
"fmt" "fmt"
"io" "io"
"os" "os"
"strings"
"github.com/skip2/go-qrcode" "github.com/skip2/go-qrcode"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
@@ -29,7 +29,7 @@ func newPrintFlags() *printFlags {
return &pf return &pf
} }
func printCert(args []string, out io.Writer, errOut io.Writer) error { func printCert(args []string, out io.Writer, _ io.Writer) error {
pf := newPrintFlags() pf := newPrintFlags()
err := pf.set.Parse(args) err := pf.set.Parse(args)
if err != nil { if err != nil {
@@ -72,7 +72,7 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
qrBytes = append(qrBytes, b...) qrBytes = append(qrBytes, b...)
} }
if rawCert == nil || len(rawCert) == 0 || strings.TrimSpace(string(rawCert)) == "" { if len(rawCert) == 0 || len(bytes.TrimSpace(rawCert)) == 0 {
break break
} }

View File

@@ -1,12 +1,12 @@
package main package main
import ( import (
"bytes"
"errors" "errors"
"flag" "flag"
"fmt" "fmt"
"io" "io"
"os" "os"
"strings"
"time" "time"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
@@ -52,7 +52,7 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {
return fmt.Errorf("error while adding ca cert to pool: %w", err) return fmt.Errorf("error while adding ca cert to pool: %w", err)
} }
if rawCACert == nil || len(rawCACert) == 0 || strings.TrimSpace(string(rawCACert)) == "" { if len(rawCACert) == 0 || len(bytes.TrimSpace(rawCACert)) == 0 {
break break
} }
} }

View File

@@ -97,7 +97,7 @@ func Test_verify(t *testing.T) {
crt, _ := NewTestCert(ca, caPriv, "test-cert", time.Now().Add(time.Hour*-1), time.Now().Add(time.Hour), nil, nil, nil) crt, _ := NewTestCert(ca, caPriv, "test-cert", time.Now().Add(time.Hour*-1), time.Now().Add(time.Hour), nil, nil, nil)
// Slightly evil hack to modify the certificate after it was sealed to generate an invalid signature // Slightly evil hack to modify the certificate after it was sealed to generate an invalid signature
pub := crt.PublicKey() pub := crt.PublicKey()
for i, _ := range pub { for i := range pub {
pub[i] = 0 pub[i] = 0
} }
b, _ = crt.MarshalPEM() b, _ = crt.MarshalPEM()

View File

@@ -51,10 +51,7 @@ func (p *program) Stop(s service.Service) error {
func fileExists(filename string) bool { func fileExists(filename string) bool {
_, err := os.Stat(filename) _, err := os.Stat(filename)
if os.IsNotExist(err) { return !os.IsNotExist(err)
return false
}
return true
} }
func doService(configPath *string, configTest *bool, build string, serviceFlag *string) { func doService(configPath *string, configTest *bool, build string, serviceFlag *string) {

View File

@@ -63,7 +63,7 @@ func (c *C) Load(path string) error {
func (c *C) LoadString(raw string) error { func (c *C) LoadString(raw string) error {
if raw == "" { if raw == "" {
return errors.New("Empty configuration") return errors.New("empty configuration")
} }
return c.parseRaw([]byte(raw)) return c.parseRaw([]byte(raw))
} }

View File

@@ -4,16 +4,13 @@ import (
"bytes" "bytes"
"context" "context"
"encoding/binary" "encoding/binary"
"fmt"
"net/netip" "net/netip"
"sync" "sync"
"sync/atomic"
"time" "time"
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/header" "github.com/slackhq/nebula/header"
) )
@@ -30,128 +27,134 @@ const (
) )
type connectionManager struct { type connectionManager struct {
in map[uint32]struct{}
inLock *sync.RWMutex
out map[uint32]struct{}
outLock *sync.RWMutex
// relayUsed holds which relay localIndexs are in use // relayUsed holds which relay localIndexs are in use
relayUsed map[uint32]struct{} relayUsed map[uint32]struct{}
relayUsedLock *sync.RWMutex relayUsedLock *sync.RWMutex
hostMap *HostMap hostMap *HostMap
trafficTimer *LockingTimerWheel[uint32] trafficTimer *LockingTimerWheel[uint32]
intf *Interface intf *Interface
punchy *Punchy pendingDeletion map[uint32]struct{}
punchy *Punchy
// Configuration settings
checkInterval time.Duration checkInterval time.Duration
pendingDeletionInterval time.Duration pendingDeletionInterval time.Duration
inactivityTimeout atomic.Int64 metricsTxPunchy metrics.Counter
dropInactive atomic.Bool
metricsTxPunchy metrics.Counter
l *logrus.Logger l *logrus.Logger
} }
func newConnectionManagerFromConfig(l *logrus.Logger, c *config.C, hm *HostMap, p *Punchy) *connectionManager { func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager {
cm := &connectionManager{ var max time.Duration
hostMap: hm, if checkInterval < pendingDeletionInterval {
l: l, max = pendingDeletionInterval
punchy: p, } else {
relayUsed: make(map[uint32]struct{}), max = checkInterval
relayUsedLock: &sync.RWMutex{},
metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
} }
cm.reload(c, true) nc := &connectionManager{
c.RegisterReloadCallback(func(c *config.C) { hostMap: intf.hostMap,
cm.reload(c, false) in: make(map[uint32]struct{}),
}) inLock: &sync.RWMutex{},
out: make(map[uint32]struct{}),
return cm outLock: &sync.RWMutex{},
} relayUsed: make(map[uint32]struct{}),
relayUsedLock: &sync.RWMutex{},
func (cm *connectionManager) reload(c *config.C, initial bool) { trafficTimer: NewLockingTimerWheel[uint32](time.Millisecond*500, max),
if initial { intf: intf,
cm.checkInterval = time.Duration(c.GetInt("timers.connection_alive_interval", 5)) * time.Second pendingDeletion: make(map[uint32]struct{}),
cm.pendingDeletionInterval = time.Duration(c.GetInt("timers.pending_deletion_interval", 10)) * time.Second checkInterval: checkInterval,
pendingDeletionInterval: pendingDeletionInterval,
// We want at least a minimum resolution of 500ms per tick so that we can hit these intervals punchy: punchy,
// pretty close to their configured duration. metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
// The inactivity duration is checked each time a hostinfo ticks through so we don't need the wheel to contain it. l: l,
minDuration := min(time.Millisecond*500, cm.checkInterval, cm.pendingDeletionInterval)
maxDuration := max(cm.checkInterval, cm.pendingDeletionInterval)
cm.trafficTimer = NewLockingTimerWheel[uint32](minDuration, maxDuration)
} }
if initial || c.HasChanged("tunnels.inactivity_timeout") { nc.Start(ctx)
old := cm.getInactivityTimeout() return nc
cm.inactivityTimeout.Store((int64)(c.GetDuration("tunnels.inactivity_timeout", 10*time.Minute)))
if !initial {
cm.l.WithField("oldDuration", old).
WithField("newDuration", cm.getInactivityTimeout()).
Info("Inactivity timeout has changed")
}
}
if initial || c.HasChanged("tunnels.drop_inactive") {
old := cm.dropInactive.Load()
cm.dropInactive.Store(c.GetBool("tunnels.drop_inactive", false))
if !initial {
cm.l.WithField("oldBool", old).
WithField("newBool", cm.dropInactive.Load()).
Info("Drop inactive setting has changed")
}
}
} }
func (cm *connectionManager) getInactivityTimeout() time.Duration { func (n *connectionManager) In(localIndex uint32) {
return (time.Duration)(cm.inactivityTimeout.Load()) n.inLock.RLock()
}
func (cm *connectionManager) In(h *HostInfo) {
h.in.Store(true)
}
func (cm *connectionManager) Out(h *HostInfo) {
h.out.Store(true)
}
func (cm *connectionManager) RelayUsed(localIndex uint32) {
cm.relayUsedLock.RLock()
// If this already exists, return // If this already exists, return
if _, ok := cm.relayUsed[localIndex]; ok { if _, ok := n.in[localIndex]; ok {
cm.relayUsedLock.RUnlock() n.inLock.RUnlock()
return return
} }
cm.relayUsedLock.RUnlock() n.inLock.RUnlock()
cm.relayUsedLock.Lock() n.inLock.Lock()
cm.relayUsed[localIndex] = struct{}{} n.in[localIndex] = struct{}{}
cm.relayUsedLock.Unlock() n.inLock.Unlock()
}
func (n *connectionManager) Out(localIndex uint32) {
n.outLock.RLock()
// If this already exists, return
if _, ok := n.out[localIndex]; ok {
n.outLock.RUnlock()
return
}
n.outLock.RUnlock()
n.outLock.Lock()
n.out[localIndex] = struct{}{}
n.outLock.Unlock()
}
func (n *connectionManager) RelayUsed(localIndex uint32) {
n.relayUsedLock.RLock()
// If this already exists, return
if _, ok := n.relayUsed[localIndex]; ok {
n.relayUsedLock.RUnlock()
return
}
n.relayUsedLock.RUnlock()
n.relayUsedLock.Lock()
n.relayUsed[localIndex] = struct{}{}
n.relayUsedLock.Unlock()
} }
// getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and // getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and
// resets the state for this local index // resets the state for this local index
func (cm *connectionManager) getAndResetTrafficCheck(h *HostInfo, now time.Time) (bool, bool) { func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) {
in := h.in.Swap(false) n.inLock.Lock()
out := h.out.Swap(false) n.outLock.Lock()
if in || out { _, in := n.in[localIndex]
h.lastUsed = now _, out := n.out[localIndex]
} delete(n.in, localIndex)
delete(n.out, localIndex)
n.inLock.Unlock()
n.outLock.Unlock()
return in, out return in, out
} }
// AddTrafficWatch must be called for every new HostInfo. func (n *connectionManager) AddTrafficWatch(localIndex uint32) {
// We will continue to monitor the HostInfo until the tunnel is dropped. // Use a write lock directly because it should be incredibly rare that we are ever already tracking this index
func (cm *connectionManager) AddTrafficWatch(h *HostInfo) { n.outLock.Lock()
if h.out.Swap(true) == false { if _, ok := n.out[localIndex]; ok {
cm.trafficTimer.Add(h.localIndexId, cm.checkInterval) n.outLock.Unlock()
return
} }
n.out[localIndex] = struct{}{}
n.trafficTimer.Add(localIndex, n.checkInterval)
n.outLock.Unlock()
} }
func (cm *connectionManager) Start(ctx context.Context) { func (n *connectionManager) Start(ctx context.Context) {
clockSource := time.NewTicker(cm.trafficTimer.t.tickDuration) go n.Run(ctx)
}
func (n *connectionManager) Run(ctx context.Context) {
//TODO: this tick should be based on the min wheel tick? Check firewall
clockSource := time.NewTicker(500 * time.Millisecond)
defer clockSource.Stop() defer clockSource.Stop()
p := []byte("") p := []byte("")
nb := make([]byte, 12, 12) nb := make([]byte, 12)
out := make([]byte, mtu) out := make([]byte, mtu)
for { for {
@@ -160,61 +163,61 @@ func (cm *connectionManager) Start(ctx context.Context) {
return return
case now := <-clockSource.C: case now := <-clockSource.C:
cm.trafficTimer.Advance(now) n.trafficTimer.Advance(now)
for { for {
localIndex, has := cm.trafficTimer.Purge() localIndex, has := n.trafficTimer.Purge()
if !has { if !has {
break break
} }
cm.doTrafficCheck(localIndex, p, nb, out, now) n.doTrafficCheck(localIndex, p, nb, out, now)
} }
} }
} }
} }
func (cm *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) { func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
decision, hostinfo, primary := cm.makeTrafficDecision(localIndex, now) decision, hostinfo, primary := n.makeTrafficDecision(localIndex, now)
switch decision { switch decision {
case deleteTunnel: case deleteTunnel:
if cm.hostMap.DeleteHostInfo(hostinfo) { if n.hostMap.DeleteHostInfo(hostinfo) {
// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap // Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
cm.intf.lightHouse.DeleteVpnAddrs(hostinfo.vpnAddrs) n.intf.lightHouse.DeleteVpnAddrs(hostinfo.vpnAddrs)
} }
case closeTunnel: case closeTunnel:
cm.intf.sendCloseTunnel(hostinfo) n.intf.sendCloseTunnel(hostinfo)
cm.intf.closeTunnel(hostinfo) n.intf.closeTunnel(hostinfo)
case swapPrimary: case swapPrimary:
cm.swapPrimary(hostinfo, primary) n.swapPrimary(hostinfo, primary)
case migrateRelays: case migrateRelays:
cm.migrateRelayUsed(hostinfo, primary) n.migrateRelayUsed(hostinfo, primary)
case tryRehandshake: case tryRehandshake:
cm.tryRehandshake(hostinfo) n.tryRehandshake(hostinfo)
case sendTestPacket: case sendTestPacket:
cm.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out) n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
} }
cm.resetRelayTrafficCheck(hostinfo) n.resetRelayTrafficCheck(hostinfo)
} }
func (cm *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) { func (n *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
if hostinfo != nil { if hostinfo != nil {
cm.relayUsedLock.Lock() n.relayUsedLock.Lock()
defer cm.relayUsedLock.Unlock() defer n.relayUsedLock.Unlock()
// No need to migrate any relays, delete usage info now. // No need to migrate any relays, delete usage info now.
for _, idx := range hostinfo.relayState.CopyRelayForIdxs() { for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
delete(cm.relayUsed, idx) delete(n.relayUsed, idx)
} }
} }
} }
func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) { func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
relayFor := oldhostinfo.relayState.CopyAllRelayFor() relayFor := oldhostinfo.relayState.CopyAllRelayFor()
for _, r := range relayFor { for _, r := range relayFor {
@@ -224,51 +227,46 @@ func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo
var relayFrom netip.Addr var relayFrom netip.Addr
var relayTo netip.Addr var relayTo netip.Addr
switch { switch {
case ok: case ok && existing.State == Established:
switch existing.State { // This relay already exists in newhostinfo, then do nothing.
case Established, PeerRequested, Disestablished: continue
// This relay already exists in newhostinfo, then do nothing. case ok && existing.State == Requested:
continue // The relay exists in a Requested state; re-send the request
case Requested: index = existing.LocalIndex
// The relay exists in a Requested state; re-send the request switch r.Type {
index = existing.LocalIndex case TerminalType:
switch r.Type { relayFrom = n.intf.myVpnAddrs[0]
case TerminalType: relayTo = existing.PeerAddr
relayFrom = cm.intf.myVpnAddrs[0] case ForwardingType:
relayTo = existing.PeerAddr relayFrom = existing.PeerAddr
case ForwardingType: relayTo = newhostinfo.vpnAddrs[0]
relayFrom = existing.PeerAddr default:
relayTo = newhostinfo.vpnAddrs[0] // should never happen
default:
// should never happen
panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
}
} }
case !ok: case !ok:
cm.relayUsedLock.RLock() n.relayUsedLock.RLock()
if _, relayUsed := cm.relayUsed[r.LocalIndex]; !relayUsed { if _, relayUsed := n.relayUsed[r.LocalIndex]; !relayUsed {
// The relay hasn't been used; don't migrate it. // The relay hasn't been used; don't migrate it.
cm.relayUsedLock.RUnlock() n.relayUsedLock.RUnlock()
continue continue
} }
cm.relayUsedLock.RUnlock() n.relayUsedLock.RUnlock()
// The relay doesn't exist at all; create some relay state and send the request. // The relay doesn't exist at all; create some relay state and send the request.
var err error var err error
index, err = AddRelay(cm.l, newhostinfo, cm.hostMap, r.PeerAddr, nil, r.Type, Requested) index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerAddr, nil, r.Type, Requested)
if err != nil { if err != nil {
cm.l.WithError(err).Error("failed to migrate relay to new hostinfo") n.l.WithError(err).Error("failed to migrate relay to new hostinfo")
continue continue
} }
switch r.Type { switch r.Type {
case TerminalType: case TerminalType:
relayFrom = cm.intf.myVpnAddrs[0] relayFrom = n.intf.myVpnAddrs[0]
relayTo = r.PeerAddr relayTo = r.PeerAddr
case ForwardingType: case ForwardingType:
relayFrom = r.PeerAddr relayFrom = r.PeerAddr
relayTo = newhostinfo.vpnAddrs[0] relayTo = newhostinfo.vpnAddrs[0]
default: default:
// should never happen // should never happen
panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
} }
} }
@@ -281,12 +279,12 @@ func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo
switch newhostinfo.GetCert().Certificate.Version() { switch newhostinfo.GetCert().Certificate.Version() {
case cert.Version1: case cert.Version1:
if !relayFrom.Is4() { if !relayFrom.Is4() {
cm.l.Error("can not migrate v1 relay with a v6 network because the relay is not running a current nebula version") n.l.Error("can not migrate v1 relay with a v6 network because the relay is not running a current nebula version")
continue continue
} }
if !relayTo.Is4() { if !relayTo.Is4() {
cm.l.Error("can not migrate v1 relay with a v6 remote network because the relay is not running a current nebula version") n.l.Error("can not migrate v1 relay with a v6 remote network because the relay is not running a current nebula version")
continue continue
} }
@@ -298,16 +296,16 @@ func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo
req.RelayFromAddr = netAddrToProtoAddr(relayFrom) req.RelayFromAddr = netAddrToProtoAddr(relayFrom)
req.RelayToAddr = netAddrToProtoAddr(relayTo) req.RelayToAddr = netAddrToProtoAddr(relayTo)
default: default:
newhostinfo.logger(cm.l).Error("Unknown certificate version found while attempting to migrate relay") newhostinfo.logger(n.l).Error("Unknown certificate version found while attempting to migrate relay")
continue continue
} }
msg, err := req.Marshal() msg, err := req.Marshal()
if err != nil { if err != nil {
cm.l.WithError(err).Error("failed to marshal Control message to migrate relay") n.l.WithError(err).Error("failed to marshal Control message to migrate relay")
} else { } else {
cm.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu)) n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
cm.l.WithFields(logrus.Fields{ n.l.WithFields(logrus.Fields{
"relayFrom": req.RelayFromAddr, "relayFrom": req.RelayFromAddr,
"relayTo": req.RelayToAddr, "relayTo": req.RelayToAddr,
"initiatorRelayIndex": req.InitiatorRelayIndex, "initiatorRelayIndex": req.InitiatorRelayIndex,
@@ -318,45 +316,46 @@ func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo
} }
} }
func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) { func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
// Read lock the main hostmap to order decisions based on tunnels being the primary tunnel n.hostMap.RLock()
cm.hostMap.RLock() defer n.hostMap.RUnlock()
defer cm.hostMap.RUnlock()
hostinfo := cm.hostMap.Indexes[localIndex] hostinfo := n.hostMap.Indexes[localIndex]
if hostinfo == nil { if hostinfo == nil {
cm.l.WithField("localIndex", localIndex).Debugln("Not found in hostmap") n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
delete(n.pendingDeletion, localIndex)
return doNothing, nil, nil return doNothing, nil, nil
} }
if cm.isInvalidCertificate(now, hostinfo) { if n.isInvalidCertificate(now, hostinfo) {
delete(n.pendingDeletion, hostinfo.localIndexId)
return closeTunnel, hostinfo, nil return closeTunnel, hostinfo, nil
} }
primary := cm.hostMap.Hosts[hostinfo.vpnAddrs[0]] primary := n.hostMap.Hosts[hostinfo.vpnAddrs[0]]
mainHostInfo := true mainHostInfo := true
if primary != nil && primary != hostinfo { if primary != nil && primary != hostinfo {
mainHostInfo = false mainHostInfo = false
} }
// Check for traffic on this hostinfo // Check for traffic on this hostinfo
inTraffic, outTraffic := cm.getAndResetTrafficCheck(hostinfo, now) inTraffic, outTraffic := n.getAndResetTrafficCheck(localIndex)
// A hostinfo is determined alive if there is incoming traffic // A hostinfo is determined alive if there is incoming traffic
if inTraffic { if inTraffic {
decision := doNothing decision := doNothing
if cm.l.Level >= logrus.DebugLevel { if n.l.Level >= logrus.DebugLevel {
hostinfo.logger(cm.l). hostinfo.logger(n.l).
WithField("tunnelCheck", m{"state": "alive", "method": "passive"}). WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
Debug("Tunnel status") Debug("Tunnel status")
} }
hostinfo.pendingDeletion.Store(false) delete(n.pendingDeletion, hostinfo.localIndexId)
if mainHostInfo { if mainHostInfo {
decision = tryRehandshake decision = tryRehandshake
} else { } else {
if cm.shouldSwapPrimary(hostinfo, primary) { if n.shouldSwapPrimary(hostinfo) {
decision = swapPrimary decision = swapPrimary
} else { } else {
// migrate the relays to the primary, if in use. // migrate the relays to the primary, if in use.
@@ -364,55 +363,46 @@ func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Tim
} }
} }
cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval) n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
if !outTraffic { if !outTraffic {
// Send a punch packet to keep the NAT state alive // Send a punch packet to keep the NAT state alive
cm.sendPunch(hostinfo) n.sendPunch(hostinfo)
} }
return decision, hostinfo, primary return decision, hostinfo, primary
} }
if hostinfo.pendingDeletion.Load() { if _, ok := n.pendingDeletion[hostinfo.localIndexId]; ok {
// We have already sent a test packet and nothing was returned, this hostinfo is dead // We have already sent a test packet and nothing was returned, this hostinfo is dead
hostinfo.logger(cm.l). hostinfo.logger(n.l).
WithField("tunnelCheck", m{"state": "dead", "method": "active"}). WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
Info("Tunnel status") Info("Tunnel status")
delete(n.pendingDeletion, hostinfo.localIndexId)
return deleteTunnel, hostinfo, nil return deleteTunnel, hostinfo, nil
} }
decision := doNothing decision := doNothing
if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo { if hostinfo.ConnectionState != nil && mainHostInfo {
if !outTraffic { if !outTraffic {
inactiveFor, isInactive := cm.isInactive(hostinfo, now)
if isInactive {
// Tunnel is inactive, tear it down
hostinfo.logger(cm.l).
WithField("inactiveDuration", inactiveFor).
WithField("primary", mainHostInfo).
Info("Dropping tunnel due to inactivity")
return closeTunnel, hostinfo, primary
}
// If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel. // If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel.
// Just maintain NAT state if configured to do so. // Just maintain NAT state if configured to do so.
cm.sendPunch(hostinfo) n.sendPunch(hostinfo)
cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval) n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
return doNothing, nil, nil return doNothing, nil, nil
} }
if cm.punchy.GetTargetEverything() { if n.punchy.GetTargetEverything() {
// This is similar to the old punchy behavior with a slight optimization. // This is similar to the old punchy behavior with a slight optimization.
// We aren't receiving traffic but we are sending it, punch on all known // We aren't receiving traffic but we are sending it, punch on all known
// ips in case we need to re-prime NAT state // ips in case we need to re-prime NAT state
cm.sendPunch(hostinfo) n.sendPunch(hostinfo)
} }
if cm.l.Level >= logrus.DebugLevel { if n.l.Level >= logrus.DebugLevel {
hostinfo.logger(cm.l). hostinfo.logger(n.l).
WithField("tunnelCheck", m{"state": "testing", "method": "active"}). WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
Debug("Tunnel status") Debug("Tunnel status")
} }
@@ -421,33 +411,17 @@ func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Tim
decision = sendTestPacket decision = sendTestPacket
} else { } else {
if cm.l.Level >= logrus.DebugLevel { if n.l.Level >= logrus.DebugLevel {
hostinfo.logger(cm.l).Debugf("Hostinfo sadness") hostinfo.logger(n.l).Debugf("Hostinfo sadness")
} }
} }
hostinfo.pendingDeletion.Store(true) n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
cm.trafficTimer.Add(hostinfo.localIndexId, cm.pendingDeletionInterval) n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
return decision, hostinfo, nil return decision, hostinfo, nil
} }
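
The function above is easier to follow as a small state machine over the per-tunnel traffic flags. Below is a sketch using the atomic flags from the newer side of the diff; tunnelState and decision are illustrative names, not the real types.

package main

import (
	"fmt"
	"sync/atomic"
)

type tunnelState struct {
	in, out         atomic.Bool // traffic seen since the last check
	pendingDeletion atomic.Bool // a test packet was sent and never answered
}

type decision string

// check consumes the traffic flags and returns what the manager would do.
func (t *tunnelState) check() decision {
	sawIn := t.in.Swap(false)
	t.out.Swap(false)

	if sawIn {
		// Inbound traffic proves the tunnel is alive.
		t.pendingDeletion.Store(false)
		return "keep (rehandshake/swap checks happen here)"
	}
	if t.pendingDeletion.Load() {
		// The previous test packet went unanswered.
		return "delete tunnel"
	}
	// No inbound traffic yet: probe and wait one more interval.
	t.pendingDeletion.Store(true)
	return "send test packet"
}

func main() {
	var t tunnelState
	t.in.Store(true)
	fmt.Println(t.check()) // keep ...
	fmt.Println(t.check()) // send test packet
	fmt.Println(t.check()) // delete tunnel
}

Calling check() with no inbound traffic twice in a row walks through "send test packet" and then "delete tunnel", which is the same progression the doTrafficCheck assertions in the tests further down exercise.
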
func (cm *connectionManager) isInactive(hostinfo *HostInfo, now time.Time) (time.Duration, bool) { func (n *connectionManager) shouldSwapPrimary(current *HostInfo) bool {
if cm.dropInactive.Load() == false {
// We aren't configured to drop inactive tunnels
return 0, false
}
inactiveDuration := now.Sub(hostinfo.lastUsed)
if inactiveDuration < cm.getInactivityTimeout() {
// It's not considered inactive
return inactiveDuration, false
}
// The tunnel is inactive
return inactiveDuration, true
}
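
A worked example of the inactivity math above, using the documented 10m default; lastUsed here is a hypothetical value, not state pulled from a real HostInfo.

package main

import (
	"fmt"
	"time"
)

func isInactive(lastUsed, now time.Time, timeout time.Duration) (time.Duration, bool) {
	inactiveFor := now.Sub(lastUsed)
	// Inactive only once the quiet period reaches the configured timeout.
	return inactiveFor, inactiveFor >= timeout
}

func main() {
	lastUsed := time.Now().Add(-11 * time.Minute)
	d, drop := isInactive(lastUsed, time.Now(), 10*time.Minute)
	fmt.Printf("inactive for %v, drop=%v\n", d.Round(time.Second), drop)
}
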
func (cm *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
// The primary tunnel is the most recent handshake to complete locally and should work entirely fine. // The primary tunnel is the most recent handshake to complete locally and should work entirely fine.
// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary. // If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
// Let's sort this out. // Let's sort this out.
@@ -455,90 +429,83 @@ func (cm *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool
// Only one side should swap because if both swap then we may never resolve to a single tunnel. // Only one side should swap because if both swap then we may never resolve to a single tunnel.
// vpn addr is static across all tunnels for this host pair so lets // vpn addr is static across all tunnels for this host pair so lets
// use that to determine if we should consider swapping. // use that to determine if we should consider swapping.
if current.vpnAddrs[0].Compare(cm.intf.myVpnAddrs[0]) < 0 { if current.vpnAddrs[0].Compare(n.intf.myVpnAddrs[0]) < 0 {
// Their primary vpn addr is less than mine. Do not swap. // Their primary vpn addr is less than mine. Do not swap.
return false return false
} }
crt := cm.intf.pki.getCertState().getCertificate(current.ConnectionState.myCert.Version()) crt := n.intf.pki.getCertState().getCertificate(current.ConnectionState.myCert.Version())
// If this tunnel is using the latest certificate then we should swap it to primary for a bit and see if things // If this tunnel is using the latest certificate then we should swap it to primary for a bit and see if things
// settle down. // settle down.
return bytes.Equal(current.ConnectionState.myCert.Signature(), crt.Signature()) return bytes.Equal(current.ConnectionState.myCert.Signature(), crt.Signature())
} }
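
The tie-break above relies on both peers comparing the same pair of addresses, so at most one side ever swaps. A tiny sketch of that property; the addresses are placeholders.

package main

import (
	"fmt"
	"net/netip"
)

// maySwap mirrors the guard above: if the peer's primary vpn address is less
// than mine, I do nothing and leave the swap to the peer.
func maySwap(myAddr, peerAddr netip.Addr) bool {
	return peerAddr.Compare(myAddr) >= 0
}

func main() {
	a := netip.MustParseAddr("10.128.0.1")
	b := netip.MustParseAddr("10.128.0.2")
	fmt.Println(maySwap(a, b), maySwap(b, a)) // true false
}
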
func (cm *connectionManager) swapPrimary(current, primary *HostInfo) { func (n *connectionManager) swapPrimary(current, primary *HostInfo) {
cm.hostMap.Lock() n.hostMap.Lock()
// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake. // Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
if cm.hostMap.Hosts[current.vpnAddrs[0]] == primary { if n.hostMap.Hosts[current.vpnAddrs[0]] == primary {
cm.hostMap.unlockedMakePrimary(current) n.hostMap.unlockedMakePrimary(current)
} }
cm.hostMap.Unlock() n.hostMap.Unlock()
} }
// isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and // isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and
// the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid // the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid
// check and return true. // check and return true.
func (cm *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool { func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
remoteCert := hostinfo.GetCert() remoteCert := hostinfo.GetCert()
if remoteCert == nil { if remoteCert == nil {
return false return false
} }
caPool := cm.intf.pki.GetCAPool() caPool := n.intf.pki.GetCAPool()
err := caPool.VerifyCachedCertificate(now, remoteCert) err := caPool.VerifyCachedCertificate(now, remoteCert)
if err == nil { if err == nil {
return false return false
} }
if !cm.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed { if !n.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
// Block listed certificates should always be disconnected // Block listed certificates should always be disconnected
return false return false
} }
hostinfo.logger(cm.l).WithError(err). hostinfo.logger(n.l).WithError(err).
WithField("fingerprint", remoteCert.Fingerprint). WithField("fingerprint", remoteCert.Fingerprint).
Info("Remote certificate is no longer valid, tearing down the tunnel") Info("Remote certificate is no longer valid, tearing down the tunnel")
return true return true
} }
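
A sketch of the decision gate in isInvalidCertificate: a valid certificate is always kept, a block listed one is always dropped, and anything else is dropped only when pki.disconnect_invalid is set. errBlockListed stands in for cert.ErrBlockListed.

package main

import (
	"errors"
	"fmt"
)

var errBlockListed = errors.New("certificate is in the block list")

// shouldTearDown reports whether a tunnel should be destroyed given the
// verification error and the pki.disconnect_invalid setting.
func shouldTearDown(verifyErr error, disconnectInvalid bool) bool {
	if verifyErr == nil {
		return false // certificate is still valid
	}
	if errors.Is(verifyErr, errBlockListed) {
		return true // block listed certs are always disconnected
	}
	return disconnectInvalid // expired/invalid certs only if configured
}

func main() {
	fmt.Println(shouldTearDown(nil, true))                                // false
	fmt.Println(shouldTearDown(errors.New("certificate expired"), false)) // false
	fmt.Println(shouldTearDown(errBlockListed, false))                    // true
}
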
func (cm *connectionManager) sendPunch(hostinfo *HostInfo) { func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
if !cm.punchy.GetPunch() { if !n.punchy.GetPunch() {
// Punching is disabled // Punching is disabled
return return
} }
if cm.intf.lightHouse.IsAnyLighthouseAddr(hostinfo.vpnAddrs) { if n.punchy.GetTargetEverything() {
// Do not punch to lighthouses, we assume our lighthouse update interval is good enough. hostinfo.remotes.ForEach(n.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
// In the event the update interval is not sufficient to maintain NAT state then a publicly available lighthouse n.metricsTxPunchy.Inc(1)
// would lose the ability to notify us and punchy.respond would become unreliable. n.intf.outside.WriteTo([]byte{1}, addr)
return
}
if cm.punchy.GetTargetEverything() {
hostinfo.remotes.ForEach(cm.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
cm.metricsTxPunchy.Inc(1)
cm.intf.outside.WriteTo([]byte{1}, addr)
}) })
} else if hostinfo.remote.IsValid() { } else if hostinfo.remote.IsValid() {
cm.metricsTxPunchy.Inc(1) n.metricsTxPunchy.Inc(1)
cm.intf.outside.WriteTo([]byte{1}, hostinfo.remote) n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
} }
} }
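
For reference, a punch packet is nothing more than a one-byte UDP datagram sent to the peer's underlay address to refresh NAT state. A standalone illustration; the address is a placeholder and nebula sends this through its own udp conn rather than net.DialUDP.

package main

import (
	"log"
	"net"
)

func main() {
	raddr, err := net.ResolveUDPAddr("udp", "192.0.2.10:4242") // placeholder peer
	if err != nil {
		log.Fatal(err)
	}
	conn, err := net.DialUDP("udp", nil, raddr)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The payload content does not matter; the outbound packet is what keeps
	// the NAT mapping alive.
	if _, err := conn.Write([]byte{1}); err != nil {
		log.Fatal(err)
	}
}
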
func (cm *connectionManager) tryRehandshake(hostinfo *HostInfo) { func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
cs := cm.intf.pki.getCertState() cs := n.intf.pki.getCertState()
curCrt := hostinfo.ConnectionState.myCert curCrt := hostinfo.ConnectionState.myCert
myCrt := cs.getCertificate(curCrt.Version()) myCrt := cs.getCertificate(curCrt.Version())
if curCrt.Version() >= cs.initiatingVersion && bytes.Equal(curCrt.Signature(), myCrt.Signature()) == true { if curCrt.Version() >= cs.initiatingVersion && bytes.Equal(curCrt.Signature(), myCrt.Signature()) {
// The current tunnel is using the latest certificate and version, no need to rehandshake. // The current tunnel is using the latest certificate and version, no need to rehandshake.
return return
} }
cm.l.WithField("vpnAddrs", hostinfo.vpnAddrs). n.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
WithField("reason", "local certificate is not current"). WithField("reason", "local certificate is not current").
Info("Re-handshaking with remote") Info("Re-handshaking with remote")
cm.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil) n.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil)
} }
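
A sketch of the rehandshake trigger above: the tunnel is re-keyed when the certificate it was built with no longer matches what this node would present today. certInfo is a simplified stand-in for the cert.Certificate fields nebula compares.

package main

import (
	"bytes"
	"fmt"
)

type certInfo struct {
	version   int
	signature []byte
}

func needsRehandshake(tunnelCert, currentCert certInfo, initiatingVersion int) bool {
	if tunnelCert.version >= initiatingVersion &&
		bytes.Equal(tunnelCert.signature, currentCert.signature) {
		return false // tunnel already uses the latest cert and version
	}
	return true
}

func main() {
	old := certInfo{version: 1, signature: []byte("sig-a")}
	renewed := certInfo{version: 1, signature: []byte("sig-b")}
	fmt.Println(needsRehandshake(old, renewed, 1))     // true: local cert was renewed
	fmt.Println(needsRehandshake(renewed, renewed, 1)) // false
}
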


@@ -1,6 +1,7 @@
package nebula package nebula
import ( import (
"context"
"crypto/ed25519" "crypto/ed25519"
"crypto/rand" "crypto/rand"
"net/netip" "net/netip"
@@ -63,12 +64,12 @@ func Test_NewConnectionManagerTest(t *testing.T) {
ifce.pki.cs.Store(cs) ifce.pki.cs.Store(cs)
// Create manager // Create manager
conf := config.NewC(l) ctx, cancel := context.WithCancel(context.Background())
punchy := NewPunchyFromConfig(l, conf) defer cancel()
nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy) punchy := NewPunchyFromConfig(l, config.NewC(l))
nc.intf = ifce nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
p := []byte("") p := []byte("")
nb := make([]byte, 12, 12) nb := make([]byte, 12)
out := make([]byte, mtu) out := make([]byte, mtu)
// Add an ip we have established a connection w/ to hostmap // Add an ip we have established a connection w/ to hostmap
@@ -84,33 +85,32 @@ func Test_NewConnectionManagerTest(t *testing.T) {
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce) nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
// We saw traffic out to vpnIp // We saw traffic out to vpnIp
nc.Out(hostinfo) nc.Out(hostinfo.localIndexId)
nc.In(hostinfo) nc.In(hostinfo.localIndexId)
assert.False(t, hostinfo.pendingDeletion.Load()) assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0]) assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.True(t, hostinfo.out.Load()) assert.Contains(t, nc.out, hostinfo.localIndexId)
assert.True(t, hostinfo.in.Load())
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded // Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now()) nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.False(t, hostinfo.pendingDeletion.Load()) assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.False(t, hostinfo.out.Load()) assert.NotContains(t, nc.out, hostinfo.localIndexId)
assert.False(t, hostinfo.in.Load()) assert.NotContains(t, nc.in, hostinfo.localIndexId)
// Do another traffic check tick, this host should be pending deletion now // Do another traffic check tick, this host should be pending deletion now
nc.Out(hostinfo) nc.Out(hostinfo.localIndexId)
assert.True(t, hostinfo.out.Load())
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now()) nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.True(t, hostinfo.pendingDeletion.Load()) assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.False(t, hostinfo.out.Load()) assert.NotContains(t, nc.out, hostinfo.localIndexId)
assert.False(t, hostinfo.in.Load()) assert.NotContains(t, nc.in, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0]) assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
// Do a final traffic check tick, the host should now be removed // Do a final traffic check tick, the host should now be removed
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now()) nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs) assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId) assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
} }
@@ -146,12 +146,12 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
ifce.pki.cs.Store(cs) ifce.pki.cs.Store(cs)
// Create manager // Create manager
conf := config.NewC(l) ctx, cancel := context.WithCancel(context.Background())
punchy := NewPunchyFromConfig(l, conf) defer cancel()
nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy) punchy := NewPunchyFromConfig(l, config.NewC(l))
nc.intf = ifce nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
p := []byte("") p := []byte("")
nb := make([]byte, 12, 12) nb := make([]byte, 12)
out := make([]byte, mtu) out := make([]byte, mtu)
// Add an ip we have established a connection w/ to hostmap // Add an ip we have established a connection w/ to hostmap
@@ -167,129 +167,33 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce) nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
// We saw traffic out to vpnIp // We saw traffic out to vpnIp
nc.Out(hostinfo) nc.Out(hostinfo.localIndexId)
nc.In(hostinfo) nc.In(hostinfo.localIndexId)
assert.True(t, hostinfo.in.Load()) assert.NotContains(t, nc.pendingDeletion, hostinfo.vpnAddrs[0])
assert.True(t, hostinfo.out.Load())
assert.False(t, hostinfo.pendingDeletion.Load())
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0]) assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded // Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now()) nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.False(t, hostinfo.pendingDeletion.Load()) assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.False(t, hostinfo.out.Load()) assert.NotContains(t, nc.out, hostinfo.localIndexId)
assert.False(t, hostinfo.in.Load()) assert.NotContains(t, nc.in, hostinfo.localIndexId)
// Do another traffic check tick, this host should be pending deletion now // Do another traffic check tick, this host should be pending deletion now
nc.Out(hostinfo) nc.Out(hostinfo.localIndexId)
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now()) nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.True(t, hostinfo.pendingDeletion.Load()) assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.False(t, hostinfo.out.Load()) assert.NotContains(t, nc.out, hostinfo.localIndexId)
assert.False(t, hostinfo.in.Load()) assert.NotContains(t, nc.in, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0]) assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
// We saw traffic, should no longer be pending deletion // We saw traffic, should no longer be pending deletion
nc.In(hostinfo) nc.In(hostinfo.localIndexId)
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now()) nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.False(t, hostinfo.pendingDeletion.Load()) assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.False(t, hostinfo.out.Load()) assert.NotContains(t, nc.out, hostinfo.localIndexId)
assert.False(t, hostinfo.in.Load()) assert.NotContains(t, nc.in, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
}
func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {
l := test.NewLogger()
localrange := netip.MustParsePrefix("10.1.1.1/24")
vpnAddrs := []netip.Addr{netip.MustParseAddr("172.1.1.2")}
preferredRanges := []netip.Prefix{localrange}
// Very incomplete mock objects
hostMap := newHostMap(l)
hostMap.preferredRanges.Store(&preferredRanges)
cs := &CertState{
initiatingVersion: cert.Version1,
privateKey: []byte{},
v1Cert: &dummyCert{version: cert.Version1},
v1HandshakeBytes: []byte{},
}
lh := newTestLighthouse()
ifce := &Interface{
hostMap: hostMap,
inside: &test.NoopTun{},
outside: &udp.NoopConn{},
firewall: &Firewall{},
lightHouse: lh,
pki: &PKI{},
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
l: l,
}
ifce.pki.cs.Store(cs)
// Create manager
conf := config.NewC(l)
conf.Settings["tunnels"] = map[string]any{
"drop_inactive": true,
}
punchy := NewPunchyFromConfig(l, conf)
nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
assert.True(t, nc.dropInactive.Load())
nc.intf = ifce
// Add an ip we have established a connection w/ to hostmap
hostinfo := &HostInfo{
vpnAddrs: vpnAddrs,
localIndexId: 1099,
remoteIndexId: 9901,
}
hostinfo.ConnectionState = &ConnectionState{
myCert: &dummyCert{version: cert.Version1},
H: &noise.HandshakeState{},
}
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
// Do a traffic check tick, in and out should be cleared but should not be pending deletion
nc.Out(hostinfo)
nc.In(hostinfo)
assert.True(t, hostinfo.out.Load())
assert.True(t, hostinfo.in.Load())
now := time.Now()
decision, _, _ := nc.makeTrafficDecision(hostinfo.localIndexId, now)
assert.Equal(t, tryRehandshake, decision)
assert.Equal(t, now, hostinfo.lastUsed)
assert.False(t, hostinfo.pendingDeletion.Load())
assert.False(t, hostinfo.out.Load())
assert.False(t, hostinfo.in.Load())
decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*5))
assert.Equal(t, doNothing, decision)
assert.Equal(t, now, hostinfo.lastUsed)
assert.False(t, hostinfo.pendingDeletion.Load())
assert.False(t, hostinfo.out.Load())
assert.False(t, hostinfo.in.Load())
// Do another traffic check tick, should still not be pending deletion
decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*10))
assert.Equal(t, doNothing, decision)
assert.Equal(t, now, hostinfo.lastUsed)
assert.False(t, hostinfo.pendingDeletion.Load())
assert.False(t, hostinfo.out.Load())
assert.False(t, hostinfo.in.Load())
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
// Finally advance beyond the inactivity timeout
decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Minute*10))
assert.Equal(t, closeTunnel, decision)
assert.Equal(t, now, hostinfo.lastUsed)
assert.False(t, hostinfo.pendingDeletion.Load())
assert.False(t, hostinfo.out.Load())
assert.False(t, hostinfo.in.Load())
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0]) assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
} }
@@ -337,7 +241,7 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
require.NoError(t, err) require.NoError(t, err)
cachedPeerCert, err := ncp.VerifyCertificate(now.Add(time.Second), peerCert) cachedPeerCert, err := ncp.VerifyCertificate(now.Add(time.Second), peerCert)
require.NoError(t, err)
cs := &CertState{ cs := &CertState{
privateKey: []byte{}, privateKey: []byte{},
v1Cert: &dummyCert{}, v1Cert: &dummyCert{},
@@ -360,10 +264,10 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
ifce.disconnectInvalid.Store(true) ifce.disconnectInvalid.Store(true)
// Create manager // Create manager
conf := config.NewC(l) ctx, cancel := context.WithCancel(context.Background())
punchy := NewPunchyFromConfig(l, conf) defer cancel()
nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy) punchy := NewPunchyFromConfig(l, config.NewC(l))
nc.intf = ifce nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
ifce.connectionManager = nc ifce.connectionManager = nc
hostinfo := &HostInfo{ hostinfo := &HostInfo{


@@ -26,15 +26,14 @@ type controlHostLister interface {
} }
type Control struct { type Control struct {
f *Interface f *Interface
l *logrus.Logger l *logrus.Logger
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
sshStart func() sshStart func()
statsStart func() statsStart func()
dnsStart func() dnsStart func()
lighthouseStart func() lighthouseStart func()
connectionManagerStart func(context.Context)
} }
type ControlHostInfo struct { type ControlHostInfo struct {
@@ -64,9 +63,6 @@ func (c *Control) Start() {
if c.dnsStart != nil { if c.dnsStart != nil {
go c.dnsStart() go c.dnsStart()
} }
if c.connectionManagerStart != nil {
go c.connectionManagerStart(c.ctx)
}
if c.lighthouseStart != nil { if c.lighthouseStart != nil {
c.lighthouseStart() c.lighthouseStart()
} }
@@ -135,7 +131,8 @@ func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
// GetCertByVpnIp returns the authenticated certificate of the given vpn IP, or nil if not found // GetCertByVpnIp returns the authenticated certificate of the given vpn IP, or nil if not found
func (c *Control) GetCertByVpnIp(vpnIp netip.Addr) cert.Certificate { func (c *Control) GetCertByVpnIp(vpnIp netip.Addr) cert.Certificate {
if c.f.myVpnAddrsTable.Contains(vpnIp) { _, found := c.f.myVpnAddrsTable.Lookup(vpnIp)
if found {
// Only returning the default certificate since its impossible // Only returning the default certificate since its impossible
// for any other host but ourselves to have more than 1 // for any other host but ourselves to have more than 1
return c.f.pki.getCertState().GetDefaultCertificate().Copy() return c.f.pki.getCertState().GetDefaultCertificate().Copy()
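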
@@ -218,7 +215,7 @@ func (c *Control) CloseTunnel(vpnIp netip.Addr, localOnly bool) bool {
hostInfo.ConnectionState, hostInfo.ConnectionState,
hostInfo, hostInfo,
[]byte{}, []byte{},
make([]byte, 12, 12), make([]byte, 12),
make([]byte, mtu), make([]byte, mtu),
) )
} }
@@ -234,7 +231,7 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
if excludeLighthouses && c.f.lightHouse.IsAnyLighthouseAddr(h.vpnAddrs) { if excludeLighthouses && c.f.lightHouse.IsAnyLighthouseAddr(h.vpnAddrs) {
return return
} }
c.f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12, 12), make([]byte, mtu)) c.f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12), make([]byte, mtu))
c.f.closeTunnel(h) c.f.closeTunnel(h)
c.l.WithField("vpnAddrs", h.vpnAddrs).WithField("udpAddr", h.remote). c.l.WithField("vpnAddrs", h.vpnAddrs).WithField("udpAddr", h.remote).
@@ -285,9 +282,7 @@ func copyHostInfo(h *HostInfo, preferredRanges []netip.Prefix) ControlHostInfo {
CurrentRemote: h.remote, CurrentRemote: h.remote,
} }
for i, a := range h.vpnAddrs { copy(chi.VpnAddrs, h.vpnAddrs)
chi.VpnAddrs[i] = a
}
if h.ConnectionState != nil { if h.ConnectionState != nil {
chi.MessageCounter = h.ConnectionState.messageCounter.Load() chi.MessageCounter = h.ConnectionState.messageCounter.Load()


@@ -26,13 +26,11 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
remote2 := netip.MustParseAddrPort("[1:2:3:4:5:6:7:8]:4444") remote2 := netip.MustParseAddrPort("[1:2:3:4:5:6:7:8]:4444")
ipNet := net.IPNet{ ipNet := net.IPNet{
IP: remote1.Addr().AsSlice(), IP: remote1.Addr().AsSlice(),
Mask: net.IPMask{255, 255, 255, 0},
} }
ipNet2 := net.IPNet{ ipNet2 := net.IPNet{
IP: remote2.Addr().AsSlice(), IP: remote2.Addr().AsSlice(),
Mask: net.IPMask{255, 255, 255, 0},
} }
remotes := NewRemoteList([]netip.Addr{netip.IPv4Unspecified()}, nil) remotes := NewRemoteList([]netip.Addr{netip.IPv4Unspecified()}, nil)
@@ -53,7 +51,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
localIndexId: 201, localIndexId: 201,
vpnAddrs: []netip.Addr{vpnIp}, vpnAddrs: []netip.Addr{vpnIp},
relayState: RelayState{ relayState: RelayState{
relays: nil, relays: map[netip.Addr]struct{}{},
relayForByAddr: map[netip.Addr]*Relay{}, relayForByAddr: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{}, relayForByIdx: map[uint32]*Relay{},
}, },
@@ -72,7 +70,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
localIndexId: 201, localIndexId: 201,
vpnAddrs: []netip.Addr{vpnIp2}, vpnAddrs: []netip.Addr{vpnIp2},
relayState: RelayState{ relayState: RelayState{
relays: nil, relays: map[netip.Addr]struct{}{},
relayForByAddr: map[netip.Addr]*Relay{}, relayForByAddr: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{}, relayForByIdx: map[uint32]*Relay{},
}, },


@@ -26,7 +26,7 @@ type dnsRecords struct {
dnsMap4 map[string]netip.Addr dnsMap4 map[string]netip.Addr
dnsMap6 map[string]netip.Addr dnsMap6 map[string]netip.Addr
hostMap *HostMap hostMap *HostMap
myVpnAddrsTable *bart.Lite myVpnAddrsTable *bart.Table[struct{}]
} }
func newDnsRecords(l *logrus.Logger, cs *CertState, hostMap *HostMap) *dnsRecords { func newDnsRecords(l *logrus.Logger, cs *CertState, hostMap *HostMap) *dnsRecords {
@@ -112,8 +112,8 @@ func (d *dnsRecords) isSelfNebulaOrLocalhost(addr string) bool {
return true return true
} }
//if we found it in this table, it's good _, found := d.myVpnAddrsTable.Lookup(b)
return d.myVpnAddrsTable.Contains(b) return found //if we found it in this table, it's good
} }
func (d *dnsRecords) parseQuery(m *dns.Msg, w dns.ResponseWriter) { func (d *dnsRecords) parseQuery(m *dns.Msg, w dns.ResponseWriter) {


@@ -506,7 +506,7 @@ func TestReestablishRelays(t *testing.T) {
curIndexes := len(myControl.GetHostmap().Indexes) curIndexes := len(myControl.GetHostmap().Indexes)
for curIndexes >= start { for curIndexes >= start {
curIndexes = len(myControl.GetHostmap().Indexes) curIndexes = len(myControl.GetHostmap().Indexes)
r.Logf("Wait for the dead index to go away:start=%v indexes, current=%v indexes", start, curIndexes) r.Logf("Wait for the dead index to go away:start=%v indexes, currnet=%v indexes", start, curIndexes)
myControl.InjectTunUDPPacket(theirVpnIpNet[0].Addr(), 80, myVpnIpNet[0].Addr(), 80, []byte("Hi from me should fail")) myControl.InjectTunUDPPacket(theirVpnIpNet[0].Addr(), 80, myVpnIpNet[0].Addr(), 80, []byte("Hi from me should fail"))
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType { r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
@@ -1052,9 +1052,6 @@ func TestRehandshakingLoser(t *testing.T) {
t.Log("Stand up a tunnel between me and them") t.Log("Stand up a tunnel between me and them")
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r) assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
myControl.GetHostInfoByVpnAddr(theirVpnIpNet[0].Addr(), false)
theirControl.GetHostInfoByVpnAddr(myVpnIpNet[0].Addr(), false)
r.RenderHostmaps("Starting hostmaps", myControl, theirControl) r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
r.Log("Renew their certificate and spin until mine sees it") r.Log("Renew their certificate and spin until mine sees it")


@@ -700,7 +700,6 @@ func (r *R) FlushAll() {
r.Unlock() r.Unlock()
panic("Can't FlushAll for host: " + p.To.String()) panic("Can't FlushAll for host: " + p.To.String())
} }
receiver.InjectUDPPacket(p)
r.Unlock() r.Unlock()
} }
} }


@@ -1,57 +0,0 @@
//go:build e2e_testing
// +build e2e_testing
package e2e
import (
"testing"
"time"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/cert_test"
"github.com/slackhq/nebula/e2e/router"
)
func TestDropInactiveTunnels(t *testing.T) {
// The goal of this test is to ensure the shortest inactivity timeout will close the tunnel on both sides
// under ideal conditions
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version1, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(cert.Version1, ca, caKey, "me", "10.128.0.1/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "5s"}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(cert.Version1, ca, caKey, "them", "10.128.0.2/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "10m"}})
// Share our underlay information
myControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet[0].Addr(), myUdpAddr)
// Start the servers
myControl.Start()
theirControl.Start()
r := router.NewR(t, myControl, theirControl)
r.Log("Assert the tunnel between me and them works")
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
r.Log("Go inactive and wait for the tunnels to get dropped")
waitStart := time.Now()
for {
myIndexes := len(myControl.GetHostmap().Indexes)
theirIndexes := len(theirControl.GetHostmap().Indexes)
if myIndexes == 0 && theirIndexes == 0 {
break
}
since := time.Since(waitStart)
r.Logf("my tunnels: %v; their tunnels: %v; duration: %v", myIndexes, theirIndexes, since)
if since > time.Second*30 {
t.Fatal("Tunnel should have been declared inactive after 5 seconds and before 30 seconds")
}
time.Sleep(1 * time.Second)
r.FlushAll()
}
r.Logf("Inactive tunnels were dropped within %v", time.Since(waitStart))
myControl.Stop()
theirControl.Stop()
}
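
The removed test above amounts to a poll-with-deadline loop over both hostmaps. A generic sketch of that pattern; waitFor is a hypothetical helper, not part of the e2e router package.

package main

import (
	"fmt"
	"time"
)

// waitFor polls cond at the given interval until it returns true or the
// timeout elapses, instead of sleeping for a fixed amount of time.
func waitFor(cond func() bool, timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for !cond() {
		if time.Now().After(deadline) {
			return fmt.Errorf("condition not met within %v", timeout)
		}
		time.Sleep(interval)
	}
	return nil
}

func main() {
	start := time.Now()
	err := waitFor(func() bool { return time.Since(start) > 2*time.Second },
		30*time.Second, 250*time.Millisecond)
	fmt.Println(err, time.Since(start).Round(time.Second))
}
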


@@ -275,10 +275,6 @@ tun:
# On linux only, set to true to manage unsafe routes directly on the system route table with gateway routes instead of # On linux only, set to true to manage unsafe routes directly on the system route table with gateway routes instead of
# in nebula configuration files. Default false, not reloadable. # in nebula configuration files. Default false, not reloadable.
#use_system_route_table: false #use_system_route_table: false
# Buffer size for reading routes updates. 0 means default system buffer size. (/proc/sys/net/core/rmem_default).
# If using massive routes updates, for example BGP, you may need to increase this value to avoid packet loss.
# SO_RCVBUFFORCE is used to avoid having to raise the system wide max
#use_system_route_table_buffer_size: 0
# Configure logging level # Configure logging level
logging: logging:
@@ -338,18 +334,6 @@ logging:
# after receiving the response for lighthouse queries # after receiving the response for lighthouse queries
#trigger_buffer: 64 #trigger_buffer: 64
# Tunnel manager settings
#tunnels:
# drop_inactive controls whether inactive tunnels are maintained or dropped after the inactive_timeout period has
# elapsed.
# In general, it is a good idea to enable this setting. It will be enabled by default in a future release.
# This setting is reloadable
#drop_inactive: false
# inactivity_timeout controls how long a tunnel MUST NOT see any inbound or outbound traffic before being considered
# inactive and eligible to be dropped.
# This setting is reloadable
#inactivity_timeout: 10m
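
A hedged sketch of how these two keys could be consumed on load or reload; the map layout mirrors the yaml above, but loadTunnelSettings is illustrative and not the actual nebula config API.

package main

import (
	"fmt"
	"time"
)

type tunnelSettings struct {
	dropInactive      bool
	inactivityTimeout time.Duration
}

func loadTunnelSettings(raw map[string]any) tunnelSettings {
	s := tunnelSettings{inactivityTimeout: 10 * time.Minute} // documented default
	t, _ := raw["tunnels"].(map[string]any)
	if v, ok := t["drop_inactive"].(bool); ok {
		s.dropInactive = v
	}
	if v, ok := t["inactivity_timeout"].(string); ok {
		if d, err := time.ParseDuration(v); err == nil {
			s.inactivityTimeout = d
		}
	}
	return s
}

func main() {
	cfg := map[string]any{"tunnels": map[string]any{
		"drop_inactive":      true,
		"inactivity_timeout": "5m",
	}}
	fmt.Printf("%+v\n", loadTunnelSettings(cfg))
}
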
# Nebula security group configuration # Nebula security group configuration
firewall: firewall:


@@ -5,12 +5,8 @@ import (
"fmt" "fmt"
"log" "log"
"net" "net"
"os"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/config" "github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/overlay"
"github.com/slackhq/nebula/service" "github.com/slackhq/nebula/service"
) )
@@ -63,16 +59,7 @@ pki:
if err := cfg.LoadString(configStr); err != nil { if err := cfg.LoadString(configStr); err != nil {
return err return err
} }
svc, err := service.New(&cfg)
logger := logrus.New()
logger.Out = os.Stdout
ctrl, err := nebula.Main(&cfg, false, "custom-app", logger, overlay.NewUserDeviceFromConfig)
if err != nil {
return err
}
svc, err := service.New(ctrl)
if err != nil { if err != nil {
return err return err
} }


@@ -53,7 +53,7 @@ type Firewall struct {
// routableNetworks describes the vpn addresses as well as any unsafe networks issued to us in the certificate. // routableNetworks describes the vpn addresses as well as any unsafe networks issued to us in the certificate.
// The vpn addresses are a full bit match while the unsafe networks only match the prefix // The vpn addresses are a full bit match while the unsafe networks only match the prefix
routableNetworks *bart.Lite routableNetworks *bart.Table[struct{}]
// assignedNetworks is a list of vpn networks assigned to us in the certificate. // assignedNetworks is a list of vpn networks assigned to us in the certificate.
assignedNetworks []netip.Prefix assignedNetworks []netip.Prefix
@@ -125,7 +125,7 @@ type firewallPort map[int32]*FirewallCA
type firewallLocalCIDR struct { type firewallLocalCIDR struct {
Any bool Any bool
LocalCIDR *bart.Lite LocalCIDR *bart.Table[struct{}]
} }
// NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts. // NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts.
@@ -148,17 +148,17 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
tmax = defaultTimeout tmax = defaultTimeout
} }
routableNetworks := new(bart.Lite) routableNetworks := new(bart.Table[struct{}])
var assignedNetworks []netip.Prefix var assignedNetworks []netip.Prefix
for _, network := range c.Networks() { for _, network := range c.Networks() {
nprefix := netip.PrefixFrom(network.Addr(), network.Addr().BitLen()) nprefix := netip.PrefixFrom(network.Addr(), network.Addr().BitLen())
routableNetworks.Insert(nprefix) routableNetworks.Insert(nprefix, struct{}{})
assignedNetworks = append(assignedNetworks, network) assignedNetworks = append(assignedNetworks, network)
} }
hasUnsafeNetworks := false hasUnsafeNetworks := false
for _, n := range c.UnsafeNetworks() { for _, n := range c.UnsafeNetworks() {
routableNetworks.Insert(n) routableNetworks.Insert(n, struct{}{})
hasUnsafeNetworks = true hasUnsafeNetworks = true
} }
@@ -431,7 +431,8 @@ func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *
// Make sure remote address matches nebula certificate // Make sure remote address matches nebula certificate
if h.networks != nil { if h.networks != nil {
if !h.networks.Contains(fp.RemoteAddr) { _, ok := h.networks.Lookup(fp.RemoteAddr)
if !ok {
f.metrics(incoming).droppedRemoteAddr.Inc(1) f.metrics(incoming).droppedRemoteAddr.Inc(1)
return ErrInvalidRemoteIP return ErrInvalidRemoteIP
} }
@@ -444,7 +445,8 @@ func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *
} }
// Make sure we are supposed to be handling this local ip address // Make sure we are supposed to be handling this local ip address
if !f.routableNetworks.Contains(fp.LocalAddr) { _, ok := f.routableNetworks.Lookup(fp.LocalAddr)
if !ok {
f.metrics(incoming).droppedLocalAddr.Inc(1) f.metrics(incoming).droppedLocalAddr.Inc(1)
return ErrInvalidLocalIP return ErrInvalidLocalIP
} }
@@ -604,7 +606,7 @@ func (f *Firewall) evict(p firewall.Packet) {
return return
} }
newT := t.Expires.Sub(time.Now()) newT := time.Until(t.Expires)
// Timeout is in the future, re-add the timer // Timeout is in the future, re-add the timer
if newT > 0 { if newT > 0 {
@@ -750,7 +752,7 @@ func (fc *FirewallCA) match(p firewall.Packet, c *cert.CachedCertificate, caPool
func (fr *FirewallRule) addRule(f *Firewall, groups []string, host string, ip, localCIDR netip.Prefix) error { func (fr *FirewallRule) addRule(f *Firewall, groups []string, host string, ip, localCIDR netip.Prefix) error {
flc := func() *firewallLocalCIDR { flc := func() *firewallLocalCIDR {
return &firewallLocalCIDR{ return &firewallLocalCIDR{
LocalCIDR: new(bart.Lite), LocalCIDR: new(bart.Table[struct{}]),
} }
} }
@@ -830,7 +832,7 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.CachedCertificate) bool
} }
// Shortcut path for if groups, hosts, or cidr contained an `any` // Shortcut path for if groups, hosts, or cidr contained an `any`
if fr.Any.match(p, c) { if fr.Any.match(p) {
return true return true
} }
@@ -847,21 +849,21 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.CachedCertificate) bool
found = true found = true
} }
if found && sg.LocalCIDR.match(p, c) { if found && sg.LocalCIDR.match(p) {
return true return true
} }
} }
if fr.Hosts != nil { if fr.Hosts != nil {
if flc, ok := fr.Hosts[c.Certificate.Name()]; ok { if flc, ok := fr.Hosts[c.Certificate.Name()]; ok {
if flc.match(p, c) { if flc.match(p) {
return true return true
} }
} }
} }
for _, v := range fr.CIDR.Supernets(netip.PrefixFrom(p.RemoteAddr, p.RemoteAddr.BitLen())) { for _, v := range fr.CIDR.Supernets(netip.PrefixFrom(p.RemoteAddr, p.RemoteAddr.BitLen())) {
if v.match(p, c) { if v.match(p) {
return true return true
} }
} }
@@ -877,7 +879,7 @@ func (flc *firewallLocalCIDR) addRule(f *Firewall, localIp netip.Prefix) error {
} }
for _, network := range f.assignedNetworks { for _, network := range f.assignedNetworks {
flc.LocalCIDR.Insert(network) flc.LocalCIDR.Insert(network, struct{}{})
} }
return nil return nil
@@ -886,11 +888,11 @@ func (flc *firewallLocalCIDR) addRule(f *Firewall, localIp netip.Prefix) error {
return nil return nil
} }
flc.LocalCIDR.Insert(localIp) flc.LocalCIDR.Insert(localIp, struct{}{})
return nil return nil
} }
func (flc *firewallLocalCIDR) match(p firewall.Packet, c *cert.CachedCertificate) bool { func (flc *firewallLocalCIDR) match(p firewall.Packet) bool {
if flc == nil { if flc == nil {
return false return false
} }
@@ -899,7 +901,8 @@ func (flc *firewallLocalCIDR) match(p firewall.Packet, c *cert.CachedCertificate
return true return true
} }
return flc.LocalCIDR.Contains(p.LocalAddr) _, ok := flc.LocalCIDR.Lookup(p.LocalAddr)
return ok
} }
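
The recurring change in this file is swapping the set-only bart.Lite for bart.Table[struct{}]. A side-by-side sketch of the two call patterns as they appear in the hunks above, assuming a gaissmai/bart release that provides both types (the newer of the two pins in go.mod below).

package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	pfx := netip.MustParsePrefix("10.128.0.0/24")
	addr := netip.MustParseAddr("10.128.0.7")

	// bart.Lite: a prefix set with no payload.
	lite := new(bart.Lite)
	lite.Insert(pfx)
	fmt.Println(lite.Contains(addr)) // true

	// bart.Table[struct{}]: the same routing table carrying an empty value.
	tbl := new(bart.Table[struct{}])
	tbl.Insert(pfx, struct{}{})
	_, ok := tbl.Lookup(addr)
	fmt.Println(ok) // true
}
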
type rule struct { type rule struct {


@@ -35,22 +35,27 @@ func TestNewFirewall(t *testing.T) {
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(l, time.Second, time.Hour, time.Minute, c) fw = NewFirewall(l, time.Second, time.Hour, time.Minute, c)
conntrack = fw.Conntrack
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(l, time.Hour, time.Second, time.Minute, c) fw = NewFirewall(l, time.Hour, time.Second, time.Minute, c)
conntrack = fw.Conntrack
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(l, time.Hour, time.Minute, time.Second, c) fw = NewFirewall(l, time.Hour, time.Minute, time.Second, c)
conntrack = fw.Conntrack
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(l, time.Minute, time.Hour, time.Second, c) fw = NewFirewall(l, time.Minute, time.Hour, time.Second, c)
conntrack = fw.Conntrack
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
fw = NewFirewall(l, time.Minute, time.Second, time.Hour, c) fw = NewFirewall(l, time.Minute, time.Second, time.Hour, c)
conntrack = fw.Conntrack
assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration) assert.Equal(t, time.Hour, conntrack.TimerWheel.wheelDuration)
assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen) assert.Equal(t, 3602, conntrack.TimerWheel.wheelLen)
} }

go.mod

@@ -1,32 +1,32 @@
module github.com/slackhq/nebula module github.com/slackhq/nebula
go 1.24.0 go 1.23.6
toolchain go1.24.1 toolchain go1.24.1
require ( require (
dario.cat/mergo v1.0.2 dario.cat/mergo v1.0.1
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
github.com/armon/go-radix v1.0.0 github.com/armon/go-radix v1.0.0
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
github.com/flynn/noise v1.1.0 github.com/flynn/noise v1.1.0
github.com/gaissmai/bart v0.20.4 github.com/gaissmai/bart v0.20.1
github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf v1.3.2
github.com/google/gopacket v1.1.19 github.com/google/gopacket v1.1.19
github.com/kardianos/service v1.2.2 github.com/kardianos/service v1.2.2
github.com/miekg/dns v1.1.65 github.com/miekg/dns v1.1.65
github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
github.com/prometheus/client_golang v1.22.0 github.com/prometheus/client_golang v1.21.1
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/sirupsen/logrus v1.9.3 github.com/sirupsen/logrus v1.9.3
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6 github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.10.0
github.com/vishvananda/netlink v1.3.1 github.com/vishvananda/netlink v1.3.0
golang.org/x/crypto v0.37.0 golang.org/x/crypto v0.37.0
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
golang.org/x/net v0.39.0 golang.org/x/net v0.38.0
golang.org/x/sync v0.13.0 golang.org/x/sync v0.13.0
golang.org/x/sys v0.32.0 golang.org/x/sys v0.32.0
golang.org/x/term v0.31.0 golang.org/x/term v0.31.0
@@ -43,12 +43,13 @@ require (
github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/google/btree v1.1.2 // indirect github.com/google/btree v1.1.2 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.6.1 // indirect github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect
github.com/vishvananda/netns v0.0.5 // indirect github.com/vishvananda/netns v0.0.4 // indirect
golang.org/x/mod v0.23.0 // indirect golang.org/x/mod v0.23.0 // indirect
golang.org/x/time v0.5.0 // indirect golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.30.0 // indirect golang.org/x/tools v0.30.0 // indirect

go.sum

@@ -1,6 +1,6 @@
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -24,8 +24,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/gaissmai/bart v0.20.4 h1:Ik47r1fy3jRVU+1eYzKSW3ho2UgBVTVnUS8O993584U= github.com/gaissmai/bart v0.20.1 h1:igNss0zDsSY8e+ophKgD9KJVPKBOo7uSVjyKCL7nIzo=
github.com/gaissmai/bart v0.20.4/go.mod h1:cEed+ge8dalcbpi8wtS9x9m2hn/fNJH5suhdGQOHnYk= github.com/gaissmai/bart v0.20.1/go.mod h1:JJzMAhNF5Rjo4SF4jWBrANuJfqY+FvsFhW7t1UZJ+XY=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -53,8 +53,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
@@ -68,8 +68,8 @@ github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX
github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -106,8 +106,8 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -145,10 +145,10 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0= github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4= github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -176,8 +176,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=


@@ -192,7 +192,8 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
for _, network := range remoteCert.Certificate.Networks() { for _, network := range remoteCert.Certificate.Networks() {
vpnAddr := network.Addr() vpnAddr := network.Addr()
if f.myVpnAddrsTable.Contains(vpnAddr) { _, found := f.myVpnAddrsTable.Lookup(vpnAddr)
if found {
f.l.WithField("vpnAddr", vpnAddr).WithField("udpAddr", addr). f.l.WithField("vpnAddr", vpnAddr).WithField("udpAddr", addr).
WithField("certName", certName). WithField("certName", certName).
WithField("certVersion", certVersion). WithField("certVersion", certVersion).
@@ -203,7 +204,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
} }
// vpnAddrs outside our vpn networks are of no use to us, filter them out // vpnAddrs outside our vpn networks are of no use to us, filter them out
if !f.myVpnNetworksTable.Contains(vpnAddr) { if _, ok := f.myVpnNetworksTable.Lookup(vpnAddr); !ok {
continue continue
} }
@@ -249,7 +250,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
HandshakePacket: make(map[uint8][]byte, 0), HandshakePacket: make(map[uint8][]byte, 0),
lastHandshakeTime: hs.Details.Time, lastHandshakeTime: hs.Details.Time,
relayState: RelayState{ relayState: RelayState{
relays: nil, relays: map[netip.Addr]struct{}{},
relayForByAddr: map[netip.Addr]*Relay{}, relayForByAddr: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{}, relayForByIdx: map[uint32]*Relay{},
}, },
@@ -342,7 +343,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
if existing.SetRemoteIfPreferred(f.hostMap, addr) { if existing.SetRemoteIfPreferred(f.hostMap, addr) {
// Send a test packet to ensure the other side has also switched to // Send a test packet to ensure the other side has also switched to
// the preferred remote // the preferred remote
f.SendMessageToVpnAddr(header.Test, header.TestRequest, vpnAddrs[0], []byte(""), make([]byte, 12, 12), make([]byte, mtu)) f.SendMessageToVpnAddr(header.Test, header.TestRequest, vpnAddrs[0], []byte(""), make([]byte, 12), make([]byte, mtu))
} }
msg = existing.HandshakePacket[2] msg = existing.HandshakePacket[2]
@@ -385,7 +386,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
Info("Handshake too old") Info("Handshake too old")
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues // Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
f.SendMessageToVpnAddr(header.Test, header.TestRequest, vpnAddrs[0], []byte(""), make([]byte, 12, 12), make([]byte, mtu)) f.SendMessageToVpnAddr(header.Test, header.TestRequest, vpnAddrs[0], []byte(""), make([]byte, 12), make([]byte, mtu))
return return
case ErrLocalIndexCollision: case ErrLocalIndexCollision:
// This means we failed to insert because of collision on localIndexId. Just let the next handshake packet retry // This means we failed to insert because of collision on localIndexId. Just let the next handshake packet retry
@@ -457,11 +458,9 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
Info("Handshake message sent") Info("Handshake message sent")
} }
f.connectionManager.AddTrafficWatch(hostinfo) f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
hostinfo.remotes.ResetBlockedRemotes() hostinfo.remotes.ResetBlockedRemotes()
return
} }
func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *HandshakeHostInfo, packet []byte, h *header.H) bool { func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *HandshakeHostInfo, packet []byte, h *header.H) bool {
@@ -578,7 +577,7 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
for _, network := range vpnNetworks { for _, network := range vpnNetworks {
// vpnAddrs outside our vpn networks are of no use to us, filter them out // vpnAddrs outside our vpn networks are of no use to us, filter them out
vpnAddr := network.Addr() vpnAddr := network.Addr()
if !f.myVpnNetworksTable.Contains(vpnAddr) { if _, ok := f.myVpnNetworksTable.Lookup(vpnAddr); !ok {
continue continue
} }
@@ -652,14 +651,14 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
// Complete our handshake and update metrics, this will replace any existing tunnels for the vpnAddrs here // Complete our handshake and update metrics, this will replace any existing tunnels for the vpnAddrs here
f.handshakeManager.Complete(hostinfo, f) f.handshakeManager.Complete(hostinfo, f)
f.connectionManager.AddTrafficWatch(hostinfo) f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
if f.l.Level >= logrus.DebugLevel { if f.l.Level >= logrus.DebugLevel {
hostinfo.logger(f.l).Debugf("Sending %d stored packets", len(hh.packetStore)) hostinfo.logger(f.l).Debugf("Sending %d stored packets", len(hh.packetStore))
} }
if len(hh.packetStore) > 0 { if len(hh.packetStore) > 0 {
nb := make([]byte, 12, 12) nb := make([]byte, 12)
out := make([]byte, mtu) out := make([]byte, mtu)
for _, cp := range hh.packetStore { for _, cp := range hh.packetStore {
cp.callback(cp.messageType, cp.messageSubType, hostinfo, cp.packet, nb, out) cp.callback(cp.messageType, cp.messageSubType, hostinfo, cp.packet, nb, out)
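
The recurring change in this hunk (and in several below) swaps a membership test on a payload-carrying prefix table, _, found := table.Lookup(addr), for the boolean form table.Contains(addr) on a payload-free table. A minimal sketch of both styles, written for illustration and assuming the github.com/gaissmai/bart module used by the tree, in a version that exposes both Lite and Table:

package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart" // assumed import path for the prefix tables used in this diff
)

func main() {
	pfx := netip.MustParsePrefix("10.128.0.0/16")
	addr := netip.MustParseAddr("10.128.0.5")

	// Payload-free prefix set: Insert takes only the prefix, Contains returns a bool.
	lite := new(bart.Lite)
	lite.Insert(pfx)
	fmt.Println(lite.Contains(addr)) // true

	// Generic table keyed by prefix: Lookup returns (value, ok).
	tbl := new(bart.Table[struct{}])
	tbl.Insert(pfx, struct{}{})
	_, found := tbl.Lookup(addr)
	fmt.Println(found) // true
}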


@@ -274,7 +274,8 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
} }
// Don't relay through the host I'm trying to connect to // Don't relay through the host I'm trying to connect to
if hm.f.myVpnAddrsTable.Contains(relay) { _, found := hm.f.myVpnAddrsTable.Lookup(relay)
if found {
continue continue
} }
@@ -450,7 +451,7 @@ func (hm *HandshakeManager) StartHandshake(vpnAddr netip.Addr, cacheCb func(*Han
vpnAddrs: []netip.Addr{vpnAddr}, vpnAddrs: []netip.Addr{vpnAddr},
HandshakePacket: make(map[uint8][]byte, 0), HandshakePacket: make(map[uint8][]byte, 0),
relayState: RelayState{ relayState: RelayState{
relays: nil, relays: map[netip.Addr]struct{}{},
relayForByAddr: map[netip.Addr]*Relay{}, relayForByAddr: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{}, relayForByIdx: map[uint32]*Relay{},
}, },


@@ -65,30 +65,16 @@ func Test_NewHandshakeManagerVpnIp(t *testing.T) {
assert.NotContains(t, blah.vpnIps, ip) assert.NotContains(t, blah.vpnIps, ip)
} }
func testCountTimerWheelEntries(tw *LockingTimerWheel[netip.Addr]) (c int) {
for _, i := range tw.t.wheel {
n := i.Head
for n != nil {
c++
n = n.Next
}
}
return c
}
type mockEncWriter struct { type mockEncWriter struct {
} }
func (mw *mockEncWriter) SendMessageToVpnAddr(_ header.MessageType, _ header.MessageSubType, _ netip.Addr, _, _, _ []byte) { func (mw *mockEncWriter) SendMessageToVpnAddr(_ header.MessageType, _ header.MessageSubType, _ netip.Addr, _, _, _ []byte) {
return
} }
func (mw *mockEncWriter) SendVia(_ *HostInfo, _ *Relay, _, _, _ []byte, _ bool) { func (mw *mockEncWriter) SendVia(_ *HostInfo, _ *Relay, _, _, _ []byte, _ bool) {
return
} }
func (mw *mockEncWriter) SendMessageToHostInfo(_ header.MessageType, _ header.MessageSubType, _ *HostInfo, _, _, _ []byte) { func (mw *mockEncWriter) SendMessageToHostInfo(_ header.MessageType, _ header.MessageSubType, _ *HostInfo, _, _, _ []byte) {
return
} }
func (mw *mockEncWriter) Handshake(_ netip.Addr) {} func (mw *mockEncWriter) Handshake(_ netip.Addr) {}


@@ -23,7 +23,7 @@ type m = map[string]any
const ( const (
Version uint8 = 1 Version uint8 = 1
Len = 16 Len int = 16
) )
type MessageType uint8 type MessageType uint8
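
The only difference between the two Len declarations above is that one constant is typed and the other untyped. A small illustrative sketch of the practical effect:

package main

import "fmt"

const LenUntyped = 16     // untyped integer constant
const LenTyped   int = 16 // typed constant of type int

func main() {
	var a int64 = LenUntyped      // untyped constants convert implicitly
	var b int64 = int64(LenTyped) // a typed constant needs an explicit conversion
	fmt.Println(a, b)
}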


@@ -4,7 +4,6 @@ import (
"errors" "errors"
"net" "net"
"net/netip" "net/netip"
"slices"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
@@ -69,7 +68,7 @@ type HostMap struct {
type RelayState struct { type RelayState struct {
sync.RWMutex sync.RWMutex
relays []netip.Addr // Ordered set of VpnAddrs of Hosts to use as relays to access this peer relays map[netip.Addr]struct{} // Set of vpnAddr's of Hosts to use as relays to access this peer
// For data race avoidance, the contents of a *Relay are treated immutably. To update a *Relay, copy the existing data, // For data race avoidance, the contents of a *Relay are treated immutably. To update a *Relay, copy the existing data,
// modify what needs to be updated, and store the new modified copy in the relayForByIp and relayForByIdx maps (with // modify what needs to be updated, and store the new modified copy in the relayForByIp and relayForByIdx maps (with
// the RelayState Lock held) // the RelayState Lock held)
@@ -80,12 +79,7 @@ type RelayState struct {
func (rs *RelayState) DeleteRelay(ip netip.Addr) { func (rs *RelayState) DeleteRelay(ip netip.Addr) {
rs.Lock() rs.Lock()
defer rs.Unlock() defer rs.Unlock()
for idx, val := range rs.relays { delete(rs.relays, ip)
if val == ip {
rs.relays = append(rs.relays[:idx], rs.relays[idx+1:]...)
return
}
}
} }
func (rs *RelayState) UpdateRelayForByIpState(vpnIp netip.Addr, state int) { func (rs *RelayState) UpdateRelayForByIpState(vpnIp netip.Addr, state int) {
@@ -130,16 +124,16 @@ func (rs *RelayState) GetRelayForByAddr(addr netip.Addr) (*Relay, bool) {
func (rs *RelayState) InsertRelayTo(ip netip.Addr) { func (rs *RelayState) InsertRelayTo(ip netip.Addr) {
rs.Lock() rs.Lock()
defer rs.Unlock() defer rs.Unlock()
if !slices.Contains(rs.relays, ip) { rs.relays[ip] = struct{}{}
rs.relays = append(rs.relays, ip)
}
} }
func (rs *RelayState) CopyRelayIps() []netip.Addr { func (rs *RelayState) CopyRelayIps() []netip.Addr {
ret := make([]netip.Addr, len(rs.relays))
rs.RLock() rs.RLock()
defer rs.RUnlock() defer rs.RUnlock()
copy(ret, rs.relays) ret := make([]netip.Addr, 0, len(rs.relays))
for ip := range rs.relays {
ret = append(ret, ip)
}
return ret return ret
} }
@@ -229,7 +223,7 @@ type HostInfo struct {
recvError atomic.Uint32 recvError atomic.Uint32
// networks are both all vpn and unsafe networks assigned to this host // networks are both all vpn and unsafe networks assigned to this host
networks *bart.Lite networks *bart.Table[struct{}]
relayState RelayState relayState RelayState
// HandshakePacket records the packets used to create this hostinfo // HandshakePacket records the packets used to create this hostinfo
@@ -256,14 +250,6 @@ type HostInfo struct {
// Used to track other hostinfos for this vpn ip since only 1 can be primary // Used to track other hostinfos for this vpn ip since only 1 can be primary
// Synchronised via hostmap lock and not the hostinfo lock. // Synchronised via hostmap lock and not the hostinfo lock.
next, prev *HostInfo next, prev *HostInfo
//TODO: in, out, and others might benefit from being an atomic.Int32. We could collapse connectionManager pendingDeletion, relayUsed, and in/out into this 1 thing
in, out, pendingDeletion atomic.Bool
// lastUsed tracks the last time ConnectionManager checked the tunnel and it was in use.
// This value will be behind against actual tunnel utilization in the hot path.
// This should only be used by the ConnectionManagers ticker routine.
lastUsed time.Time
} }
type ViaSender struct { type ViaSender struct {
@@ -582,7 +568,7 @@ func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
dnsR.Add(remoteCert.Certificate.Name()+".", hostinfo.vpnAddrs) dnsR.Add(remoteCert.Certificate.Name()+".", hostinfo.vpnAddrs)
} }
for _, addr := range hostinfo.vpnAddrs { for _, addr := range hostinfo.vpnAddrs {
hm.unlockedInnerAddHostInfo(addr, hostinfo, f) hm.unlockedInnerAddHostInfo(addr, hostinfo)
} }
hm.Indexes[hostinfo.localIndexId] = hostinfo hm.Indexes[hostinfo.localIndexId] = hostinfo
@@ -595,7 +581,7 @@ func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
} }
} }
func (hm *HostMap) unlockedInnerAddHostInfo(vpnAddr netip.Addr, hostinfo *HostInfo, f *Interface) { func (hm *HostMap) unlockedInnerAddHostInfo(vpnAddr netip.Addr, hostinfo *HostInfo) {
existing := hm.Hosts[vpnAddr] existing := hm.Hosts[vpnAddr]
hm.Hosts[vpnAddr] = hostinfo hm.Hosts[vpnAddr] = hostinfo
@@ -662,7 +648,7 @@ func (i *HostInfo) TryPromoteBest(preferredRanges []netip.Prefix, ifce *Interfac
// Try to send a test packet to that host, this should // Try to send a test packet to that host, this should
// cause it to detect a roaming event and switch remotes // cause it to detect a roaming event and switch remotes
ifce.sendTo(header.Test, header.TestRequest, i.ConnectionState, i, addr, []byte(""), make([]byte, 12, 12), make([]byte, mtu)) ifce.sendTo(header.Test, header.TestRequest, i.ConnectionState, i, addr, []byte(""), make([]byte, 12), make([]byte, mtu))
}) })
} }
@@ -746,13 +732,13 @@ func (i *HostInfo) buildNetworks(networks, unsafeNetworks []netip.Prefix) {
return return
} }
i.networks = new(bart.Lite) i.networks = new(bart.Table[struct{}])
for _, network := range networks { for _, network := range networks {
i.networks.Insert(network) i.networks.Insert(network, struct{}{})
} }
for _, network := range unsafeNetworks { for _, network := range unsafeNetworks {
i.networks.Insert(network) i.networks.Insert(network, struct{}{})
} }
} }
@@ -808,7 +794,7 @@ func localAddrs(l *logrus.Logger, allowList *LocalAllowList) []netip.Addr {
} }
addr = addr.Unmap() addr = addr.Unmap()
if addr.IsLoopback() == false && addr.IsLinkLocalUnicast() == false { if !addr.IsLoopback() && !addr.IsLinkLocalUnicast() {
isAllowed := allowList.Allow(addr) isAllowed := allowList.Allow(addr)
if l.Level >= logrus.TraceLevel { if l.Level >= logrus.TraceLevel {
l.WithField("localAddr", addr).WithField("allowed", isAllowed).Trace("localAllowList.Allow") l.WithField("localAddr", addr).WithField("allowed", isAllowed).Trace("localAllowList.Allow")
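
One side of this file keeps relayState.relays as an ordered slice with de-duplication on insert; the other keeps it as a set keyed by address. A standalone sketch of the slice-based pattern (locking omitted; the real RelayState guards these methods with its RWMutex):

package main

import (
	"fmt"
	"net/netip"
	"slices"
)

type relaySet struct {
	relays []netip.Addr // insertion order is preserved
}

func (r *relaySet) Insert(ip netip.Addr) {
	if !slices.Contains(r.relays, ip) { // de-duplicate on insert
		r.relays = append(r.relays, ip)
	}
}

func (r *relaySet) Delete(ip netip.Addr) {
	for i, v := range r.relays {
		if v == ip {
			r.relays = append(r.relays[:i], r.relays[i+1:]...)
			return
		}
	}
}

func (r *relaySet) Copy() []netip.Addr {
	out := make([]netip.Addr, len(r.relays))
	copy(out, r.relays)
	return out
}

func main() {
	var rs relaySet
	rs.Insert(netip.MustParseAddr("::1"))
	rs.Insert(netip.MustParseAddr("2001::1"))
	rs.Insert(netip.MustParseAddr("::1")) // duplicate, ignored
	fmt.Println(rs.Copy())                // [::1 2001::1]
	rs.Delete(netip.MustParseAddr("::1"))
	fmt.Println(rs.Copy())                // [2001::1]
}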


@@ -7,7 +7,6 @@ import (
"github.com/slackhq/nebula/config" "github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/test" "github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
func TestHostMap_MakePrimary(t *testing.T) { func TestHostMap_MakePrimary(t *testing.T) {
@@ -216,31 +215,3 @@ func TestHostMap_reload(t *testing.T) {
c.ReloadConfigString("preferred_ranges: [1.1.1.1/32]") c.ReloadConfigString("preferred_ranges: [1.1.1.1/32]")
assert.Equal(t, []string{"1.1.1.1/32"}, toS(hm.GetPreferredRanges())) assert.Equal(t, []string{"1.1.1.1/32"}, toS(hm.GetPreferredRanges()))
} }
func TestHostMap_RelayState(t *testing.T) {
h1 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 1}
a1 := netip.MustParseAddr("::1")
a2 := netip.MustParseAddr("2001::1")
h1.relayState.InsertRelayTo(a1)
assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)
h1.relayState.InsertRelayTo(a2)
assert.Equal(t, []netip.Addr{a1, a2}, h1.relayState.relays)
// Ensure that the first relay added is the first one returned in the copy
currentRelays := h1.relayState.CopyRelayIps()
require.Len(t, currentRelays, 2)
assert.Equal(t, a1, currentRelays[0])
// Deleting the last one in the list works ok
h1.relayState.DeleteRelay(a2)
assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)
// Deleting an element not in the list works ok
h1.relayState.DeleteRelay(a2)
assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)
// Deleting the only element in the list works ok
h1.relayState.DeleteRelay(a1)
assert.Equal(t, []netip.Addr{}, h1.relayState.relays)
}


@@ -22,12 +22,14 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
// Ignore local broadcast packets // Ignore local broadcast packets
if f.dropLocalBroadcast { if f.dropLocalBroadcast {
if f.myBroadcastAddrsTable.Contains(fwPacket.RemoteAddr) { _, found := f.myBroadcastAddrsTable.Lookup(fwPacket.RemoteAddr)
if found {
return return
} }
} }
if f.myVpnAddrsTable.Contains(fwPacket.RemoteAddr) { _, found := f.myVpnAddrsTable.Lookup(fwPacket.RemoteAddr)
if found {
// Immediately forward packets from self to self. // Immediately forward packets from self to self.
// This should only happen on Darwin-based and FreeBSD hosts, which // This should only happen on Darwin-based and FreeBSD hosts, which
// routes packets from the Nebula addr to the Nebula addr through the Nebula // routes packets from the Nebula addr to the Nebula addr through the Nebula
@@ -128,7 +130,8 @@ func (f *Interface) Handshake(vpnAddr netip.Addr) {
// getOrHandshakeNoRouting returns nil if the vpnAddr is not routable. // getOrHandshakeNoRouting returns nil if the vpnAddr is not routable.
// If the 2nd return var is false then the hostinfo is not ready to be used in a tunnel // If the 2nd return var is false then the hostinfo is not ready to be used in a tunnel
func (f *Interface) getOrHandshakeNoRouting(vpnAddr netip.Addr, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) { func (f *Interface) getOrHandshakeNoRouting(vpnAddr netip.Addr, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) {
if f.myVpnNetworksTable.Contains(vpnAddr) { _, found := f.myVpnNetworksTable.Lookup(vpnAddr)
if found {
return f.handshakeManager.GetOrHandshake(vpnAddr, cacheCallback) return f.handshakeManager.GetOrHandshake(vpnAddr, cacheCallback)
} }
@@ -288,7 +291,7 @@ func (f *Interface) SendVia(via *HostInfo,
c := via.ConnectionState.messageCounter.Add(1) c := via.ConnectionState.messageCounter.Add(1)
out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c) out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c)
f.connectionManager.Out(via) f.connectionManager.Out(via.localIndexId)
// Authenticate the header and payload, but do not encrypt for this message type. // Authenticate the header and payload, but do not encrypt for this message type.
// The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload. // The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload.
@@ -356,7 +359,7 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p) //l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c) out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c)
f.connectionManager.Out(hostinfo) f.connectionManager.Out(hostinfo.localIndexId)
// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against // Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
// all our addrs and enable a faster roaming. // all our addrs and enable a faster roaming.


@@ -2,7 +2,6 @@ package nebula
import ( import (
"context" "context"
"crypto/fips140"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@@ -25,23 +24,23 @@ import (
const mtu = 9001 const mtu = 9001
type InterfaceConfig struct { type InterfaceConfig struct {
HostMap *HostMap HostMap *HostMap
Outside udp.Conn Outside udp.Conn
Inside overlay.Device Inside overlay.Device
pki *PKI pki *PKI
Cipher string Firewall *Firewall
Firewall *Firewall ServeDns bool
ServeDns bool HandshakeManager *HandshakeManager
HandshakeManager *HandshakeManager lightHouse *LightHouse
lightHouse *LightHouse checkInterval time.Duration
connectionManager *connectionManager pendingDeletionInterval time.Duration
DropLocalBroadcast bool DropLocalBroadcast bool
DropMulticast bool DropMulticast bool
routines int routines int
MessageMetrics *MessageMetrics MessageMetrics *MessageMetrics
version string version string
relayManager *relayManager relayManager *relayManager
punchy *Punchy punchy *Punchy
tryPromoteEvery uint32 tryPromoteEvery uint32
reQueryEvery uint32 reQueryEvery uint32
@@ -62,11 +61,11 @@ type Interface struct {
serveDns bool serveDns bool
createTime time.Time createTime time.Time
lightHouse *LightHouse lightHouse *LightHouse
myBroadcastAddrsTable *bart.Lite myBroadcastAddrsTable *bart.Table[struct{}]
myVpnAddrs []netip.Addr // A list of addresses assigned to us via our certificate myVpnAddrs []netip.Addr // A list of addresses assigned to us via our certificate
myVpnAddrsTable *bart.Lite myVpnAddrsTable *bart.Table[struct{}] // A table of addresses assigned to us via our certificate
myVpnNetworks []netip.Prefix // A list of networks assigned to us via our certificate myVpnNetworks []netip.Prefix // A list of networks assigned to us via our certificate
myVpnNetworksTable *bart.Lite myVpnNetworksTable *bart.Table[struct{}] // A table of networks assigned to us via our certificate
dropLocalBroadcast bool dropLocalBroadcast bool
dropMulticast bool dropMulticast bool
routines int routines int
@@ -158,9 +157,6 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
if c.Firewall == nil { if c.Firewall == nil {
return nil, errors.New("no firewall rules") return nil, errors.New("no firewall rules")
} }
if c.connectionManager == nil {
return nil, errors.New("no connection manager")
}
cs := c.pki.getCertState() cs := c.pki.getCertState()
ifce := &Interface{ ifce := &Interface{
@@ -185,7 +181,7 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
myVpnAddrsTable: cs.myVpnAddrsTable, myVpnAddrsTable: cs.myVpnAddrsTable,
myBroadcastAddrsTable: cs.myVpnBroadcastAddrsTable, myBroadcastAddrsTable: cs.myVpnBroadcastAddrsTable,
relayManager: c.relayManager, relayManager: c.relayManager,
connectionManager: c.connectionManager,
conntrackCacheTimeout: c.ConntrackCacheTimeout, conntrackCacheTimeout: c.ConntrackCacheTimeout,
metricHandshakes: metrics.GetOrRegisterHistogram("handshakes", nil, metrics.NewExpDecaySample(1028, 0.015)), metricHandshakes: metrics.GetOrRegisterHistogram("handshakes", nil, metrics.NewExpDecaySample(1028, 0.015)),
@@ -202,7 +198,7 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
ifce.reQueryEvery.Store(c.reQueryEvery) ifce.reQueryEvery.Store(c.reQueryEvery)
ifce.reQueryWait.Store(int64(c.reQueryWait)) ifce.reQueryWait.Store(int64(c.reQueryWait))
ifce.connectionManager.intf = ifce ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval, c.punchy)
return ifce, nil return ifce, nil
} }
@@ -221,7 +217,6 @@ func (f *Interface) activate() {
f.l.WithField("interface", f.inside.Name()).WithField("networks", f.myVpnNetworks). f.l.WithField("interface", f.inside.Name()).WithField("networks", f.myVpnNetworks).
WithField("build", f.version).WithField("udpAddr", addr). WithField("build", f.version).WithField("udpAddr", addr).
WithField("boringcrypto", boringEnabled()). WithField("boringcrypto", boringEnabled()).
WithField("fips140", fips140.Enabled()).
Info("Nebula interface is active") Info("Nebula interface is active")
metrics.GetOrRegisterGauge("routines", nil).Update(int64(f.routines)) metrics.GetOrRegisterGauge("routines", nil).Update(int64(f.routines))
@@ -271,7 +266,7 @@ func (f *Interface) listenOut(i int) {
plaintext := make([]byte, udp.MTU) plaintext := make([]byte, udp.MTU)
h := &header.H{} h := &header.H{}
fwPacket := &firewall.Packet{} fwPacket := &firewall.Packet{}
nb := make([]byte, 12, 12) nb := make([]byte, 12)
li.ListenOut(func(fromUdpAddr netip.AddrPort, payload []byte) { li.ListenOut(func(fromUdpAddr netip.AddrPort, payload []byte) {
f.readOutsidePackets(fromUdpAddr, nil, plaintext[:0], payload, h, fwPacket, lhh, nb, i, ctCache.Get(f.l)) f.readOutsidePackets(fromUdpAddr, nil, plaintext[:0], payload, h, fwPacket, lhh, nb, i, ctCache.Get(f.l))
@@ -284,7 +279,7 @@ func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
packet := make([]byte, mtu) packet := make([]byte, mtu)
out := make([]byte, mtu) out := make([]byte, mtu)
fwPacket := &firewall.Packet{} fwPacket := &firewall.Packet{}
nb := make([]byte, 12, 12) nb := make([]byte, 12)
conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout) conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)
@@ -327,7 +322,7 @@ func (f *Interface) reloadDisconnectInvalid(c *config.C) {
func (f *Interface) reloadFirewall(c *config.C) { func (f *Interface) reloadFirewall(c *config.C) {
//TODO: need to trigger/detect if the certificate changed too //TODO: need to trigger/detect if the certificate changed too
if c.HasChanged("firewall") == false { if !c.HasChanged("firewall") {
f.l.Debug("No firewall config change detected") f.l.Debug("No firewall config change detected")
return return
} }
@@ -429,7 +424,7 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
certState := f.pki.getCertState() certState := f.pki.getCertState()
defaultCrt := certState.GetDefaultCertificate() defaultCrt := certState.GetDefaultCertificate()
certExpirationGauge.Update(int64(defaultCrt.NotAfter().Sub(time.Now()) / time.Second)) certExpirationGauge.Update(int64(time.Until(defaultCrt.NotAfter()) / time.Second))
certInitiatingVersion.Update(int64(defaultCrt.Version())) certInitiatingVersion.Update(int64(defaultCrt.Version()))
// Report the max certificate version we are capable of using // Report the max certificate version we are capable of using
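
The emitStats change above replaces NotAfter().Sub(time.Now()) with time.Until(NotAfter()); the two are equivalent, the latter simply reads more directly. A throwaway sketch:

package main

import (
	"fmt"
	"time"
)

func main() {
	deadline := time.Now().Add(90 * time.Second)

	a := deadline.Sub(time.Now()) // older spelling
	b := time.Until(deadline)     // equivalent shorthand

	fmt.Println(int64(a/time.Second), int64(b/time.Second)) // both ~90
}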


@@ -32,7 +32,7 @@ type LightHouse struct {
amLighthouse bool amLighthouse bool
myVpnNetworks []netip.Prefix myVpnNetworks []netip.Prefix
myVpnNetworksTable *bart.Lite myVpnNetworksTable *bart.Table[struct{}]
punchConn udp.Conn punchConn udp.Conn
punchy *Punchy punchy *Punchy
@@ -201,7 +201,8 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
//TODO: we could technically insert all returned addrs instead of just the first one if a dns lookup was used //TODO: we could technically insert all returned addrs instead of just the first one if a dns lookup was used
addr := addrs[0].Unmap() addr := addrs[0].Unmap()
if lh.myVpnNetworksTable.Contains(addr) { _, found := lh.myVpnNetworksTable.Lookup(addr)
if found {
lh.l.WithField("addr", rawAddr).WithField("entry", i+1). lh.l.WithField("addr", rawAddr).WithField("entry", i+1).
Warn("Ignoring lighthouse.advertise_addrs report because it is within the nebula network range") Warn("Ignoring lighthouse.advertise_addrs report because it is within the nebula network range")
continue continue
@@ -358,7 +359,8 @@ func (lh *LightHouse) parseLighthouses(c *config.C, lhMap map[netip.Addr]struct{
return util.NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, err) return util.NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, err)
} }
if !lh.myVpnNetworksTable.Contains(addr) { _, found := lh.myVpnNetworksTable.Lookup(addr)
if !found {
return util.NewContextualError("lighthouse host is not in our networks, invalid", m{"vpnAddr": addr, "networks": lh.myVpnNetworks}, nil) return util.NewContextualError("lighthouse host is not in our networks, invalid", m{"vpnAddr": addr, "networks": lh.myVpnNetworks}, nil)
} }
lhMap[addr] = struct{}{} lhMap[addr] = struct{}{}
@@ -369,7 +371,7 @@ func (lh *LightHouse) parseLighthouses(c *config.C, lhMap map[netip.Addr]struct{
} }
staticList := lh.GetStaticHostList() staticList := lh.GetStaticHostList()
for lhAddr, _ := range lhMap { for lhAddr := range lhMap {
if _, ok := staticList[lhAddr]; !ok { if _, ok := staticList[lhAddr]; !ok {
return fmt.Errorf("lighthouse %s does not have a static_host_map entry", lhAddr) return fmt.Errorf("lighthouse %s does not have a static_host_map entry", lhAddr)
} }
@@ -429,7 +431,8 @@ func (lh *LightHouse) loadStaticMap(c *config.C, staticList map[netip.Addr]struc
return util.NewContextualError("Unable to parse static_host_map entry", m{"host": k, "entry": i + 1}, err) return util.NewContextualError("Unable to parse static_host_map entry", m{"host": k, "entry": i + 1}, err)
} }
if !lh.myVpnNetworksTable.Contains(vpnAddr) { _, found := lh.myVpnNetworksTable.Lookup(vpnAddr)
if !found {
return util.NewContextualError("static_host_map key is not in our network, invalid", m{"vpnAddr": vpnAddr, "networks": lh.myVpnNetworks, "entry": i + 1}, nil) return util.NewContextualError("static_host_map key is not in our network, invalid", m{"vpnAddr": vpnAddr, "networks": lh.myVpnNetworks, "entry": i + 1}, nil)
} }
@@ -650,11 +653,9 @@ func (lh *LightHouse) shouldAdd(vpnAddr netip.Addr, to netip.Addr) bool {
return false return false
} }
if lh.myVpnNetworksTable.Contains(to) { _, found := lh.myVpnNetworksTable.Lookup(to)
return false
}
return true return !found
} }
// unlockedShouldAddV4 checks if to is allowed by our allow list // unlockedShouldAddV4 checks if to is allowed by our allow list
@@ -670,11 +671,8 @@ func (lh *LightHouse) unlockedShouldAddV4(vpnAddr netip.Addr, to *V4AddrPort) bo
return false return false
} }
if lh.myVpnNetworksTable.Contains(udpAddr.Addr()) { _, found := lh.myVpnNetworksTable.Lookup(udpAddr.Addr())
return false return !found
}
return true
} }
// unlockedShouldAddV6 checks if to is allowed by our allow list // unlockedShouldAddV6 checks if to is allowed by our allow list
@@ -690,11 +688,9 @@ func (lh *LightHouse) unlockedShouldAddV6(vpnAddr netip.Addr, to *V6AddrPort) bo
return false return false
} }
if lh.myVpnNetworksTable.Contains(udpAddr.Addr()) { _, found := lh.myVpnNetworksTable.Lookup(udpAddr.Addr())
return false
}
return true return !found
} }
func (lh *LightHouse) IsLighthouseAddr(vpnAddr netip.Addr) bool { func (lh *LightHouse) IsLighthouseAddr(vpnAddr netip.Addr) bool {
@@ -722,7 +718,7 @@ func (lh *LightHouse) startQueryWorker() {
} }
go func() { go func() {
nb := make([]byte, 12, 12) nb := make([]byte, 12)
out := make([]byte, mtu) out := make([]byte, mtu)
for { for {
@@ -850,7 +846,8 @@ func (lh *LightHouse) SendUpdate() {
lal := lh.GetLocalAllowList() lal := lh.GetLocalAllowList()
for _, e := range localAddrs(lh.l, lal) { for _, e := range localAddrs(lh.l, lal) {
if lh.myVpnNetworksTable.Contains(e) { _, found := lh.myVpnNetworksTable.Lookup(e)
if found {
continue continue
} }
@@ -862,7 +859,7 @@ func (lh *LightHouse) SendUpdate() {
} }
} }
nb := make([]byte, 12, 12) nb := make([]byte, 12)
out := make([]byte, mtu) out := make([]byte, mtu)
var v1Update, v2Update []byte var v1Update, v2Update []byte
@@ -964,7 +961,7 @@ type LightHouseHandler struct {
func (lh *LightHouse) NewRequestHandler() *LightHouseHandler { func (lh *LightHouse) NewRequestHandler() *LightHouseHandler {
lhh := &LightHouseHandler{ lhh := &LightHouseHandler{
lh: lh, lh: lh,
nb: make([]byte, 12, 12), nb: make([]byte, 12),
out: make([]byte, mtu), out: make([]byte, mtu),
l: lh.l, l: lh.l,
pb: make([]byte, mtu), pb: make([]byte, mtu),
@@ -1155,7 +1152,7 @@ func (lhh *LightHouseHandler) coalesceAnswers(v cert.Version, c *cache, n *Nebul
if c.v4.learned != nil { if c.v4.learned != nil {
n.Details.V4AddrPorts = append(n.Details.V4AddrPorts, c.v4.learned) n.Details.V4AddrPorts = append(n.Details.V4AddrPorts, c.v4.learned)
} }
if c.v4.reported != nil && len(c.v4.reported) > 0 { if len(c.v4.reported) > 0 {
n.Details.V4AddrPorts = append(n.Details.V4AddrPorts, c.v4.reported...) n.Details.V4AddrPorts = append(n.Details.V4AddrPorts, c.v4.reported...)
} }
} }
@@ -1164,7 +1161,7 @@ func (lhh *LightHouseHandler) coalesceAnswers(v cert.Version, c *cache, n *Nebul
if c.v6.learned != nil { if c.v6.learned != nil {
n.Details.V6AddrPorts = append(n.Details.V6AddrPorts, c.v6.learned) n.Details.V6AddrPorts = append(n.Details.V6AddrPorts, c.v6.learned)
} }
if c.v6.reported != nil && len(c.v6.reported) > 0 { if len(c.v6.reported) > 0 {
n.Details.V6AddrPorts = append(n.Details.V6AddrPorts, c.v6.reported...) n.Details.V6AddrPorts = append(n.Details.V6AddrPorts, c.v6.reported...)
} }
} }
@@ -1362,7 +1359,7 @@ func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, fromVpn
//NOTE: we have to allocate a new output buffer here since we are spawning a new goroutine //NOTE: we have to allocate a new output buffer here since we are spawning a new goroutine
// for each punchBack packet. We should move this into a timerwheel or a single goroutine // for each punchBack packet. We should move this into a timerwheel or a single goroutine
// managed by a channel. // managed by a channel.
w.SendMessageToVpnAddr(header.Test, header.TestRequest, queryVpnAddr, []byte(""), make([]byte, 12, 12), make([]byte, mtu)) w.SendMessageToVpnAddr(header.Test, header.TestRequest, queryVpnAddr, []byte(""), make([]byte, 12), make([]byte, mtu))
}() }()
} }
} }
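
The coalesceAnswers hunks drop the explicit nil check before len(...) because len of a nil slice is zero in Go, so the guard was redundant. A minimal sketch:

package main

import "fmt"

func main() {
	var reported []string                       // nil slice
	fmt.Println(reported == nil, len(reported)) // true 0

	if len(reported) > 0 { // safe even when reported is nil
		fmt.Println("have entries")
	}
}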


@@ -31,8 +31,8 @@ func TestOldIPv4Only(t *testing.T) {
func Test_lhStaticMapping(t *testing.T) { func Test_lhStaticMapping(t *testing.T) {
l := test.NewLogger() l := test.NewLogger()
myVpnNet := netip.MustParsePrefix("10.128.0.1/16") myVpnNet := netip.MustParsePrefix("10.128.0.1/16")
nt := new(bart.Lite) nt := new(bart.Table[struct{}])
nt.Insert(myVpnNet) nt.Insert(myVpnNet, struct{}{})
cs := &CertState{ cs := &CertState{
myVpnNetworks: []netip.Prefix{myVpnNet}, myVpnNetworks: []netip.Prefix{myVpnNet},
myVpnNetworksTable: nt, myVpnNetworksTable: nt,
@@ -56,8 +56,8 @@ func Test_lhStaticMapping(t *testing.T) {
func TestReloadLighthouseInterval(t *testing.T) { func TestReloadLighthouseInterval(t *testing.T) {
l := test.NewLogger() l := test.NewLogger()
myVpnNet := netip.MustParsePrefix("10.128.0.1/16") myVpnNet := netip.MustParsePrefix("10.128.0.1/16")
nt := new(bart.Lite) nt := new(bart.Table[struct{}])
nt.Insert(myVpnNet) nt.Insert(myVpnNet, struct{}{})
cs := &CertState{ cs := &CertState{
myVpnNetworks: []netip.Prefix{myVpnNet}, myVpnNetworks: []netip.Prefix{myVpnNet},
myVpnNetworksTable: nt, myVpnNetworksTable: nt,
@@ -91,8 +91,8 @@ func TestReloadLighthouseInterval(t *testing.T) {
func BenchmarkLighthouseHandleRequest(b *testing.B) { func BenchmarkLighthouseHandleRequest(b *testing.B) {
l := test.NewLogger() l := test.NewLogger()
myVpnNet := netip.MustParsePrefix("10.128.0.1/0") myVpnNet := netip.MustParsePrefix("10.128.0.1/0")
nt := new(bart.Lite) nt := new(bart.Table[struct{}])
nt.Insert(myVpnNet) nt.Insert(myVpnNet, struct{}{})
cs := &CertState{ cs := &CertState{
myVpnNetworks: []netip.Prefix{myVpnNet}, myVpnNetworks: []netip.Prefix{myVpnNet},
myVpnNetworksTable: nt, myVpnNetworksTable: nt,
@@ -196,8 +196,8 @@ func TestLighthouse_Memory(t *testing.T) {
c.Settings["listen"] = map[string]any{"port": 4242} c.Settings["listen"] = map[string]any{"port": 4242}
myVpnNet := netip.MustParsePrefix("10.128.0.1/24") myVpnNet := netip.MustParsePrefix("10.128.0.1/24")
nt := new(bart.Lite) nt := new(bart.Table[struct{}])
nt.Insert(myVpnNet) nt.Insert(myVpnNet, struct{}{})
cs := &CertState{ cs := &CertState{
myVpnNetworks: []netip.Prefix{myVpnNet}, myVpnNetworks: []netip.Prefix{myVpnNet},
myVpnNetworksTable: nt, myVpnNetworksTable: nt,
@@ -281,8 +281,8 @@ func TestLighthouse_reload(t *testing.T) {
c.Settings["listen"] = map[string]any{"port": 4242} c.Settings["listen"] = map[string]any{"port": 4242}
myVpnNet := netip.MustParsePrefix("10.128.0.1/24") myVpnNet := netip.MustParsePrefix("10.128.0.1/24")
nt := new(bart.Lite) nt := new(bart.Table[struct{}])
nt.Insert(myVpnNet) nt.Insert(myVpnNet, struct{}{})
cs := &CertState{ cs := &CertState{
myVpnNetworks: []netip.Prefix{myVpnNet}, myVpnNetworks: []netip.Prefix{myVpnNet},
myVpnNetworksTable: nt, myVpnNetworksTable: nt,
@@ -484,12 +484,12 @@ func Test_findNetworkUnion(t *testing.T) {
assert.Equal(t, out, afe81) assert.Equal(t, out, afe81)
//falsey cases //falsey cases
out, ok = findNetworkUnion([]netip.Prefix{oneSevenTwo, fe80}, []netip.Addr{a1}) _, ok = findNetworkUnion([]netip.Prefix{oneSevenTwo, fe80}, []netip.Addr{a1})
assert.False(t, ok) assert.False(t, ok)
out, ok = findNetworkUnion([]netip.Prefix{fc00, fe80}, []netip.Addr{a1}) _, ok = findNetworkUnion([]netip.Prefix{fc00, fe80}, []netip.Addr{a1})
assert.False(t, ok) assert.False(t, ok)
out, ok = findNetworkUnion([]netip.Prefix{oneSevenTwo, fc00}, []netip.Addr{a1, afe81}) _, ok = findNetworkUnion([]netip.Prefix{oneSevenTwo, fc00}, []netip.Addr{a1, afe81})
assert.False(t, ok) assert.False(t, ok)
out, ok = findNetworkUnion([]netip.Prefix{fc00}, []netip.Addr{a1, afe81}) _, ok = findNetworkUnion([]netip.Prefix{fc00}, []netip.Addr{a1, afe81})
assert.False(t, ok) assert.False(t, ok)
} }

main.go

@@ -185,7 +185,6 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
hostMap := NewHostMapFromConfig(l, c) hostMap := NewHostMapFromConfig(l, c)
punchy := NewPunchyFromConfig(l, c) punchy := NewPunchyFromConfig(l, c)
connManager := newConnectionManagerFromConfig(l, c, hostMap, punchy)
lightHouse, err := NewLightHouseFromConfig(ctx, l, c, pki.getCertState(), udpConns[0], punchy) lightHouse, err := NewLightHouseFromConfig(ctx, l, c, pki.getCertState(), udpConns[0], punchy)
if err != nil { if err != nil {
return nil, util.ContextualizeIfNeeded("Failed to initialize lighthouse handler", err) return nil, util.ContextualizeIfNeeded("Failed to initialize lighthouse handler", err)
@@ -221,26 +220,31 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
} }
} }
checkInterval := c.GetInt("timers.connection_alive_interval", 5)
pendingDeletionInterval := c.GetInt("timers.pending_deletion_interval", 10)
ifConfig := &InterfaceConfig{ ifConfig := &InterfaceConfig{
HostMap: hostMap, HostMap: hostMap,
Inside: tun, Inside: tun,
Outside: udpConns[0], Outside: udpConns[0],
pki: pki, pki: pki,
Firewall: fw, Firewall: fw,
ServeDns: serveDns, ServeDns: serveDns,
HandshakeManager: handshakeManager, HandshakeManager: handshakeManager,
connectionManager: connManager, lightHouse: lightHouse,
lightHouse: lightHouse, checkInterval: time.Second * time.Duration(checkInterval),
tryPromoteEvery: c.GetUint32("counters.try_promote", defaultPromoteEvery), pendingDeletionInterval: time.Second * time.Duration(pendingDeletionInterval),
reQueryEvery: c.GetUint32("counters.requery_every_packets", defaultReQueryEvery), tryPromoteEvery: c.GetUint32("counters.try_promote", defaultPromoteEvery),
reQueryWait: c.GetDuration("timers.requery_wait_duration", defaultReQueryWait), reQueryEvery: c.GetUint32("counters.requery_every_packets", defaultReQueryEvery),
DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false), reQueryWait: c.GetDuration("timers.requery_wait_duration", defaultReQueryWait),
DropMulticast: c.GetBool("tun.drop_multicast", false), DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
routines: routines, DropMulticast: c.GetBool("tun.drop_multicast", false),
MessageMetrics: messageMetrics, routines: routines,
version: buildVersion, MessageMetrics: messageMetrics,
relayManager: NewRelayManager(ctx, l, hostMap, c), version: buildVersion,
punchy: punchy, relayManager: NewRelayManager(ctx, l, hostMap, c),
punchy: punchy,
ConntrackCacheTimeout: conntrackCacheTimeout, ConntrackCacheTimeout: conntrackCacheTimeout,
l: l, l: l,
} }
@@ -292,6 +296,5 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
statsStart, statsStart,
dnsStart, dnsStart,
lightHouse.StartUpdateWorker, lightHouse.StartUpdateWorker,
connManager.Start,
}, nil }, nil
} }
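
The timers wiring above reads whole-second integers from config and converts them to time.Duration. A small sketch of that conversion, using the same default of 5 seconds:

package main

import (
	"fmt"
	"time"
)

func main() {
	checkInterval := 5 // e.g. timers.connection_alive_interval, in seconds
	interval := time.Second * time.Duration(checkInterval)
	fmt.Println(interval) // 5s
}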


@@ -17,7 +17,7 @@ type MessageMetrics struct {
func (m *MessageMetrics) Rx(t header.MessageType, s header.MessageSubType, i int64) { func (m *MessageMetrics) Rx(t header.MessageType, s header.MessageSubType, i int64) {
if m != nil { if m != nil {
if t >= 0 && int(t) < len(m.rx) && s >= 0 && int(s) < len(m.rx[t]) { if int(t) < len(m.rx) && int(s) < len(m.rx[t]) {
m.rx[t][s].Inc(i) m.rx[t][s].Inc(i)
} else if m.rxUnknown != nil { } else if m.rxUnknown != nil {
m.rxUnknown.Inc(i) m.rxUnknown.Inc(i)
@@ -26,7 +26,7 @@ func (m *MessageMetrics) Rx(t header.MessageType, s header.MessageSubType, i int
} }
func (m *MessageMetrics) Tx(t header.MessageType, s header.MessageSubType, i int64) { func (m *MessageMetrics) Tx(t header.MessageType, s header.MessageSubType, i int64) {
if m != nil { if m != nil {
if t >= 0 && int(t) < len(m.tx) && s >= 0 && int(s) < len(m.tx[t]) { if int(t) < len(m.tx) && int(s) < len(m.tx[t]) {
m.tx[t][s].Inc(i) m.tx[t][s].Inc(i)
} else if m.txUnknown != nil { } else if m.txUnknown != nil {
m.txUnknown.Inc(i) m.txUnknown.Inc(i)
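
header.MessageType is uint8 (shown earlier in this diff) and MessageSubType is presumably unsigned as well, so the t >= 0 / s >= 0 half of the bounds check is always true; only the upper bound actually protects the slice index. A sketch:

package main

import "fmt"

type MessageType uint8

func main() {
	counters := make([]int, 4)
	var t MessageType = 9

	if int(t) < len(counters) { // t >= 0 holds for every uint8 value
		counters[t]++
	} else {
		fmt.Println("unknown message type", t)
	}
}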


@@ -25,11 +25,6 @@ func NewNebulaCipherState(s *noise.CipherState) *NebulaCipherState {
} }
type cipherAEADDanger interface {
EncryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error)
DecryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error)
}
// EncryptDanger encrypts and authenticates a given payload. // EncryptDanger encrypts and authenticates a given payload.
// //
// out is a destination slice to hold the output of the EncryptDanger operation. // out is a destination slice to hold the output of the EncryptDanger operation.
@@ -40,25 +35,20 @@ type cipherAEADDanger interface {
// be re-used by callers to minimize garbage collection. // be re-used by callers to minimize garbage collection.
func (s *NebulaCipherState) EncryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error) { func (s *NebulaCipherState) EncryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error) {
if s != nil { if s != nil {
switch ce := s.c.(type) { // TODO: Is this okay now that we have made messageCounter atomic?
case cipherAEADDanger: // Alternative may be to split the counter space into ranges
return ce.EncryptDanger(out, ad, plaintext, n, nb) //if n <= s.n {
default: // return nil, errors.New("CRITICAL: a duplicate counter value was used")
// TODO: Is this okay now that we have made messageCounter atomic? //}
// Alternative may be to split the counter space into ranges //s.n = n
//if n <= s.n { nb[0] = 0
// return nil, errors.New("CRITICAL: a duplicate counter value was used") nb[1] = 0
//} nb[2] = 0
//s.n = n nb[3] = 0
nb[0] = 0 noiseEndianness.PutUint64(nb[4:], n)
nb[1] = 0 out = s.c.(cipher.AEAD).Seal(out, nb, plaintext, ad)
nb[2] = 0 //l.Debugf("Encryption: outlen: %d, nonce: %d, ad: %s, plainlen %d", len(out), n, ad, len(plaintext))
nb[3] = 0 return out, nil
noiseEndianness.PutUint64(nb[4:], n)
out = s.c.(cipher.AEAD).Seal(out, nb, plaintext, ad)
//l.Debugf("Encryption: outlen: %d, nonce: %d, ad: %s, plainlen %d", len(out), n, ad, len(plaintext))
return out, nil
}
} else { } else {
return nil, errors.New("no cipher state available to encrypt") return nil, errors.New("no cipher state available to encrypt")
} }
@@ -66,17 +56,12 @@ func (s *NebulaCipherState) EncryptDanger(out, ad, plaintext []byte, n uint64, n
func (s *NebulaCipherState) DecryptDanger(out, ad, ciphertext []byte, n uint64, nb []byte) ([]byte, error) { func (s *NebulaCipherState) DecryptDanger(out, ad, ciphertext []byte, n uint64, nb []byte) ([]byte, error) {
if s != nil { if s != nil {
switch ce := s.c.(type) { nb[0] = 0
case cipherAEADDanger: nb[1] = 0
return ce.DecryptDanger(out, ad, ciphertext, n, nb) nb[2] = 0
default: nb[3] = 0
nb[0] = 0 noiseEndianness.PutUint64(nb[4:], n)
nb[1] = 0 return s.c.(cipher.AEAD).Open(out, nb, ciphertext, ad)
nb[2] = 0
nb[3] = 0
noiseEndianness.PutUint64(nb[4:], n)
return s.c.(cipher.AEAD).Open(out, nb, ciphertext, ad)
}
} else { } else {
return []byte{}, nil return []byte{}, nil
} }
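
Both EncryptDanger paths build the 12-byte nonce the same way: a 4-byte zero prefix followed by the 8-byte message counter, then a Seal/Open call on the underlying AEAD. A self-contained sketch of that layout using a plain crypto/aes GCM from the standard library (not Nebula's noise cipher state); a big-endian counter is assumed here, whatever noiseEndianness resolves to in the package:

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"encoding/binary"
	"fmt"
)

func main() {
	key := make([]byte, 32) // all-zero key, demo only
	block, err := aes.NewCipher(key)
	if err != nil {
		panic(err)
	}
	aead, err := cipher.NewGCM(block)
	if err != nil {
		panic(err)
	}

	var n uint64 = 7       // per-message counter
	nb := make([]byte, 12) // reusable nonce buffer, as in the hot path above
	// nb[0:4] stay zero; the counter fills the remaining 8 bytes
	binary.BigEndian.PutUint64(nb[4:], n)

	ad := []byte("authenticated header bytes")
	ct := aead.Seal(nil, nb, []byte("payload"), ad)
	pt, err := aead.Open(nil, nb, ct, ad)
	fmt.Println(string(pt), err) // payload <nil>
}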


@@ -1,78 +0,0 @@
//go:build fips140v1.0
// +build fips140v1.0
package noiseutil
import (
"crypto/cipher"
"encoding/binary"
// unsafe needed for go:linkname
_ "unsafe"
"github.com/flynn/noise"
)
// EncryptLockNeeded indicates if calls to Encrypt need a lock
// This is true for fips140 because the Seal function verifies that the
// nonce is strictly increasing.
const EncryptLockNeeded = true
// TODO: Use NewGCMWithCounterNonce once available:
// - https://github.com/golang/go/issues/73110
// Using tls.aeadAESGCM gives us the TLS 1.2 GCM, which also verifies
// that the nonce is strictly increasing.
//
//go:linkname aeadAESGCM crypto/tls.aeadAESGCM
func aeadAESGCM(key, noncePrefix []byte) cipher.AEAD
type cipherFn struct {
fn func([32]byte) noise.Cipher
name string
}
func (c cipherFn) Cipher(k [32]byte) noise.Cipher { return c.fn(k) }
func (c cipherFn) CipherName() string { return c.name }
// CipherAESGCM is the AES256-GCM AEAD cipher (using aeadAESGCM when fips140 is enabled)
var CipherAESGCM noise.CipherFunc = cipherFn{cipherAESGCM, "AESGCM"}
// tls.aeadAESGCM uses a 4 byte static prefix and an 8 byte nonce
var emptyPrefix = []byte{0, 0, 0, 0}
func cipherAESGCM(k [32]byte) noise.Cipher {
gcm := aeadAESGCM(k[:], emptyPrefix)
return aeadCipher{
gcm,
func(n uint64) []byte {
// tls.aeadAESGCM uses a 4 byte static prefix and an 8 byte nonce
var nonce [8]byte
binary.BigEndian.PutUint64(nonce[:], n)
return nonce[:]
},
}
}
type aeadCipher struct {
cipher.AEAD
nonce func(uint64) []byte
}
func (c aeadCipher) Encrypt(out []byte, n uint64, ad, plaintext []byte) []byte {
return c.Seal(out, c.nonce(n), plaintext, ad)
}
func (c aeadCipher) Decrypt(out []byte, n uint64, ad, ciphertext []byte) ([]byte, error) {
return c.Open(out, c.nonce(n), ciphertext, ad)
}
func (c aeadCipher) EncryptDanger(out, ad, plaintext []byte, n uint64, nb []byte) ([]byte, error) {
binary.BigEndian.PutUint64(nb[4:], n)
out = c.Seal(out, nb[4:], plaintext, ad)
return out, nil
}
func (c aeadCipher) DecryptDanger(out, ad, ciphertext []byte, n uint64, nb []byte) ([]byte, error) {
binary.BigEndian.PutUint64(nb[4:], n)
return c.Open(out, nb[4:], ciphertext, ad)
}


@@ -1,42 +0,0 @@
//go:build fips140v1.0
// +build fips140v1.0
package noiseutil
import (
"crypto/fips140"
"encoding/hex"
"log"
"testing"
"github.com/stretchr/testify/assert"
)
func TestEncryptLockNeeded(t *testing.T) {
assert.True(t, EncryptLockNeeded)
}
// Ensure NewAESGCM validates the nonce is non-repeating
func TestNewAESGCM(t *testing.T) {
assert.True(t, fips140.Enabled())
key, _ := hex.DecodeString("feffe9928665731c6d6a8f9467308308feffe9928665731c6d6a8f9467308308")
iv, _ := hex.DecodeString("00000000facedbaddecaf888")
plaintext, _ := hex.DecodeString("d9313225f88406e5a55909c5aff5269a86a7a9531534f7da2e4c303d8a318a721c3c0c95956809532fcf0e2449a6b525b16aedf5aa0de657ba637b39")
aad, _ := hex.DecodeString("feedfacedeadbeeffeedfacedeadbeefabaddad2")
expected, _ := hex.DecodeString("72ce2ea385f88c20d856e9d1248c2ca08562bbe8a61459ffae06ec393540518e9b6b4c40a146053f26a3df83c5384a48d273148b15aba64d970107432b2892741359275676441c1572c3fa9e")
var keyArray [32]byte
copy(keyArray[:], key)
c := CipherAESGCM.Cipher(keyArray)
aead := c.(aeadCipher).AEAD
dst := aead.Seal([]byte{}, iv, plaintext, aad)
log.Printf("%x", dst)
assert.Equal(t, expected, dst)
// We expect this to fail since we are re-encrypting with a repeat IV
assert.PanicsWithValue(t, "crypto/cipher: counter decreased", func() {
dst = aead.Seal([]byte{}, iv, plaintext, aad)
})
}


@@ -1,5 +1,5 @@
//go:build !boringcrypto && !fips140v1.0 //go:build !boringcrypto
// +build !boringcrypto,!fips140v1.0 // +build !boringcrypto
package noiseutil package noiseutil


@@ -1,5 +1,5 @@
//go:build !boringcrypto && !fips140v1.0 //go:build !boringcrypto
// +build !boringcrypto,!fips140v1.0 // +build !boringcrypto
package noiseutil package noiseutil
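
Each side of this hunk keeps a //go:build line and a legacy // +build line expressing the same condition: the //go:build form (Go 1.17+) uses boolean operators, while // +build joins AND terms with commas, and gofmt keeps the pair in sync. Written out in full for one side, as illustration:

//go:build !boringcrypto && !fips140v1.0
// +build !boringcrypto,!fips140v1.0

package noiseutil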


@@ -31,7 +31,8 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
//l.Error("in packet ", header, packet[HeaderLen:]) //l.Error("in packet ", header, packet[HeaderLen:])
if ip.IsValid() { if ip.IsValid() {
if f.myVpnNetworksTable.Contains(ip.Addr()) { _, found := f.myVpnNetworksTable.Lookup(ip.Addr())
if found {
if f.l.Level >= logrus.DebugLevel { if f.l.Level >= logrus.DebugLevel {
f.l.WithField("udpAddr", ip).Debug("Refusing to process double encrypted packet") f.l.WithField("udpAddr", ip).Debug("Refusing to process double encrypted packet")
} }
@@ -81,7 +82,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
// Pull the Roaming parts up here, and return in all call paths. // Pull the Roaming parts up here, and return in all call paths.
f.handleHostRoaming(hostinfo, ip) f.handleHostRoaming(hostinfo, ip)
// Track usage of both the HostInfo and the Relay for the received & authenticated packet // Track usage of both the HostInfo and the Relay for the received & authenticated packet
f.connectionManager.In(hostinfo) f.connectionManager.In(hostinfo.localIndexId)
f.connectionManager.RelayUsed(h.RemoteIndex) f.connectionManager.RelayUsed(h.RemoteIndex)
relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex) relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex)
@@ -213,7 +214,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
f.handleHostRoaming(hostinfo, ip) f.handleHostRoaming(hostinfo, ip)
f.connectionManager.In(hostinfo) f.connectionManager.In(hostinfo.localIndexId)
} }
// closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote // closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote
@@ -227,7 +228,7 @@ func (f *Interface) closeTunnel(hostInfo *HostInfo) {
// sendCloseTunnel is a helper function to send a proper close tunnel packet to a remote // sendCloseTunnel is a helper function to send a proper close tunnel packet to a remote
func (f *Interface) sendCloseTunnel(h *HostInfo) { func (f *Interface) sendCloseTunnel(h *HostInfo) {
f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12, 12), make([]byte, mtu)) f.send(header.CloseTunnel, 0, h.ConnectionState, h, []byte{}, make([]byte, 12), make([]byte, mtu))
} }
func (f *Interface) handleHostRoaming(hostinfo *HostInfo, udpAddr netip.AddrPort) { func (f *Interface) handleHostRoaming(hostinfo *HostInfo, udpAddr netip.AddrPort) {
@@ -312,11 +313,12 @@ func parseV6(data []byte, incoming bool, fp *firewall.Packet) error {
offset := ipv6.HeaderLen // Start at the end of the ipv6 header offset := ipv6.HeaderLen // Start at the end of the ipv6 header
next := 0 next := 0
for { for {
if protoAt >= dataLen { if dataLen < offset {
break break
} }
proto := layers.IPProtocol(data[protoAt])
proto := layers.IPProtocol(data[protoAt])
//fmt.Println(proto, protoAt)
switch proto { switch proto {
case layers.IPProtocolICMPv6, layers.IPProtocolESP, layers.IPProtocolNoNextHeader: case layers.IPProtocolICMPv6, layers.IPProtocolESP, layers.IPProtocolNoNextHeader:
fp.Protocol = uint8(proto) fp.Protocol = uint8(proto)
@@ -364,7 +366,7 @@ func parseV6(data []byte, incoming bool, fp *firewall.Packet) error {
case layers.IPProtocolAH: case layers.IPProtocolAH:
// Auth headers, used by IPSec, have a different meaning for header length // Auth headers, used by IPSec, have a different meaning for header length
if dataLen <= offset+1 { if dataLen < offset+1 {
break break
} }
@@ -372,7 +374,7 @@ func parseV6(data []byte, incoming bool, fp *firewall.Packet) error {
default: default:
// Normal ipv6 header length processing // Normal ipv6 header length processing
if dataLen <= offset+1 { if dataLen < offset+1 {
break break
} }
@@ -498,7 +500,7 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
return false return false
} }
f.connectionManager.In(hostinfo) f.connectionManager.In(hostinfo.localIndexId)
_, err = f.readers[q].Write(out) _, err = f.readers[q].Write(out)
if err != nil { if err != nil {
f.l.WithError(err).Error("Failed to write to tun") f.l.WithError(err).Error("Failed to write to tun")
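
The parseV6 changes in this file adjust the bounds guards in the extension-header walk. The invariant being protected is simply that data[i] is only safe when len(data) > i; assuming the loop goes on to read the length byte at offset+1 (the second byte of an IPv6 extension header), a guard would look like this sketch, with nextHeaderLen being a hypothetical helper rather than code from the diff:

package main

import "fmt"

func nextHeaderLen(data []byte, offset int) (int, bool) {
	// Reading data[offset+1] requires len(data) >= offset+2, i.e. len(data) > offset+1.
	if len(data) <= offset+1 {
		return 0, false
	}
	return int(data[offset+1]), true
}

func main() {
	pkt := []byte{0x3a, 0x00} // next-header byte plus a length byte
	if l, ok := nextHeaderLen(pkt, 0); ok {
		fmt.Println("extension header length field:", l)
	}
}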


@@ -117,45 +117,6 @@ func Test_newPacket_v6(t *testing.T) {
err = newPacket(buffer.Bytes(), true, p) err = newPacket(buffer.Bytes(), true, p)
require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload) require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)
// A v6 packet with a hop-by-hop extension
// ICMPv6 Payload (Echo Request)
icmpLayer := layers.ICMPv6{
TypeCode: layers.ICMPv6TypeEchoRequest,
}
// Hop-by-Hop Extension Header
hopOption := layers.IPv6HopByHopOption{}
hopOption.OptionData = []byte{0, 0, 0, 0}
hopByHop := layers.IPv6HopByHop{}
hopByHop.Options = append(hopByHop.Options, &hopOption)
ip = layers.IPv6{
Version: 6,
HopLimit: 128,
NextHeader: layers.IPProtocolIPv6Destination,
SrcIP: net.IPv6linklocalallrouters,
DstIP: net.IPv6linklocalallnodes,
}
buffer.Clear()
err = gopacket.SerializeLayers(buffer, gopacket.SerializeOptions{
ComputeChecksums: false,
FixLengths: true,
}, &ip, &hopByHop, &icmpLayer)
if err != nil {
panic(err)
}
// Ensure buffer length checks during parsing with the next 2 tests.
// A full IPv6 header and 1 byte in the first extension, but missing
// the length byte.
err = newPacket(buffer.Bytes()[:41], true, p)
require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)
// A full IPv6 header plus 1 full extension, but only 1 byte of the
// next layer, missing length byte
err = newPacket(buffer.Bytes()[:49], true, p)
require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)
// A good ICMP packet // A good ICMP packet
ip = layers.IPv6{ ip = layers.IPv6{
Version: 6, Version: 6,
@@ -327,10 +288,6 @@ func Test_newPacket_v6(t *testing.T) {
assert.Equal(t, uint16(22), p.LocalPort) assert.Equal(t, uint16(22), p.LocalPort)
assert.False(t, p.Fragment) assert.False(t, p.Fragment)
// Ensure buffer bounds checking during processing
err = newPacket(b[:41], true, p)
require.ErrorIs(t, err, ErrIPv6PacketTooShort)
// Invalid AH header // Invalid AH header
b = buffer.Bytes() b = buffer.Bytes()
err = newPacket(b, true, p) err = newPacket(b, true, p)


@@ -3,7 +3,6 @@ package overlay
import ( import (
"fmt" "fmt"
"math" "math"
"net"
"net/netip" "net/netip"
"runtime" "runtime"
"strconv" "strconv"
@@ -305,29 +304,3 @@ func parseUnsafeRoutes(c *config.C, networks []netip.Prefix) ([]Route, error) {
return routes, nil return routes, nil
} }
func ipWithin(o *net.IPNet, i *net.IPNet) bool {
// Make sure o contains the lowest form of i
if !o.Contains(i.IP.Mask(i.Mask)) {
return false
}
// Find the max ip in i
ip4 := i.IP.To4()
if ip4 == nil {
return false
}
last := make(net.IP, len(ip4))
copy(last, ip4)
for x := range ip4 {
last[x] |= ^i.Mask[x]
}
// Make sure o contains the max
if !o.Contains(last) {
return false
}
return true
}
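Reviewer note: the removed ipWithin helper answered "does prefix o fully contain prefix i?" using net.IPNet mask arithmetic. With net/netip the same question reduces to two comparisons; a rough standalone equivalent (not the code that replaces it) looks like this:

```go
package main

import (
	"fmt"
	"net/netip"
)

// prefixWithin reports whether every address in inner is also in outer.
// Masked() normalizes any stray host bits first.
func prefixWithin(outer, inner netip.Prefix) bool {
	outer, inner = outer.Masked(), inner.Masked()
	// An equal-or-shorter mask that covers inner's network address covers
	// the whole inner range.
	return outer.Bits() <= inner.Bits() && outer.Contains(inner.Addr())
}

func main() {
	outer := netip.MustParsePrefix("10.0.0.0/8")
	fmt.Println(prefixWithin(outer, netip.MustParsePrefix("10.1.0.0/16"))) // true
	fmt.Println(prefixWithin(outer, netip.MustParsePrefix("11.0.0.0/16"))) // false
}
```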

View File

@@ -225,6 +225,7 @@ func Test_parseUnsafeRoutes(t *testing.T) {
// no mtu // no mtu
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"via": "127.0.0.1", "route": "1.0.0.0/8"}}} c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"via": "127.0.0.1", "route": "1.0.0.0/8"}}}
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n}) routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
require.NoError(t, err)
assert.Len(t, routes, 1) assert.Len(t, routes, 1)
assert.Equal(t, 0, routes[0].MTU) assert.Equal(t, 0, routes[0].MTU)
@@ -318,7 +319,7 @@ func Test_makeRouteTree(t *testing.T) {
ip, err = netip.ParseAddr("1.1.0.1") ip, err = netip.ParseAddr("1.1.0.1")
require.NoError(t, err) require.NoError(t, err)
r, ok = routeTree.Lookup(ip) _, ok = routeTree.Lookup(ip)
assert.False(t, ok) assert.False(t, ok)
} }

View File

@@ -34,11 +34,10 @@ type tun struct {
deviceIndex int deviceIndex int
ioctlFd uintptr ioctlFd uintptr
Routes atomic.Pointer[[]Route] Routes atomic.Pointer[[]Route]
routeTree atomic.Pointer[bart.Table[routing.Gateways]] routeTree atomic.Pointer[bart.Table[routing.Gateways]]
routeChan chan struct{} routeChan chan struct{}
useSystemRoutes bool useSystemRoutes bool
useSystemRoutesBufferSize int
l *logrus.Logger l *logrus.Logger
} }
@@ -125,13 +124,12 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, multiqueu
func newTunGeneric(c *config.C, l *logrus.Logger, file *os.File, vpnNetworks []netip.Prefix) (*tun, error) { func newTunGeneric(c *config.C, l *logrus.Logger, file *os.File, vpnNetworks []netip.Prefix) (*tun, error) {
t := &tun{ t := &tun{
ReadWriteCloser: file, ReadWriteCloser: file,
fd: int(file.Fd()), fd: int(file.Fd()),
vpnNetworks: vpnNetworks, vpnNetworks: vpnNetworks,
TXQueueLen: c.GetInt("tun.tx_queue", 500), TXQueueLen: c.GetInt("tun.tx_queue", 500),
useSystemRoutes: c.GetBool("tun.use_system_route_table", false), useSystemRoutes: c.GetBool("tun.use_system_route_table", false),
useSystemRoutesBufferSize: c.GetInt("tun.use_system_route_table_buffer_size", 0), l: l,
l: l,
} }
err := t.reload(c, true) err := t.reload(c, true)
@@ -533,13 +531,7 @@ func (t *tun) watchRoutes() {
rch := make(chan netlink.RouteUpdate) rch := make(chan netlink.RouteUpdate)
doneChan := make(chan struct{}) doneChan := make(chan struct{})
netlinkOptions := netlink.RouteSubscribeOptions{ if err := netlink.RouteSubscribe(rch, doneChan); err != nil {
ReceiveBufferSize: t.useSystemRoutesBufferSize,
ReceiveBufferForceSize: t.useSystemRoutesBufferSize != 0,
ErrorCallback: func(e error) { t.l.WithError(e).Errorf("netlink error") },
}
if err := netlink.RouteSubscribeWithOptions(rch, doneChan, netlinkOptions); err != nil {
t.l.WithError(err).Errorf("failed to subscribe to system route changes") t.l.WithError(err).Errorf("failed to subscribe to system route changes")
return return
} }
@@ -549,14 +541,8 @@ func (t *tun) watchRoutes() {
go func() { go func() {
for { for {
select { select {
case r, ok := <-rch: case r := <-rch:
if ok { t.updateRoutes(r)
t.updateRoutes(r)
} else {
// may be should do something here as
// netlink stops sending updates
return
}
case <-doneChan: case <-doneChan:
// netlink.RouteSubscriber will close the rch for us // netlink.RouteSubscriber will close the rch for us
return return
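Reviewer note: the richer subscription being removed above combines a sized netlink receive buffer, an error callback, and an explicit check for the update channel closing. A stripped-down, Linux-only sketch of that pattern with github.com/vishvananda/netlink (field and function names exactly as they appear in the hunk; the logging and shutdown wiring here are placeholders):

```go
package main

import (
	"log"
	"time"

	"github.com/vishvananda/netlink"
)

func watchRoutes(bufferSize int, done chan struct{}) error {
	rch := make(chan netlink.RouteUpdate)
	opts := netlink.RouteSubscribeOptions{
		ReceiveBufferSize:      bufferSize,
		ReceiveBufferForceSize: bufferSize != 0, // only force when a size was configured
		ErrorCallback:          func(err error) { log.Printf("netlink error: %v", err) },
	}
	if err := netlink.RouteSubscribeWithOptions(rch, done, opts); err != nil {
		return err
	}
	for {
		select {
		case r, ok := <-rch:
			if !ok {
				// netlink stopped sending updates and closed the channel.
				return nil
			}
			log.Printf("route update: %+v", r.Route)
		case <-done:
			// The subscriber closes rch for us once done is closed.
			return nil
		}
	}
}

func main() {
	done := make(chan struct{})
	go func() { time.Sleep(5 * time.Second); close(done) }()
	if err := watchRoutes(0, done); err != nil {
		log.Fatal(err)
	}
}
```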

View File

@@ -1,8 +1,6 @@
package pkclient package pkclient
import ( import (
"crypto/ecdsa"
"crypto/x509"
"fmt" "fmt"
"io" "io"
"strconv" "strconv"
@@ -50,27 +48,6 @@ func FromUrl(pkurl string) (*PKClient, error) {
return New(module, uint(slotid), pin, id, label) return New(module, uint(slotid), pin, id, label)
} }
func ecKeyToArray(key *ecdsa.PublicKey) []byte {
x := make([]byte, 32)
y := make([]byte, 32)
key.X.FillBytes(x)
key.Y.FillBytes(y)
return append([]byte{0x04}, append(x, y...)...)
}
func formatPubkeyFromPublicKeyInfoAttr(d []byte) ([]byte, error) {
e, err := x509.ParsePKIXPublicKey(d)
if err != nil {
return nil, err
}
switch t := e.(type) {
case *ecdsa.PublicKey:
return ecKeyToArray(e.(*ecdsa.PublicKey)), nil
default:
return nil, fmt.Errorf("unknown public key type: %T", t)
}
}
func (c *PKClient) Test() error { func (c *PKClient) Test() error {
pub, err := c.GetPubKey() pub, err := c.GetPubKey()
if err != nil { if err != nil {

View File

@@ -3,6 +3,8 @@
package pkclient package pkclient
import ( import (
"crypto/ecdsa"
"crypto/x509"
"encoding/asn1" "encoding/asn1"
"errors" "errors"
"fmt" "fmt"
@@ -227,3 +229,24 @@ func (c *PKClient) GetPubKey() ([]byte, error) {
return nil, fmt.Errorf("unknown public key length: %d", len(d)) return nil, fmt.Errorf("unknown public key length: %d", len(d))
} }
} }
func ecKeyToArray(key *ecdsa.PublicKey) []byte {
x := make([]byte, 32)
y := make([]byte, 32)
key.X.FillBytes(x)
key.Y.FillBytes(y)
return append([]byte{0x04}, append(x, y...)...)
}
func formatPubkeyFromPublicKeyInfoAttr(d []byte) ([]byte, error) {
e, err := x509.ParsePKIXPublicKey(d)
if err != nil {
return nil, err
}
switch t := e.(type) {
case *ecdsa.PublicKey:
return ecKeyToArray(e.(*ecdsa.PublicKey)), nil
default:
return nil, fmt.Errorf("unknown public key type: %T", t)
}
}
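Reviewer note: the two helpers relocated above convert a DER-encoded PKIX public key into the raw 0x04||X||Y form used downstream. A quick way to sanity-check that layout with the standard library only (Go 1.20+ for the ECDH conversion; this is a throwaway demo, not part of the package):

```go
package main

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	// Generate a throwaway P-256 key standing in for the key the PKCS#11
	// code reads out of the PublicKeyInfo attribute.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Rebuild the uncompressed SEC1 point the same way ecKeyToArray does:
	// a 0x04 tag followed by the 32-byte X and Y coordinates.
	x := make([]byte, 32)
	y := make([]byte, 32)
	priv.PublicKey.X.FillBytes(x)
	priv.PublicKey.Y.FillBytes(y)
	manual := append([]byte{0x04}, append(x, y...)...)

	// crypto/ecdh emits the same encoding, which makes for an easy check.
	ecdhPub, err := priv.PublicKey.ECDH()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(manual))                          // 65
	fmt.Println(bytes.Equal(manual, ecdhPub.Bytes())) // true
}
```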

View File

@@ -7,10 +7,10 @@ import "errors"
type PKClient struct { type PKClient struct {
} }
var notImplemented = errors.New("not implemented") var errNotImplemented = errors.New("not implemented")
func New(hsmPath string, slotId uint, pin string, id string, label string) (*PKClient, error) { func New(hsmPath string, slotId uint, pin string, id string, label string) (*PKClient, error) {
return nil, notImplemented return nil, errNotImplemented
} }
func (c *PKClient) Close() error { func (c *PKClient) Close() error {
@@ -18,13 +18,13 @@ func (c *PKClient) Close() error {
} }
func (c *PKClient) SignASN1(data []byte) ([]byte, error) { func (c *PKClient) SignASN1(data []byte) ([]byte, error) {
return nil, notImplemented return nil, errNotImplemented
} }
func (c *PKClient) DeriveNoise(_ []byte) ([]byte, error) { func (c *PKClient) DeriveNoise(_ []byte) ([]byte, error) {
return nil, notImplemented return nil, errNotImplemented
} }
func (c *PKClient) GetPubKey() ([]byte, error) { func (c *PKClient) GetPubKey() ([]byte, error) {
return nil, notImplemented return nil, errNotImplemented
} }

pki.go
View File

@@ -39,10 +39,10 @@ type CertState struct {
cipher string cipher string
myVpnNetworks []netip.Prefix myVpnNetworks []netip.Prefix
myVpnNetworksTable *bart.Lite myVpnNetworksTable *bart.Table[struct{}]
myVpnAddrs []netip.Addr myVpnAddrs []netip.Addr
myVpnAddrsTable *bart.Lite myVpnAddrsTable *bart.Table[struct{}]
myVpnBroadcastAddrsTable *bart.Lite myVpnBroadcastAddrsTable *bart.Table[struct{}]
} }
func NewPKIFromConfig(l *logrus.Logger, c *config.C) (*PKI, error) { func NewPKIFromConfig(l *logrus.Logger, c *config.C) (*PKI, error) {
@@ -345,9 +345,9 @@ func newCertState(dv cert.Version, v1, v2 cert.Certificate, pkcs11backed bool, p
cs := CertState{ cs := CertState{
privateKey: privateKey, privateKey: privateKey,
pkcs11Backed: pkcs11backed, pkcs11Backed: pkcs11backed,
myVpnNetworksTable: new(bart.Lite), myVpnNetworksTable: new(bart.Table[struct{}]),
myVpnAddrsTable: new(bart.Lite), myVpnAddrsTable: new(bart.Table[struct{}]),
myVpnBroadcastAddrsTable: new(bart.Lite), myVpnBroadcastAddrsTable: new(bart.Table[struct{}]),
} }
if v1 != nil && v2 != nil { if v1 != nil && v2 != nil {
@@ -415,16 +415,16 @@ func newCertState(dv cert.Version, v1, v2 cert.Certificate, pkcs11backed bool, p
for _, network := range crt.Networks() { for _, network := range crt.Networks() {
cs.myVpnNetworks = append(cs.myVpnNetworks, network) cs.myVpnNetworks = append(cs.myVpnNetworks, network)
cs.myVpnNetworksTable.Insert(network) cs.myVpnNetworksTable.Insert(network, struct{}{})
cs.myVpnAddrs = append(cs.myVpnAddrs, network.Addr()) cs.myVpnAddrs = append(cs.myVpnAddrs, network.Addr())
cs.myVpnAddrsTable.Insert(netip.PrefixFrom(network.Addr(), network.Addr().BitLen())) cs.myVpnAddrsTable.Insert(netip.PrefixFrom(network.Addr(), network.Addr().BitLen()), struct{}{})
if network.Addr().Is4() { if network.Addr().Is4() {
addr := network.Masked().Addr().As4() addr := network.Masked().Addr().As4()
mask := net.CIDRMask(network.Bits(), network.Addr().BitLen()) mask := net.CIDRMask(network.Bits(), network.Addr().BitLen())
binary.BigEndian.PutUint32(addr[:], binary.BigEndian.Uint32(addr[:])|^binary.BigEndian.Uint32(mask)) binary.BigEndian.PutUint32(addr[:], binary.BigEndian.Uint32(addr[:])|^binary.BigEndian.Uint32(mask))
cs.myVpnBroadcastAddrsTable.Insert(netip.PrefixFrom(netip.AddrFrom4(addr), network.Addr().BitLen())) cs.myVpnBroadcastAddrsTable.Insert(netip.PrefixFrom(netip.AddrFrom4(addr), network.Addr().BitLen()), struct{}{})
} }
} }
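Reviewer note: this hunk (and the relay_manager one below) swaps between two ways of using a prefix table as a plain set. Both call shapes are taken from the diff itself; the import path is assumed to be the gaissmai/bart module already used by the tree. A minimal sketch of the two idioms side by side:

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	pfx := netip.MustParsePrefix("192.168.0.0/24")
	addr := netip.MustParseAddr("192.168.0.10")

	// Table[struct{}] used purely as a set: the value is throwaway and
	// membership comes back as the second return of Lookup.
	set := new(bart.Table[struct{}])
	set.Insert(pfx, struct{}{})
	_, found := set.Lookup(addr)
	fmt.Println(found) // true

	// Lite drops the value entirely, so inserts and membership checks lose
	// the struct{}{} noise and the discarded Lookup result.
	lite := new(bart.Lite)
	lite.Insert(pfx)
	fmt.Println(lite.Contains(addr)) // true
}
```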

View File

@@ -241,13 +241,15 @@ func (rm *relayManager) handleCreateRelayRequest(v cert.Version, h *HostInfo, f
logMsg.Info("handleCreateRelayRequest") logMsg.Info("handleCreateRelayRequest")
// Is the source of the relay me? This should never happen, but did happen due to // Is the source of the relay me? This should never happen, but did happen due to
// an issue migrating relays over to newly re-handshaked host info objects. // an issue migrating relays over to newly re-handshaked host info objects.
if f.myVpnAddrsTable.Contains(from) { _, found := f.myVpnAddrsTable.Lookup(from)
if found {
logMsg.WithField("myIP", from).Error("Discarding relay request from myself") logMsg.WithField("myIP", from).Error("Discarding relay request from myself")
return return
} }
// Is the target of the relay me? // Is the target of the relay me?
if f.myVpnAddrsTable.Contains(target) { _, found = f.myVpnAddrsTable.Lookup(target)
if found {
existingRelay, ok := h.relayState.QueryRelayForByIp(from) existingRelay, ok := h.relayState.QueryRelayForByIp(from)
if ok { if ok {
switch existingRelay.State { switch existingRelay.State {

View File

@@ -263,9 +263,7 @@ func (r *RemoteList) CopyAddrs(preferredRanges []netip.Prefix) []netip.AddrPort
r.RLock() r.RLock()
defer r.RUnlock() defer r.RUnlock()
c := make([]netip.AddrPort, len(r.addrs)) c := make([]netip.AddrPort, len(r.addrs))
for i, v := range r.addrs { copy(c, r.addrs)
c[i] = v
}
return c return c
} }
@@ -326,9 +324,7 @@ func (r *RemoteList) CopyCache() *CacheMap {
} }
if mc.relay != nil { if mc.relay != nil {
for _, a := range mc.relay.relay { c.Relay = append(c.Relay, mc.relay.relay...)
c.Relay = append(c.Relay, a)
}
} }
} }
@@ -362,9 +358,7 @@ func (r *RemoteList) CopyBlockedRemotes() []netip.AddrPort {
defer r.RUnlock() defer r.RUnlock()
c := make([]netip.AddrPort, len(r.badRemotes)) c := make([]netip.AddrPort, len(r.badRemotes))
for i, v := range r.badRemotes { copy(c, r.badRemotes)
c[i] = v
}
return c return c
} }
@@ -569,9 +563,7 @@ func (r *RemoteList) unlockedCollect() {
} }
if c.relay != nil { if c.relay != nil {
for _, v := range c.relay.relay { relays = append(relays, c.relay.relay...)
relays = append(relays, v)
}
} }
} }
@@ -635,15 +627,15 @@ func (r *RemoteList) unlockedSort(preferredRanges []netip.Prefix) {
a4 := a.Addr().Is4() a4 := a.Addr().Is4()
b4 := b.Addr().Is4() b4 := b.Addr().Is4()
switch { switch {
case a4 == false && b4 == true: case !a4 && b4:
// If i is v6 and j is v4, i is less than j // If i is v6 and j is v4, i is less than j
return true return true
case a4 == true && b4 == false: case a4 && !b4:
// If j is v6 and i is v4, i is not less than j // If j is v6 and i is v4, i is not less than j
return false return false
case a4 == true && b4 == true: case a4 && b4:
// i and j are both ipv4 // i and j are both ipv4
aPrivate := a.Addr().IsPrivate() aPrivate := a.Addr().IsPrivate()
bPrivate := b.Addr().IsPrivate() bPrivate := b.Addr().IsPrivate()
@@ -691,7 +683,6 @@ func (r *RemoteList) unlockedSort(preferredRanges []netip.Prefix) {
} }
r.addrs = r.addrs[:a+1] r.addrs = r.addrs[:a+1]
return
} }
// minInt returns the minimum integer of a or b // minInt returns the minimum integer of a or b
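Reviewer note: the remote_list changes above are mechanical staticcheck cleanups: element-by-element loops become the copy builtin or an append with a spread, and comparisons of booleans against true/false become the bare expressions. A small sketch of the equivalences, using throwaway names:

```go
package main

import "fmt"

func main() {
	src := []int{1, 2, 3}

	// copy fills dst exactly as the removed index loop did.
	dst := make([]int, len(src))
	copy(dst, src)

	// append(acc, src...) replaces the per-element append loop.
	var acc []int
	acc = append(acc, src...)

	fmt.Println(dst, acc) // [1 2 3] [1 2 3]

	// Comparing booleans to literals is redundant; use the values directly.
	a4, b4 := false, true
	fmt.Println(!a4 && b4) // reads the same as a4 == false && b4 == true
}
```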

View File

@@ -9,10 +9,13 @@ import (
"math" "math"
"net" "net"
"net/netip" "net/netip"
"os"
"strings" "strings"
"sync" "sync"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula" "github.com/slackhq/nebula"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/overlay" "github.com/slackhq/nebula/overlay"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
"gvisor.dev/gvisor/pkg/buffer" "gvisor.dev/gvisor/pkg/buffer"
@@ -43,7 +46,14 @@ type Service struct {
} }
} }
func New(control *nebula.Control) (*Service, error) { func New(config *config.C) (*Service, error) {
logger := logrus.New()
logger.Out = os.Stdout
control, err := nebula.Main(config, false, "custom-app", logger, overlay.NewUserDeviceFromConfig)
if err != nil {
return nil, err
}
control.Start() control.Start()
ctx := control.Context() ctx := control.Context()
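Reviewer note: the Service constructor changes shape here; one side takes an already-built *nebula.Control, the other builds the control internally from a *config.C. A rough sketch of the caller-side wiring for the *nebula.Control variant follows. The nebula.Main call and overlay.NewUserDeviceFromConfig come straight from the hunk; the service import path and the config.NewC/LoadString helpers are assumptions based on how other callers in the tree load configuration, so adjust them to whatever your build actually exposes.

```go
package main

import (
	"os"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula"
	"github.com/slackhq/nebula/config"
	"github.com/slackhq/nebula/overlay"
	"github.com/slackhq/nebula/service" // assumed package path for the Service type above
)

func main() {
	logger := logrus.New()
	logger.Out = os.Stdout

	// Assumed config loading; swap in your own if NewC/LoadString differ.
	c := config.NewC(logger)
	if err := c.LoadString(os.Getenv("NEBULA_CONFIG")); err != nil {
		logger.Fatal(err)
	}

	// Same call the other side of this change makes inside New: user-mode
	// device, no config-test pass.
	control, err := nebula.Main(c, false, "custom-app", logger, overlay.NewUserDeviceFromConfig)
	if err != nil {
		logger.Fatal(err)
	}

	svc, err := service.New(control)
	if err != nil {
		logger.Fatal(err)
	}
	_ = svc // Dial/Listen through svc from here.
}
```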

View File

@@ -5,17 +5,13 @@ import (
"context" "context"
"errors" "errors"
"net/netip" "net/netip"
"os"
"testing" "testing"
"time" "time"
"dario.cat/mergo" "dario.cat/mergo"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/cert_test" "github.com/slackhq/nebula/cert_test"
"github.com/slackhq/nebula/config" "github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/overlay"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
"gopkg.in/yaml.v3" "gopkg.in/yaml.v3"
) )
@@ -75,15 +71,7 @@ func newSimpleService(caCrt cert.Certificate, caKey []byte, name string, udpIp n
panic(err) panic(err)
} }
logger := logrus.New() s, err := New(&c)
logger.Out = os.Stdout
control, err := nebula.Main(&c, false, "custom-app", logger, overlay.NewUserDeviceFromConfig)
if err != nil {
panic(err)
}
s, err := New(control)
if err != nil { if err != nil {
panic(err) panic(err)
} }

ssh.go
View File

@@ -527,11 +527,11 @@ func sshStartCpuProfile(fs any, a []string, w sshd.StringWriter) error {
return err return err
} }
func sshVersion(ifce *Interface, fs any, a []string, w sshd.StringWriter) error { func sshVersion(ifce *Interface, _ any, _ []string, w sshd.StringWriter) error {
return w.WriteLine(fmt.Sprintf("%s", ifce.version)) return w.WriteLine(ifce.version)
} }
func sshQueryLighthouse(ifce *Interface, fs any, a []string, w sshd.StringWriter) error { func sshQueryLighthouse(ifce *Interface, _ any, a []string, w sshd.StringWriter) error {
if len(a) == 0 { if len(a) == 0 {
return w.WriteLine("No vpn address was provided") return w.WriteLine("No vpn address was provided")
} }
@@ -584,7 +584,7 @@ func sshCloseTunnel(ifce *Interface, fs any, a []string, w sshd.StringWriter) er
hostInfo.ConnectionState, hostInfo.ConnectionState,
hostInfo, hostInfo,
[]byte{}, []byte{},
make([]byte, 12, 12), make([]byte, 12),
make([]byte, mtu), make([]byte, mtu),
) )
} }
@@ -614,12 +614,12 @@ func sshCreateTunnel(ifce *Interface, fs any, a []string, w sshd.StringWriter) e
hostInfo := ifce.hostMap.QueryVpnAddr(vpnAddr) hostInfo := ifce.hostMap.QueryVpnAddr(vpnAddr)
if hostInfo != nil { if hostInfo != nil {
return w.WriteLine(fmt.Sprintf("Tunnel already exists")) return w.WriteLine("Tunnel already exists")
} }
hostInfo = ifce.handshakeManager.QueryVpnAddr(vpnAddr) hostInfo = ifce.handshakeManager.QueryVpnAddr(vpnAddr)
if hostInfo != nil { if hostInfo != nil {
return w.WriteLine(fmt.Sprintf("Tunnel already handshaking")) return w.WriteLine("Tunnel already handshaking")
} }
var addr netip.AddrPort var addr netip.AddrPort
@@ -735,7 +735,7 @@ func sshGetMutexProfile(fs any, a []string, w sshd.StringWriter) error {
return w.WriteLine(fmt.Sprintf("Mutex profile created at %s", a)) return w.WriteLine(fmt.Sprintf("Mutex profile created at %s", a))
} }
func sshLogLevel(l *logrus.Logger, fs any, a []string, w sshd.StringWriter) error { func sshLogLevel(l *logrus.Logger, _ any, a []string, w sshd.StringWriter) error {
if len(a) == 0 { if len(a) == 0 {
return w.WriteLine(fmt.Sprintf("Log level is: %s", l.Level)) return w.WriteLine(fmt.Sprintf("Log level is: %s", l.Level))
} }
@@ -749,7 +749,7 @@ func sshLogLevel(l *logrus.Logger, fs any, a []string, w sshd.StringWriter) erro
return w.WriteLine(fmt.Sprintf("Log level is: %s", l.Level)) return w.WriteLine(fmt.Sprintf("Log level is: %s", l.Level))
} }
func sshLogFormat(l *logrus.Logger, fs any, a []string, w sshd.StringWriter) error { func sshLogFormat(l *logrus.Logger, _ any, a []string, w sshd.StringWriter) error {
if len(a) == 0 { if len(a) == 0 {
return w.WriteLine(fmt.Sprintf("Log format is: %s", reflect.TypeOf(l.Formatter))) return w.WriteLine(fmt.Sprintf("Log format is: %s", reflect.TypeOf(l.Formatter)))
} }
@@ -822,10 +822,10 @@ func sshPrintCert(ifce *Interface, fs any, a []string, w sshd.StringWriter) erro
return w.WriteLine(cert.String()) return w.WriteLine(cert.String())
} }
func sshPrintRelays(ifce *Interface, fs any, a []string, w sshd.StringWriter) error { func sshPrintRelays(ifce *Interface, fs any, _ []string, w sshd.StringWriter) error {
args, ok := fs.(*sshPrintTunnelFlags) args, ok := fs.(*sshPrintTunnelFlags)
if !ok { if !ok {
w.WriteLine(fmt.Sprintf("sshPrintRelays failed to convert args type")) w.WriteLine("sshPrintRelays failed to convert args type")
return nil return nil
} }
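Reviewer note: the ssh.go edits are the bulk of the lint pass: unused parameters renamed to _, make([]byte, 12, 12) losing its redundant capacity, and Sprintf calls that add no formatting collapsing to plain strings. The before/after of the Sprintf case, as a trivial standalone demo:

```go
package main

import "fmt"

type iface struct{ version string }

func main() {
	ifce := iface{version: "1.9.5"}

	before := fmt.Sprintf("%s", ifce.version) // flagged: %s on a string adds nothing
	after := ifce.version                     // equivalent, and what the diff keeps

	fmt.Println(before == after) // true
}
```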

View File

@@ -23,7 +23,6 @@ type SSHServer struct {
trustedCAs []ssh.PublicKey trustedCAs []ssh.PublicKey
// List of available commands // List of available commands
helpCommand *Command
commands *radix.Tree commands *radix.Tree
listener net.Listener listener net.Listener
@@ -43,7 +42,7 @@ func NewSSHServer(l *logrus.Entry) (*SSHServer, error) {
conns: make(map[int]*session), conns: make(map[int]*session),
} }
cc := ssh.CertChecker{ cc := &ssh.CertChecker{
IsUserAuthority: func(auth ssh.PublicKey) bool { IsUserAuthority: func(auth ssh.PublicKey) bool {
for _, ca := range s.trustedCAs { for _, ca := range s.trustedCAs {
if bytes.Equal(ca.Marshal(), auth.Marshal()) { if bytes.Equal(ca.Marshal(), auth.Marshal()) {
@@ -77,10 +76,11 @@ func NewSSHServer(l *logrus.Entry) (*SSHServer, error) {
}, },
} }
s.certChecker = cc
s.config = &ssh.ServerConfig{ s.config = &ssh.ServerConfig{
PublicKeyCallback: cc.Authenticate, PublicKeyCallback: cc.Authenticate,
ServerVersion: fmt.Sprintf("SSH-2.0-Nebula???"), ServerVersion: "SSH-2.0-Nebula???",
} }
s.RegisterCommand(&Command{ s.RegisterCommand(&Command{

View File

@@ -170,7 +170,6 @@ func (s *session) dispatchCommand(line string, w StringWriter) {
} }
_ = execCommand(c, args[1:], w) _ = execCommand(c, args[1:], w)
return
} }
func (s *session) Close() { func (s *session) Close() {

View File

@@ -30,15 +30,11 @@ func (NoopConn) Rebind() error {
func (NoopConn) LocalAddr() (netip.AddrPort, error) { func (NoopConn) LocalAddr() (netip.AddrPort, error) {
return netip.AddrPort{}, nil return netip.AddrPort{}, nil
} }
func (NoopConn) ListenOut(_ EncReader) { func (NoopConn) ListenOut(_ EncReader) {}
return
}
func (NoopConn) WriteTo(_ []byte, _ netip.AddrPort) error { func (NoopConn) WriteTo(_ []byte, _ netip.AddrPort) error {
return nil return nil
} }
func (NoopConn) ReloadConfig(_ *config.C) { func (NoopConn) ReloadConfig(_ *config.C) {}
return
}
func (NoopConn) Close() error { func (NoopConn) Close() error {
return nil return nil
} }

View File

@@ -1,5 +0,0 @@
package udp
import "errors"
var ErrInvalidIPv6RemoteForSocket = errors.New("listener is IPv4, but writing to IPv6 remote")

View File

@@ -3,62 +3,20 @@
package udp package udp
// Darwin support is primarily implemented in udp_generic, besides NewListenConfig
import ( import (
"context"
"encoding/binary"
"errors"
"fmt" "fmt"
"net" "net"
"net/netip" "net/netip"
"syscall" "syscall"
"unsafe"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/config"
"golang.org/x/sys/unix" "golang.org/x/sys/unix"
) )
type StdConn struct {
*net.UDPConn
isV4 bool
sysFd uintptr
l *logrus.Logger
}
var _ Conn = &StdConn{}
func NewListener(l *logrus.Logger, ip netip.Addr, port int, multi bool, batch int) (Conn, error) { func NewListener(l *logrus.Logger, ip netip.Addr, port int, multi bool, batch int) (Conn, error) {
lc := NewListenConfig(multi) return NewGenericListener(l, ip, port, multi, batch)
pc, err := lc.ListenPacket(context.TODO(), "udp", net.JoinHostPort(ip.String(), fmt.Sprintf("%v", port)))
if err != nil {
return nil, err
}
if uc, ok := pc.(*net.UDPConn); ok {
c := &StdConn{UDPConn: uc, l: l}
rc, err := uc.SyscallConn()
if err != nil {
return nil, fmt.Errorf("failed to open udp socket: %w", err)
}
err = rc.Control(func(fd uintptr) {
c.sysFd = fd
})
if err != nil {
return nil, fmt.Errorf("failed to get udp fd: %w", err)
}
la, err := c.LocalAddr()
if err != nil {
return nil, err
}
c.isV4 = la.Addr().Is4()
return c, nil
}
return nil, fmt.Errorf("unexpected PacketConn: %T %#v", pc, pc)
} }
func NewListenConfig(multi bool) net.ListenConfig { func NewListenConfig(multi bool) net.ListenConfig {
@@ -85,116 +43,16 @@ func NewListenConfig(multi bool) net.ListenConfig {
} }
} }
//go:linkname sendto golang.org/x/sys/unix.sendto func (u *GenericConn) Rebind() error {
//go:noescape rc, err := u.UDPConn.SyscallConn()
func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen int32) (err error)
func (u *StdConn) WriteTo(b []byte, ap netip.AddrPort) error {
var sa unsafe.Pointer
var addrLen int32
if u.isV4 {
if ap.Addr().Is6() {
return ErrInvalidIPv6RemoteForSocket
}
var rsa unix.RawSockaddrInet6
rsa.Family = unix.AF_INET6
rsa.Addr = ap.Addr().As16()
binary.BigEndian.PutUint16((*[2]byte)(unsafe.Pointer(&rsa.Port))[:], ap.Port())
sa = unsafe.Pointer(&rsa)
addrLen = syscall.SizeofSockaddrInet4
} else {
var rsa unix.RawSockaddrInet6
rsa.Family = unix.AF_INET6
rsa.Addr = ap.Addr().As16()
binary.BigEndian.PutUint16((*[2]byte)(unsafe.Pointer(&rsa.Port))[:], ap.Port())
sa = unsafe.Pointer(&rsa)
addrLen = syscall.SizeofSockaddrInet6
}
// Golang stdlib doesn't handle EAGAIN correctly in some situations so we do writes ourselves
// See https://github.com/golang/go/issues/73919
for {
//_, _, err := unix.Syscall6(unix.SYS_SENDTO, u.sysFd, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), 0, sa, addrLen)
err := sendto(int(u.sysFd), b, 0, sa, addrLen)
if err == nil {
// Written, get out before the error handling
return nil
}
if errors.Is(err, syscall.EINTR) {
// Write was interrupted, retry
continue
}
if errors.Is(err, syscall.EAGAIN) {
return &net.OpError{Op: "sendto", Err: unix.EWOULDBLOCK}
}
if errors.Is(err, syscall.EBADF) {
return net.ErrClosed
}
return &net.OpError{Op: "sendto", Err: err}
}
}
func (u *StdConn) LocalAddr() (netip.AddrPort, error) {
a := u.UDPConn.LocalAddr()
switch v := a.(type) {
case *net.UDPAddr:
addr, ok := netip.AddrFromSlice(v.IP)
if !ok {
return netip.AddrPort{}, fmt.Errorf("LocalAddr returned invalid IP address: %s", v.IP)
}
return netip.AddrPortFrom(addr, uint16(v.Port)), nil
default:
return netip.AddrPort{}, fmt.Errorf("LocalAddr returned: %#v", a)
}
}
func (u *StdConn) ReloadConfig(c *config.C) {
// TODO
}
func NewUDPStatsEmitter(udpConns []Conn) func() {
// No UDP stats for non-linux
return func() {}
}
func (u *StdConn) ListenOut(r EncReader) {
buffer := make([]byte, MTU)
for {
// Just read one packet at a time
n, rua, err := u.ReadFromUDPAddrPort(buffer)
if err != nil {
if errors.Is(err, net.ErrClosed) {
u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
return
}
u.l.WithError(err).Error("unexpected udp socket receive error")
}
r(netip.AddrPortFrom(rua.Addr().Unmap(), rua.Port()), buffer[:n])
}
}
func (u *StdConn) Rebind() error {
var err error
if u.isV4 {
err = syscall.SetsockoptInt(int(u.sysFd), syscall.IPPROTO_IP, syscall.IP_BOUND_IF, 0)
} else {
err = syscall.SetsockoptInt(int(u.sysFd), syscall.IPPROTO_IPV6, syscall.IPV6_BOUND_IF, 0)
}
if err != nil { if err != nil {
u.l.WithError(err).Error("Failed to rebind udp socket") return err
} }
return nil return rc.Control(func(fd uintptr) {
err := syscall.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_BOUND_IF, 0)
if err != nil {
u.l.WithError(err).Error("Failed to rebind udp socket")
}
})
} }
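Reviewer note: both versions of the Darwin listener need the raw socket descriptor at some point, either to call sendto directly or to clear the BOUND_IF options on rebind. The portable way to borrow that descriptor from a *net.UDPConn is SyscallConn plus Control, shown here as a Unix-only standalone sketch (the SO_RCVBUF call is a harmless stand-in for the platform-specific setsockopt above):

```go
package main

import (
	"fmt"
	"net"
	"syscall"
)

func main() {
	pc, err := net.ListenPacket("udp", "127.0.0.1:0")
	if err != nil {
		panic(err)
	}
	uc := pc.(*net.UDPConn)

	// SyscallConn hands out the fd only for the duration of Control, which
	// keeps the runtime's ownership of the descriptor intact.
	rc, err := uc.SyscallConn()
	if err != nil {
		panic(err)
	}
	var sockErr error
	err = rc.Control(func(fd uintptr) {
		sockErr = syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, syscall.SO_RCVBUF, 1<<20)
	})
	fmt.Println(err, sockErr, uc.LocalAddr())
}
```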

View File

@@ -1,7 +1,6 @@
//go:build (!linux || android) && !e2e_testing && !darwin //go:build (!linux || android) && !e2e_testing
// +build !linux android // +build !linux android
// +build !e2e_testing // +build !e2e_testing
// +build !darwin
// udp_generic implements the nebula UDP interface in pure Go stdlib. This // udp_generic implements the nebula UDP interface in pure Go stdlib. This
// means it can be used on platforms like Darwin and Windows. // means it can be used on platforms like Darwin and Windows.
@@ -34,7 +33,7 @@ func NewGenericListener(l *logrus.Logger, ip netip.Addr, port int, multi bool, b
if uc, ok := pc.(*net.UDPConn); ok { if uc, ok := pc.(*net.UDPConn); ok {
return &GenericConn{UDPConn: uc, l: l}, nil return &GenericConn{UDPConn: uc, l: l}, nil
} }
return nil, fmt.Errorf("Unexpected PacketConn: %T %#v", pc, pc) return nil, fmt.Errorf("unexpected PacketConn: %T %#v", pc, pc)
} }
func (u *GenericConn) WriteTo(b []byte, addr netip.AddrPort) error { func (u *GenericConn) WriteTo(b []byte, addr netip.AddrPort) error {
@@ -67,10 +66,6 @@ func NewUDPStatsEmitter(udpConns []Conn) func() {
return func() {} return func() {}
} }
type rawMessage struct {
Len uint32
}
func (u *GenericConn) ListenOut(r EncReader) { func (u *GenericConn) ListenOut(r EncReader) {
buffer := make([]byte, MTU) buffer := make([]byte, MTU)

View File

@@ -221,7 +221,7 @@ func (u *StdConn) writeTo6(b []byte, ip netip.AddrPort) error {
func (u *StdConn) writeTo4(b []byte, ip netip.AddrPort) error { func (u *StdConn) writeTo4(b []byte, ip netip.AddrPort) error {
if !ip.Addr().Is4() { if !ip.Addr().Is4() {
return ErrInvalidIPv6RemoteForSocket return fmt.Errorf("Listener is IPv4, but writing to IPv6 remote")
} }
var rsa unix.RawSockaddrInet4 var rsa unix.RawSockaddrInet4
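Reviewer note: this hunk trades the package-level ErrInvalidIPv6RemoteForSocket sentinel (defined in the udp/errors.go file shown earlier) for an ad-hoc fmt.Errorf, which also carries a capitalized error string that staticcheck flags. The practical difference is that callers can only match a sentinel structurally with errors.Is; a minimal sketch with stand-in names:

```go
package main

import (
	"errors"
	"fmt"
	"net/netip"
)

// Package-level sentinel, mirroring udp.ErrInvalidIPv6RemoteForSocket.
var errInvalidIPv6Remote = errors.New("listener is IPv4, but writing to IPv6 remote")

func writeTo4(_ []byte, ip netip.AddrPort) error {
	if !ip.Addr().Is4() {
		return errInvalidIPv6Remote
	}
	return nil // real send elided
}

func main() {
	err := writeTo4(nil, netip.MustParseAddrPort("[2001:db8::1]:4242"))
	// A fmt.Errorf string can only be matched by comparing text; a sentinel
	// can be matched even after wrapping.
	fmt.Println(errors.Is(fmt.Errorf("send failed: %w", err), errInvalidIPv6Remote)) // true
}
```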

View File

@@ -92,25 +92,6 @@ func (u *RIOConn) bind(sa windows.Sockaddr) error {
// Enable v4 for this socket // Enable v4 for this socket
syscall.SetsockoptInt(syscall.Handle(u.sock), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0) syscall.SetsockoptInt(syscall.Handle(u.sock), syscall.IPPROTO_IPV6, syscall.IPV6_V6ONLY, 0)
// Disable reporting of PORT_UNREACHABLE and NET_UNREACHABLE errors from the UDP socket receive call.
// These errors are returned on Windows during UDP receives based on the receipt of ICMP packets. Disable
// the UDP receive error returns with these ioctl calls.
ret := uint32(0)
flag := uint32(0)
size := uint32(unsafe.Sizeof(flag))
err = syscall.WSAIoctl(syscall.Handle(u.sock), syscall.SIO_UDP_CONNRESET, (*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0)
if err != nil {
return err
}
ret = 0
flag = 0
size = uint32(unsafe.Sizeof(flag))
SIO_UDP_NETRESET := uint32(syscall.IOC_IN | syscall.IOC_VENDOR | 15)
err = syscall.WSAIoctl(syscall.Handle(u.sock), SIO_UDP_NETRESET, (*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0)
if err != nil {
return err
}
err = u.rx.Open() err = u.rx.Open()
if err != nil { if err != nil {
return err return err
@@ -141,12 +122,8 @@ func (u *RIOConn) ListenOut(r EncReader) {
// Just read one packet at a time // Just read one packet at a time
n, rua, err := u.receive(buffer) n, rua, err := u.receive(buffer)
if err != nil { if err != nil {
if errors.Is(err, net.ErrClosed) { u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
u.l.WithError(err).Debug("udp socket is closed, exiting read loop") return
return
}
u.l.WithError(err).Error("unexpected udp socket receive error")
continue
} }
r(netip.AddrPortFrom(netip.AddrFrom16(rua.Addr).Unmap(), (rua.Port>>8)|((rua.Port&0xff)<<8)), buffer[:n]) r(netip.AddrPortFrom(netip.AddrFrom16(rua.Addr).Unmap(), (rua.Port>>8)|((rua.Port&0xff)<<8)), buffer[:n])