Compare commits

...

172 Commits

Author SHA1 Message Date
JackDoan
9cef6752c9 more incompatibilities, was this a good idea at all? 2025-10-17 15:52:15 -05:00
JackDoan
a0c6cea6fc backport incompatible bart change (grr) 2025-10-17 12:05:23 -05:00
JackDoan
fc165a8b75 update CHANGELOG.md 2025-10-17 11:54:35 -05:00
JackDoan
845d72b97b update bart, x/crypto, x/net 2025-10-17 11:52:58 -05:00
Nate Brown
7c3f533950 Better words (#1497) 2025-10-10 10:31:46 -05:00
Nate Brown
824cd3f0d6 Update CHANGELOG for Nebula v1.9.7 2025-10-07 21:10:16 -05:00
Nate Brown
9f692175e1 HostInfo.remoteCidr should only be populated with the entire vpn ip address issued in the certificate (#1494) 2025-10-07 17:35:58 -05:00
Nate Brown
22af56f156 Fix recv_error receipt limit allowance for v1.9.x (#1459)
* Fix recv_error receipt limit allowance

* backport #1463 recv_error behavior changes

---------

Co-authored-by: JackDoan <me@jackdoan.com>
2025-09-04 15:52:32 -05:00
brad-defined
1d73e463cd Quietly log error on UDP_NETRESET ioctl on Windows. (#1453)
* Quietly log error on UDP_NETRESET ioctl on Windows.

* dampen unexpected error warnings
2025-08-19 17:33:31 -04:00
brad-defined
105e0ec66c v1.9.6 (#1434)
Update CHANGELOG for Nebula v1.9.6
2025-07-18 08:39:33 -04:00
Nate Brown
4870bb680d Darwin udp fix (#1426) 2025-07-01 16:41:29 -05:00
brad-defined
a1498ca8f8 Store relay states in a slice for consistent ordering (#1422) 2025-06-24 12:04:00 -04:00
Nate Brown
9877648da9 Drop inactive tunnels (#1413) 2025-06-23 11:32:50 -05:00
brad-defined
8e0a7bcbb7 Disable UDP receive error returns due to ICMP messages on Windows. (#1412) 2025-05-22 08:55:45 -04:00
brad-defined
8c29b15c6d fix relay migration panic (#1403) 2025-05-13 14:58:58 -04:00
brad-defined
04d7a8ccba Retry UDP receive on Windows in some receive error cases (#1404) 2025-05-13 14:58:37 -04:00
Nate Brown
b55b9019a7 v1.9.5 (#1285)
Update CHANGELOG for Nebula v1.9.5
2024-12-06 09:50:24 -05:00
Nate Brown
2e85d138cd [v1.9.x] do not panic when loading a V2 CA certificate (#1282)
Co-authored-by: Jack Doan <jackdoan@rivian.com>
2024-12-03 09:49:54 -06:00
brad-defined
9bfdfbafc1 Backport reestablish relays from cert-v2 to release-1.9 (#1277) 2024-11-20 21:49:53 -06:00
Wade Simmons
ab81b62ea0 v1.9.4 (#1210)
Update CHANGELOG for Nebula v1.9.4
2024-09-09 14:11:44 -04:00
dependabot[bot]
45bbad2f21 Bump the golang-x-dependencies group with 4 updates (#1195)
Bumps the golang-x-dependencies group with 4 updates: [golang.org/x/crypto](https://github.com/golang/crypto), [golang.org/x/net](https://github.com/golang/net), [golang.org/x/sys](https://github.com/golang/sys) and [golang.org/x/term](https://github.com/golang/term).


Updates `golang.org/x/crypto` from 0.25.0 to 0.26.0
- [Commits](https://github.com/golang/crypto/compare/v0.25.0...v0.26.0)

Updates `golang.org/x/net` from 0.27.0 to 0.28.0
- [Commits](https://github.com/golang/net/compare/v0.27.0...v0.28.0)

Updates `golang.org/x/sys` from 0.23.0 to 0.24.0
- [Commits](https://github.com/golang/sys/compare/v0.23.0...v0.24.0)

Updates `golang.org/x/term` from 0.22.0 to 0.23.0
- [Commits](https://github.com/golang/term/compare/v0.22.0...v0.23.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/sys
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-09-03 16:47:36 -04:00
Jack Doan
3dc56e1184 Support UDP dialling with gvisor (#1181) 2024-08-26 12:38:32 -05:00
Wade Simmons
0736cfa562 udp: fix endianness for port (#1194)
If the host OS is already big endian, we were swapping bytes when we
shouldn't have. Use the Go helper to make sure we do the endianness
correctly

Fixes: #1189
2024-08-14 12:53:00 -04:00
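To illustrate the fix described above, here is a minimal Go sketch (the helper name is mine, not Nebula's) of encoding a UDP port with encoding/binary so the result is correct on both little- and big-endian hosts, instead of swapping bytes by hand:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// portToNetwork is a hypothetical helper: it writes the port in network
// (big-endian) byte order. binary.BigEndian does the right thing on every
// host, whereas a manual byte swap double-swaps on big-endian machines.
func portToNetwork(port uint16) []byte {
	b := make([]byte, 2)
	binary.BigEndian.PutUint16(b, port)
	return b
}

func main() {
	fmt.Printf("%x\n", portToNetwork(4242)) // 1092 on any architecture
}
```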
Jack Doan
248cf194cd fix integer wraparound in the calculation of handshake timeouts on 32-bit targets (#1185)
Fixes: #1169
2024-08-13 09:25:18 -04:00
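A hedged sketch of the general technique behind a fix like this, assuming the timeout is derived by multiplying an interval by a retry count: keep the arithmetic in time.Duration (an int64 on every platform) so it cannot wrap on 32-bit targets the way plain int math can.

```go
package main

import (
	"fmt"
	"time"
)

// retryDeadline is illustrative only; the real calculation in Nebula may differ.
// time.Duration is int64 on every GOARCH, so this multiplication cannot wrap
// the way `attempt * int(interval)` could when int is 32 bits wide.
func retryDeadline(attempt int64, interval time.Duration) time.Duration {
	return time.Duration(attempt) * interval
}

func main() {
	fmt.Println(retryDeadline(20, 500*time.Millisecond)) // 10s
}
```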
dependabot[bot]
8a6a0f0636 Bump the golang-x-dependencies group with 2 updates (#1190)
Bumps the golang-x-dependencies group with 2 updates: [golang.org/x/sync](https://github.com/golang/sync) and [golang.org/x/sys](https://github.com/golang/sys).


Updates `golang.org/x/sync` from 0.7.0 to 0.8.0
- [Commits](https://github.com/golang/sync/compare/v0.7.0...v0.8.0)

Updates `golang.org/x/sys` from 0.22.0 to 0.23.0
- [Commits](https://github.com/golang/sys/compare/v0.22.0...v0.23.0)

---
updated-dependencies:
- dependency-name: golang.org/x/sync
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/sys
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-08-07 11:58:46 -04:00
Wade Simmons
f5f6c269ac fix rare panic when local index collision happens (#1191)
A local index collision happens when two tunnels attempt to use the same
random int32 index ID. This is rare, and we have code to deal with it,
but we panic because we return the wrong thing in that case. This change
should fix the panic.
2024-08-07 11:53:32 -04:00
brad-defined
9a63fa0a07 Make some Nebula state programmatically available via control object (#1188) 2024-08-01 13:40:05 -04:00
Nate Brown
e264a0ff88 Switch most everything to netip in prep for ipv6 in the overlay (#1173) 2024-07-31 10:18:56 -05:00
dependabot[bot]
00458302ca Bump the golang-x-dependencies group with 4 updates (#1174)
Bumps the golang-x-dependencies group with 4 updates: [golang.org/x/crypto](https://github.com/golang/crypto), [golang.org/x/net](https://github.com/golang/net), [golang.org/x/sys](https://github.com/golang/sys) and [golang.org/x/term](https://github.com/golang/term).


Updates `golang.org/x/crypto` from 0.24.0 to 0.25.0
- [Commits](https://github.com/golang/crypto/compare/v0.24.0...v0.25.0)

Updates `golang.org/x/net` from 0.26.0 to 0.27.0
- [Commits](https://github.com/golang/net/compare/v0.26.0...v0.27.0)

Updates `golang.org/x/sys` from 0.21.0 to 0.22.0
- [Commits](https://github.com/golang/sys/compare/v0.21.0...v0.22.0)

Updates `golang.org/x/term` from 0.21.0 to 0.22.0
- [Commits](https://github.com/golang/term/compare/v0.21.0...v0.22.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/sys
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-07-29 11:42:33 -04:00
Wade Simmons
e6009b8491 github actions: use macos-latest (#1171)
macos-11 was deprecated and removed:

> The macos-11 label has been deprecated and will no longer be available after 28 June 2024.

We can just use macos-latest instead.
2024-07-02 11:50:51 -04:00
dependabot[bot]
b9aace1e58 Bump github.com/prometheus/client_golang from 1.19.0 to 1.19.1 (#1147)
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.19.0 to 1.19.1.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.19.0...v1.19.1)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-24 14:54:51 -04:00
dependabot[bot]
a76723eaf5 Bump Apple-Actions/import-codesign-certs from 2 to 3 (#1146)
Bumps [Apple-Actions/import-codesign-certs](https://github.com/apple-actions/import-codesign-certs) from 2 to 3.
- [Release notes](https://github.com/apple-actions/import-codesign-certs/releases)
- [Commits](https://github.com/apple-actions/import-codesign-certs/compare/v2...v3)

---
updated-dependencies:
- dependency-name: Apple-Actions/import-codesign-certs
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-24 14:54:05 -04:00
Caleb Jasik
8109cf2170 Add punctuation to doc comment (#1164)
* Add punctuation to doc comment

* Fix list formatting inside `EncryptDanger` doc comment
2024-06-24 14:50:17 -04:00
Wade Simmons
97e9834f82 cleanup SK_MEMINFO vars (#1162)
We had to manually define these types before, but the latest release of
`golang.org/x/sys` adds these definitions:

- 6dfb94eaa3

Since we just updated with this PR, we can clean this up now:

- https://github.com/slackhq/nebula/pull/1161
2024-06-24 14:47:14 -04:00
dependabot[bot]
506ba5ab5b Bump github.com/miekg/dns from 1.1.59 to 1.1.61 (#1168)
Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.59 to 1.1.61.
- [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release)
- [Commits](https://github.com/miekg/dns/compare/v1.1.59...v1.1.61)

---
updated-dependencies:
- dependency-name: github.com/miekg/dns
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-24 14:46:27 -04:00
dependabot[bot]
d372df56ab Bump google.golang.org/protobuf in the protobuf-dependencies group (#1167)
Bumps the protobuf-dependencies group with 1 update: google.golang.org/protobuf.


Updates `google.golang.org/protobuf` from 1.34.1 to 1.34.2

---
updated-dependencies:
- dependency-name: google.golang.org/protobuf
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: protobuf-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-24 14:45:52 -04:00
dependabot[bot]
40cfd00e87 Bump the golang-x-dependencies group with 4 updates (#1161)
Bumps the golang-x-dependencies group with 4 updates: [golang.org/x/crypto](https://github.com/golang/crypto), [golang.org/x/net](https://github.com/golang/net), [golang.org/x/sys](https://github.com/golang/sys) and [golang.org/x/term](https://github.com/golang/term).


Updates `golang.org/x/crypto` from 0.23.0 to 0.24.0
- [Commits](https://github.com/golang/crypto/compare/v0.23.0...v0.24.0)

Updates `golang.org/x/net` from 0.25.0 to 0.26.0
- [Commits](https://github.com/golang/net/compare/v0.25.0...v0.26.0)

Updates `golang.org/x/sys` from 0.20.0 to 0.21.0
- [Commits](https://github.com/golang/sys/compare/v0.20.0...v0.21.0)

Updates `golang.org/x/term` from 0.20.0 to 0.21.0
- [Commits](https://github.com/golang/term/compare/v0.20.0...v0.21.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/sys
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-06-10 16:08:43 -04:00
Wade Simmons
b14bad586a v1.9.3 (#1160)
Update CHANGELOG for Nebula v1.9.3
2024-06-06 13:17:07 -04:00
Wade Simmons
4c066d8c32 initialize messageCounter to 2 instead of verifying later (#1156)
Clean up the messageCounter checks added in #1154. Instead of checking that
messageCounter is still at 2, just initialize it to 2 and only increment for
non-handshake messages. Handshake packets will always be packets 1 and 2.
2024-06-06 13:03:07 -04:00
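A simplified sketch of that approach (the type and field names are assumptions, not Nebula's actual structs): start the counter at 2 when the connection state is created, so only data packets advance it.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// connState is a stand-in for the real connection state.
type connState struct {
	messageCounter atomic.Uint64
}

func newConnState() *connState {
	cs := &connState{}
	cs.messageCounter.Store(2) // handshake packets always consume counters 1 and 2
	return cs
}

// nextCounter is only called for non-handshake messages.
func (cs *connState) nextCounter() uint64 {
	return cs.messageCounter.Add(1)
}

func main() {
	cs := newConnState()
	fmt.Println(cs.nextCounter()) // 3: the first data packet
}
```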
Wade Simmons
249ae41fec v1.9.2 (#1155)
Update CHANGELOG for Nebula v1.9.2
2024-06-03 15:50:02 -04:00
Wade Simmons
d9cae9e062 ensure messageCounter is set before handshake is complete (#1154)
Ensure we set messageCounter to 2 before the handshake is marked as
complete.
2024-06-03 15:40:51 -04:00
Wade Simmons
a92056a7db v1.9.1 (#1152)
Update CHANGELOG for Nebula v1.9.1
2024-05-29 14:06:46 -04:00
Wade Simmons
4eb1da0958 remove deadlock in GetOrHandshake (#1151)
We had a rare deadlock in GetOrHandshake because we held the hostmap
lock while calling StartHandshake. StartHandshake can block while
sending to the lighthouse query worker channel, and that worker needs
to be able to grab the hostmap lock to do its work. Other calls to
StartHandshake don't hold the hostmap lock, so we should be able to
drop it here.

This lock was originally added with: https://github.com/slackhq/nebula/pull/954
2024-05-29 12:52:52 -04:00
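The locking pattern described above, reduced to a hedged sketch (all names here are placeholders): release the shared lock before any call that can block on a channel whose consumer also needs that lock.

```go
package main

import "sync"

type hostInfo struct{ vpnIP string }

type hostMap struct {
	sync.RWMutex
	hosts map[string]*hostInfo
}

// queryChan stands in for the lighthouse query worker channel; its consumer
// also takes the hostMap lock, which is why we must not hold it while sending.
var queryChan = make(chan string)

func getOrHandshake(hm *hostMap, vpnIP string) *hostInfo {
	hm.RLock()
	hi, ok := hm.hosts[vpnIP]
	hm.RUnlock() // drop the lock before StartHandshake, which may block
	if ok {
		return hi
	}
	startHandshake(vpnIP)
	return nil
}

func startHandshake(vpnIP string) {
	queryChan <- vpnIP // blocks until the worker reads it
}

func main() {
	go func() {
		for range queryChan {
			// the real worker would lock hostMap here
		}
	}()
	hm := &hostMap{hosts: map[string]*hostInfo{}}
	getOrHandshake(hm, "192.168.100.1")
}
```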
Wade Simmons
50b24c102e v1.9.0 (#1137)
Update CHANGELOG for Nebula v1.9.0

Co-authored-by: John Maguire <john@defined.net>
2024-05-08 10:31:24 -04:00
dependabot[bot]
c0130f8161 Bump the golang-x-dependencies group with 4 updates (#1138)
Bumps the golang-x-dependencies group with 4 updates: [golang.org/x/crypto](https://github.com/golang/crypto), [golang.org/x/net](https://github.com/golang/net), [golang.org/x/sys](https://github.com/golang/sys) and [golang.org/x/term](https://github.com/golang/term).


Updates `golang.org/x/crypto` from 0.22.0 to 0.23.0
- [Commits](https://github.com/golang/crypto/compare/v0.22.0...v0.23.0)

Updates `golang.org/x/net` from 0.24.0 to 0.25.0
- [Commits](https://github.com/golang/net/compare/v0.24.0...v0.25.0)

Updates `golang.org/x/sys` from 0.19.0 to 0.20.0
- [Commits](https://github.com/golang/sys/compare/v0.19.0...v0.20.0)

Updates `golang.org/x/term` from 0.19.0 to 0.20.0
- [Commits](https://github.com/golang/term/compare/v0.19.0...v0.20.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/sys
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-06 16:17:50 -04:00
dependabot[bot]
f19a28645e Bump google.golang.org/protobuf in the protobuf-dependencies group (#1139)
Bumps the protobuf-dependencies group with 1 update: google.golang.org/protobuf.


Updates `google.golang.org/protobuf` from 1.34.0 to 1.34.1

---
updated-dependencies:
- dependency-name: google.golang.org/protobuf
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: protobuf-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-06 16:17:05 -04:00
Jack Doan
fd1906b16f minor text fixes (#1135) 2024-05-03 20:43:40 -05:00
Wade Simmons
d6e4b88bb5 release: use download-action v4 in docker section (#1134)
We missed this upgrade in #1047
2024-05-03 11:35:55 -04:00
dependabot[bot]
18f69af455 Bump actions/download-artifact from 3 to 4 (#1047)
Bumps [actions/download-artifact](https://github.com/actions/download-artifact) from 3 to 4.
- [Release notes](https://github.com/actions/download-artifact/releases)
- [Commits](https://github.com/actions/download-artifact/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/download-artifact
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-02 11:25:22 -04:00
dependabot[bot]
aa18d7fa4f Bump actions/upload-artifact from 3 to 4 (#1046)
* Bump actions/upload-artifact from 3 to 4

Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 3 to 4.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>

* try to fix upload conflict

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Wade Simmons <wsimmons@slack-corp.com>
2024-05-02 11:24:58 -04:00
John Maguire
b5c3486796 Push Docker images as part of the release workflow (#1037) 2024-05-02 09:37:11 -04:00
dependabot[bot]
f39bfbb7fa Bump google.golang.org/protobuf in the protobuf-dependencies group (#1133)
Bumps the protobuf-dependencies group with 1 update: google.golang.org/protobuf.


Updates `google.golang.org/protobuf` from 1.33.0 to 1.34.0

---
updated-dependencies:
- dependency-name: google.golang.org/protobuf
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: protobuf-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-30 11:45:05 -04:00
Wade Simmons
4f4941e187 Add Vagrant based smoke tests (#1067)
* WIP smoke test freebsd

* fix bitrot

We now test that the firewall blocks inbound on host3 from host2

* WIP ipv6 test

* cleanup

* rename to make clear

* fix filename

* restore

* no sudo docker

* WIP

* WIP

* WIP

* WIP

* extra smoke tests

* WIP

* WIP

* add over improvements made in smoke.sh

* more tests

* use generic/freebsd14

* cleanup from test

* smoke test openbsd-amd64

* add netbsd-amd64

* try to fix vagrant
2024-04-30 11:02:16 -04:00
fyl
5f17db5dfa Add support for LoongArch64 (#1003) 2024-04-30 09:55:44 -05:00
John Maguire
f31bab5f1a Add support for SSH CAs (#1098)
- Accept certs signed by trusted CAs
- Username must match the cert principal if set
- Any username can be used if cert principal is empty
- Don't allow removed pubkeys/CAs to be used after reload
2024-04-30 10:50:17 -04:00
kindknow
9cd944d320 chore: fix function name in comment (#1111) 2024-04-30 09:43:38 -05:00
John Maguire
f7db0eb5cc Remove Vagrant example (#1129) 2024-04-30 09:40:24 -05:00
dependabot[bot]
7e7d5e00ca Bump github.com/prometheus/client_golang from 1.18.0 to 1.19.0 (#1086)
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.18.0 to 1.19.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.18.0...v1.19.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-30 10:30:18 -04:00
Wade Simmons
24f336ec56 switch off deprecated elliptic.Marshal (#1108)
elliptic.Marshal was deprecated, we can replace it with the ECDH methods
even though we aren't using ECDH here. See:

- f03fb147d7

We are still using elliptic.Unmarshal because this issue needs to be
resolved:

- https://github.com/golang/go/issues/63963
2024-04-30 10:02:49 -04:00
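For reference, a small sketch of the replacement path (assuming Go 1.20+): converting through crypto/ecdh yields the same uncompressed 0x04 || X || Y encoding that elliptic.Marshal produced, without calling the deprecated function.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// Bridge from ecdsa to ecdh instead of calling elliptic.Marshal(curve, x, y).
	ecdhPub, err := priv.PublicKey.ECDH()
	if err != nil {
		panic(err)
	}
	pub := ecdhPub.Bytes() // uncompressed point: 0x04 || X || Y
	fmt.Printf("%d bytes, prefix 0x%02x\n", len(pub), pub[0]) // 65 bytes, prefix 0x04
}
```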
John Maguire
d7f52dec41 Fix errant capitalisation in DNS TXT response (#1127)
Co-authored-by: Oliver Marriott <hello@omarriott.com>
2024-04-30 09:58:56 -04:00
NODA Kai
e54f9dd206 dns_server.go: parseQuery: set NXDOMAIN if there's no Answer to return (#845) 2024-04-30 09:56:57 -04:00
Andrew Kraut
df78158cfa Create service script for open-rc (#711) 2024-04-30 09:53:00 -04:00
Robin Candau
8b55caa15e Remove Arch nebula.service file (#1132) 2024-04-30 07:45:23 -04:00
Jon Rafkind
7ed9f2a688 add ssh command to print device info (#763) 2024-04-29 16:09:34 -05:00
Wade Simmons
3aca576b07 update to go1.22 (#981)
* update to go1.21

Since the first minor version update has already been released, we can
probably feel comfortable updating to go1.21. This version now enforces
that the go version on the system is compatible with the version
specified in go.mod, so we can remove the old logic around checking the
minimum version in the Makefile.

- https://go.dev/doc/go1.21#tools

> To improve forwards compatibility, Go 1.21 now reads the go line in a go.work or go.mod file as a strict minimum requirement: go 1.21.0 means that the workspace or module cannot be used with Go 1.20 or with Go 1.21rc1. This allows projects that depend on fixes made in later versions of Go to ensure that they are not used with earlier versions. It also gives better error reporting for projects that make use of new Go features: when the problem is that a newer Go version is needed, that problem is reported clearly, instead of attempting to build the code and printing errors about unresolved imports or syntax errors.

* update to go1.22

* bump gvisor

* fix merge conflicts

* use latest gvisor `go` branch

Need to use the latest commit on the `go` branch, see:

- https://github.com/google/gvisor?tab=readme-ov-file#using-go-get

* mod tidy

* more fixes

* give smoketest more time

Is this why it is failing?

* also a little more sleep here

---------

Co-authored-by: Jack Doan <me@jackdoan.com>
2024-04-29 16:44:42 -04:00
Nate Brown
a99618e95c Don't log invalid certificates (#1116) 2024-04-29 15:21:00 -05:00
Caleb Jasik
8e94eb974e Add suggested filenames for collected profiles in the ssh commands (#1109) 2024-04-29 15:20:46 -05:00
John Maguire
41e2e1de02 Remove Fedora nebula.service file (#1128) 2024-04-29 15:30:22 -04:00
dependabot[bot]
d95fb4a314 Bump the golang-x-dependencies group with 5 updates (#1110)
Bumps the golang-x-dependencies group with 5 updates:

| Package | From | To |
| --- | --- | --- |
| [golang.org/x/crypto](https://github.com/golang/crypto) | `0.21.0` | `0.22.0` |
| [golang.org/x/net](https://github.com/golang/net) | `0.22.0` | `0.24.0` |
| [golang.org/x/sync](https://github.com/golang/sync) | `0.6.0` | `0.7.0` |
| [golang.org/x/sys](https://github.com/golang/sys) | `0.18.0` | `0.19.0` |
| [golang.org/x/term](https://github.com/golang/term) | `0.18.0` | `0.19.0` |


Updates `golang.org/x/crypto` from 0.21.0 to 0.22.0
- [Commits](https://github.com/golang/crypto/compare/v0.21.0...v0.22.0)

Updates `golang.org/x/net` from 0.22.0 to 0.24.0
- [Commits](https://github.com/golang/net/compare/v0.22.0...v0.24.0)

Updates `golang.org/x/sync` from 0.6.0 to 0.7.0
- [Commits](https://github.com/golang/sync/compare/v0.6.0...v0.7.0)

Updates `golang.org/x/sys` from 0.18.0 to 0.19.0
- [Commits](https://github.com/golang/sys/compare/v0.18.0...v0.19.0)

Updates `golang.org/x/term` from 0.18.0 to 0.19.0
- [Commits](https://github.com/golang/term/compare/v0.18.0...v0.19.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/sync
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/sys
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-23 13:50:53 -04:00
dependabot[bot]
cdcea00669 Bump github.com/miekg/dns from 1.1.58 to 1.1.59 (#1126)
Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.58 to 1.1.59.
- [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release)
- [Commits](https://github.com/miekg/dns/compare/v1.1.58...v1.1.59)

---
updated-dependencies:
- dependency-name: github.com/miekg/dns
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-23 11:08:08 -04:00
dependabot[bot]
9bd92a7fc2 Bump golang.org/x/net from 0.22.0 to 0.23.0 (#1123)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.22.0 to 0.23.0.
- [Commits](https://github.com/golang/net/compare/v0.22.0...v0.23.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-23 11:06:15 -04:00
Nate Brown
a5a07cc760 Allow :: in lighthouse.dns.host config (#1115) 2024-04-11 21:44:36 -05:00
Nate Brown
c1711bc9c5 Remove tcp rtt tracking from the firewall (#1114) 2024-04-11 21:44:22 -05:00
Wade Simmons
7efa750aef avoid deadlock in lighthouse queryWorker (#1112)
* avoid deadlock in lighthouse queryWorker

If the lighthouse queryWorker tries to call StartHandshake on a
lighthouse vpnIp, we can deadlock on the handshake_manager lock. This
change drops the handshake_manager lock before we send on the lighthouse
queryChan (which could block), and also avoids sending to the channel if
this is a lighthouse IP itself.

* need to hold lock during cacheCb
2024-04-11 17:00:01 -04:00
Nate Brown
a390125935 Support reloading preferred_ranges (#1043) 2024-04-03 22:14:51 -05:00
Nate Brown
bbb15f8cb1 Unsafe route reload (#1083) 2024-03-28 15:17:28 -05:00
John Maguire
8b68a08723 Fix "any" firewall rules for unsafe_routes (#1099) 2024-03-28 15:17:12 -05:00
dependabot[bot]
f8fb9759e9 Bump the golang-x-dependencies group with 1 update (#1094)
Bumps the golang-x-dependencies group with 1 update: [golang.org/x/net](https://github.com/golang/net).


Updates `golang.org/x/net` from 0.21.0 to 0.22.0
- [Commits](https://github.com/golang/net/compare/v0.21.0...v0.22.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-22 12:58:13 -04:00
dependabot[bot]
1f1d660200 Bump google.golang.org/protobuf from 1.32.0 to 1.33.0 (#1092)
Bumps google.golang.org/protobuf from 1.32.0 to 1.33.0.

---
updated-dependencies:
- dependency-name: google.golang.org/protobuf
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-18 11:12:13 -04:00
dependabot[bot]
279265058f Bump github.com/stretchr/testify from 1.8.4 to 1.9.0 (#1087)
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.8.4 to 1.9.0.
- [Release notes](https://github.com/stretchr/testify/releases)
- [Commits](https://github.com/stretchr/testify/compare/v1.8.4...v1.9.0)

---
updated-dependencies:
- dependency-name: github.com/stretchr/testify
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-18 11:06:18 -04:00
dependabot[bot]
2a778de07e Bump github.com/flynn/noise from 1.0.1 to 1.1.0 (#1072)
Bumps [github.com/flynn/noise](https://github.com/flynn/noise) from 1.0.1 to 1.1.0.
- [Commits](https://github.com/flynn/noise/compare/v1.0.1...v1.1.0)

---
updated-dependencies:
- dependency-name: github.com/flynn/noise
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-18 10:47:53 -04:00
dependabot[bot]
2affd371e3 Bump the golang-x-dependencies group with 4 updates (#1085)
Bumps the golang-x-dependencies group with 4 updates: [golang.org/x/crypto](https://github.com/golang/crypto), [golang.org/x/net](https://github.com/golang/net), [golang.org/x/sys](https://github.com/golang/sys) and [golang.org/x/term](https://github.com/golang/term).


Updates `golang.org/x/crypto` from 0.18.0 to 0.21.0
- [Commits](https://github.com/golang/crypto/compare/v0.18.0...v0.21.0)

Updates `golang.org/x/net` from 0.20.0 to 0.21.0
- [Commits](https://github.com/golang/net/compare/v0.20.0...v0.21.0)

Updates `golang.org/x/sys` from 0.16.0 to 0.18.0
- [Commits](https://github.com/golang/sys/compare/v0.16.0...v0.18.0)

Updates `golang.org/x/term` from 0.16.0 to 0.18.0
- [Commits](https://github.com/golang/term/compare/v0.16.0...v0.18.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/sys
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-03-18 10:43:17 -04:00
Nate Brown
cc8b3cc961 Add config option for local_cidr control 2024-02-15 11:46:45 -06:00
Nate Brown
f346cf4109 At the end 2024-02-05 10:23:10 -06:00
Nate Brown
8f44f22c37 In the middle 2024-02-05 10:23:10 -06:00
John Maguire
8822f1366c Add link to logs guide in bug report template (#1065) 2024-02-01 12:40:23 -05:00
brad-defined
e3f5a129c1 Return full error context from ContextualError.Error() (#1069) 2024-01-31 15:31:46 -05:00
mrx
0f0534d739 Fix UDP listener on IPv4-only Linux (#787)
On some systems, IPv6 is disabled (for example, the CIS benchmark recommends disabling it when not in use), but currently all UDP connections use AF_INET6 sockets.
When we bind an AF_INET6 socket to an address like ::ffff:1.2.3.4 (net.ParseIP parses IPv4 addresses this way), we can't send or receive IPv6 packets anyway, so this will not break any scenarios.

---------

Co-authored-by: Wade Simmons <wsimmons@slack-corp.com>
2024-01-30 15:08:14 -05:00
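A minimal sketch of the idea (not the exact code in the PR): detect when the configured listen address is really IPv4, including the ::ffff:a.b.c.d form that net.ParseIP produces, and open a udp4 (AF_INET) socket in that case.

```go
package main

import (
	"fmt"
	"net/netip"
)

// listenNetwork picks the socket family for a configured listen IP. Treating
// 4-in-6 mapped addresses as IPv4 keeps things working when IPv6 is disabled.
func listenNetwork(ip string) string {
	addr, err := netip.ParseAddr(ip)
	if err != nil {
		return "udp" // let the stack decide
	}
	if addr.Is4() || addr.Is4In6() {
		return "udp4" // AF_INET; no IPv6 support required on the host
	}
	return "udp6"
}

func main() {
	fmt.Println(listenNetwork("::ffff:1.2.3.4")) // udp4
	fmt.Println(listenNetwork("::"))             // udp6
}
```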
dependabot[bot]
c5a403b7a8 Bump github.com/vishvananda/netlink (#1034)
Bumps [github.com/vishvananda/netlink](https://github.com/vishvananda/netlink) from 1.1.1-0.20211118161826-650dca95af54 to 1.2.1-beta.2.
- [Release notes](https://github.com/vishvananda/netlink/releases)
- [Commits](https://github.com/vishvananda/netlink/commits/v1.2.1-beta.2)

---
updated-dependencies:
- dependency-name: github.com/vishvananda/netlink
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-23 10:40:29 -05:00
dependabot[bot]
f23d328561 Bump the protobuf-dependencies group with 1 update (#1053)
Bumps the protobuf-dependencies group with 1 update: google.golang.org/protobuf.


Updates `google.golang.org/protobuf` from 1.31.0 to 1.32.0

---
updated-dependencies:
- dependency-name: google.golang.org/protobuf
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: protobuf-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-23 10:39:53 -05:00
dependabot[bot]
a977ee653d Bump github.com/miekg/dns from 1.1.57 to 1.1.58 (#1063)
Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.57 to 1.1.58.
- [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release)
- [Commits](https://github.com/miekg/dns/compare/v1.1.57...v1.1.58)

---
updated-dependencies:
- dependency-name: github.com/miekg/dns
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-23 10:37:53 -05:00
Lingfeng Zhang
1f83d1758d Support inlined sshd host key (#1054) 2024-01-22 13:58:44 -05:00
dependabot[bot]
3210198276 Bump github.com/prometheus/client_golang from 1.17.0 to 1.18.0 (#1055)
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.17.0 to 1.18.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.17.0...v1.18.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-12 10:26:39 -05:00
dependabot[bot]
0cef634635 Bump github.com/miekg/dns from 1.1.56 to 1.1.57 (#1022)
Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.56 to 1.1.57.
- [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release)
- [Commits](https://github.com/miekg/dns/compare/v1.1.56...v1.1.57)

---
updated-dependencies:
- dependency-name: github.com/miekg/dns
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-12 09:57:38 -05:00
dependabot[bot]
637dc18bf8 Bump the golang-x-dependencies group with 3 updates (#1059)
Bumps the golang-x-dependencies group with 3 updates: [golang.org/x/crypto](https://github.com/golang/crypto), [golang.org/x/net](https://github.com/golang/net) and [golang.org/x/sync](https://github.com/golang/sync).


Updates `golang.org/x/crypto` from 0.17.0 to 0.18.0
- [Commits](https://github.com/golang/crypto/compare/v0.17.0...v0.18.0)

Updates `golang.org/x/net` from 0.19.0 to 0.20.0
- [Commits](https://github.com/golang/net/compare/v0.19.0...v0.20.0)

Updates `golang.org/x/sync` from 0.5.0 to 0.6.0
- [Commits](https://github.com/golang/sync/compare/v0.5.0...v0.6.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/sync
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-01-12 09:55:41 -05:00
Wade Simmons
ea36949d8a v1.8.2 (#1058)
Update CHANGELOG for Nebula v1.8.2
2024-01-08 15:40:04 -05:00
Wade Simmons
0564d0a2cf when listen.port is zero, fix multiple routines (#1057)
This used to work correctly when the multiple routines work was first
added in #382, but an important step, discovering the listen port
before opening the other listeners on the same socket, was lost in
PR #653.

This change should fix the regression and allow multiple routines to
work correctly when listen.port is set to `0`.

Thanks to @rawdigits for tracking down and discovering this regression.
2024-01-08 13:49:44 -05:00
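A hedged sketch of the step that was lost and then restored: when the configured port is 0, bind the first socket, read back the port the kernel actually chose, and have the remaining routines bind to that concrete port (the real code also sets SO_REUSEPORT) instead of each binding to 0 and landing on different ports.

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	first, err := net.ListenUDP("udp4", &net.UDPAddr{Port: 0})
	if err != nil {
		panic(err)
	}
	defer first.Close()

	// Discover the port the kernel picked for routine 0.
	chosen := first.LocalAddr().(*net.UDPAddr).Port
	fmt.Println("routine 0 listening on port", chosen)

	// Subsequent routines would bind to `chosen` (with SO_REUSEPORT in the
	// real implementation) rather than to 0.
}
```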
nezu
b22ba6eb49 Update Arch Linux package link (#1024) 2023-12-27 10:38:24 -06:00
Wade Simmons
3a221812f6 test: build all non-main modules for mobile (#1036)
Ensure that we don't break the build for mobile by doing a `go build`
for all of the non-main modules in the repo. Should hopefully catch
issues like #1035 sooner.
2023-12-21 11:59:21 -05:00
dependabot[bot]
927ff4cc03 Bump github.com/flynn/noise from 1.0.0 to 1.0.1 (#1038)
Bumps [github.com/flynn/noise](https://github.com/flynn/noise) from 1.0.0 to 1.0.1.
- [Commits](https://github.com/flynn/noise/compare/v1.0.0...v1.0.1)

---
updated-dependencies:
- dependency-name: github.com/flynn/noise
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-21 11:57:53 -05:00
Wade Simmons
e5945a60aa v1.8.1 (#1049)
Update CHANGELOG for Nebula v1.8.1
2023-12-19 15:11:25 -05:00
Nate Brown
072edd56b3 Fix re-entrant GetOrHandshake issues (#1044) 2023-12-19 11:58:31 -06:00
dependabot[bot]
beb5f6bddc Bump golang.org/x/crypto from 0.16.0 to 0.17.0 (#1048)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.16.0 to 0.17.0.
- [Commits](https://github.com/golang/crypto/compare/v0.16.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-19 10:57:09 -05:00
dependabot[bot]
8be9792059 Bump actions/setup-go from 4 to 5 (#1039)
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 4 to 5.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-13 22:45:09 -06:00
John Maguire
af2fc48378 Fix mobile builds (#1035) 2023-12-06 16:18:21 -05:00
Wade Simmons
1d2f95e718 v1.8.0 (#1017)
Update CHANGELOG for Nebula v1.8.0
2023-12-06 14:38:58 -05:00
Lars Lehtonen
3a8743d511 cmd/nebula-cert: fix clobbered error (#1032)
* cmd/nebula-cert: fix clobbered error

Signed-off-by: Lars Lehtonen <lars.lehtonen@gmail.com>

* apply suggestions from Nate

This makes it much clearer what is happening in the code

---------

Signed-off-by: Lars Lehtonen <lars.lehtonen@gmail.com>
Co-authored-by: Wade Simmons <wsimmons@slack-corp.com>
2023-12-06 13:20:49 -05:00
Dave Russell
0209402942 SIGHUP is only useful when config was loaded from a file (#1030)
Have (*config.C).CatchHUP() return early when there is no file
path available from which to reload.
This will allow wrapping services to manage their own signal
trapping (which is particularly important if they've used
config from a string).
2023-12-06 10:13:38 -05:00
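A small illustration of that early return (the type and method below are placeholders shaped like (*config.C).CatchHUP, not the actual implementation): skip installing the SIGHUP handler entirely when there is no file to reload, leaving the signal free for the embedding service.

```go
package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

type config struct {
	path string // empty when the config was supplied as a string
}

func (c *config) catchHUP() {
	if c.path == "" {
		return // nothing on disk to reload; let the wrapper own SIGHUP
	}
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGHUP)
	go func() {
		for range ch {
			fmt.Println("reloading", c.path)
		}
	}()
}

func main() {
	(&config{path: ""}).catchHUP() // no handler installed
}
```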
dependabot[bot]
fb55f5b762 Bump the golang-x-dependencies group with 3 updates (#1028)
Bumps the golang-x-dependencies group with 3 updates: [golang.org/x/crypto](https://github.com/golang/crypto), [golang.org/x/net](https://github.com/golang/net) and [golang.org/x/sync](https://github.com/golang/sync).


Updates `golang.org/x/crypto` from 0.14.0 to 0.16.0
- [Commits](https://github.com/golang/crypto/compare/v0.14.0...v0.16.0)

Updates `golang.org/x/net` from 0.17.0 to 0.19.0
- [Commits](https://github.com/golang/net/compare/v0.17.0...v0.19.0)

Updates `golang.org/x/sync` from 0.3.0 to 0.5.0
- [Commits](https://github.com/golang/sync/compare/v0.3.0...v0.5.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/sync
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-04 11:12:52 -05:00
Ben Ritcey
01cddb8013 Added firewall.rules.hash metric (#1010)
* Added firewall.rules.hash metric

Added a FNV-1 hash of the firewall rules as a Prometheus value.

* Switch FNV hash to int64, include both hashes in log messages

* Use a uint32 for the FNV hash

Let go-metrics cast the uint32 to a int64, so it won't be lossy
when it eventually emits a float64 Prometheus metric.
2023-11-28 11:56:47 -05:00
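A compact sketch of the hashing side of this change (rule serialization and metric registration omitted): FNV-1 over the rendered rules, kept as a uint32 so the cast to the metric's int64 is exact.

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// rulesHash hashes a string form of the firewall rules with 32-bit FNV-1.
func rulesHash(rendered string) uint32 {
	h := fnv.New32() // FNV-1 (fnv.New32a would be FNV-1a)
	h.Write([]byte(rendered))
	return h.Sum32()
}

func main() {
	v := rulesHash("outbound: any any; inbound: icmp any")
	// A uint32 always fits in an int64, so publishing it as a gauge is lossless.
	fmt.Println(int64(v))
}
```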
Tristan Rice
1083279a45 add gvisor based service library (#965)
* add service/ library
2023-11-21 11:50:18 -05:00
Wade Simmons
fe16ea566d firewall reject packets: cleanup error cases (#957) 2023-11-13 12:43:51 -06:00
Nate Brown
3356e03d85 Default pki.disconnect_invalid to true and make it reloadable (#859) 2023-11-13 12:39:38 -06:00
dependabot[bot]
f41db52560 Bump the golang-x-dependencies group with 1 update (#1006)
Bumps the golang-x-dependencies group with 1 update: [golang.org/x/sys](https://github.com/golang/sys).

- [Commits](https://github.com/golang/sys/compare/v0.13.0...v0.14.0)

---
updated-dependencies:
- dependency-name: golang.org/x/sys
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-13 07:58:45 -08:00
Nate Brown
5181cb0474 Use generics for CIDRTrees to avoid casting issues (#1004) 2023-11-02 17:05:08 -05:00
Nate Brown
a44e1b8b05 Clean up a hostinfo to reduce memory usage (#955) 2023-11-02 16:53:59 -05:00
guangwu
276978377a chore: remove refs to deprecated io/ioutil (#987)
Signed-off-by: guoguangwu <guoguangwu@magic-shield.com>
2023-10-31 10:35:13 -04:00
dependabot[bot]
777eb96aea Bump github.com/prometheus/client_golang from 1.16.0 to 1.17.0 (#984)
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.16.0 to 1.17.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.16.0...v1.17.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-31 10:33:04 -04:00
Wade Simmons
0912ef14f4 github actions smoke-test: run with data race detector (#988)
Run the github actions smoke tests with data race detector enabled, so
we can detect if a PR introduces a simple data race.
2023-10-31 10:32:39 -04:00
Lars Lehtonen
77a8ce1712 main: fix dropped error (#1002)
This isn't an actual issue because the current implementation of NewSSHServer never returns an error (https://github.com/slackhq/nebula/blob/v1.7.2/sshd/server.go#L56), but still good to fix so no surprises happen in the future.
2023-10-31 10:32:08 -04:00
John Maguire
87b628ba24 Fix truncated comment in config.yml (#999) 2023-10-27 08:39:34 -04:00
Nate Brown
50d6a1e8ca QueryServer needs to be done outside of the lock (#996) 2023-10-17 15:43:51 -05:00
dependabot[bot]
e78fe0b9ef Bump golang.org/x/net from 0.15.0 to 0.17.0 (#990)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.15.0 to 0.17.0.
- [Commits](https://github.com/golang/net/compare/v0.15.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-16 13:28:59 -04:00
Nate Brown
5fccbb8676 Retry wintun creation (#985) 2023-10-16 10:06:43 -05:00
dependabot[bot]
c289c7a7ca Bump github.com/miekg/dns from 1.1.55 to 1.1.56 (#979)
Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.55 to 1.1.56.
- [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release)
- [Commits](https://github.com/miekg/dns/compare/v1.1.55...v1.1.56)

---
updated-dependencies:
- dependency-name: github.com/miekg/dns
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-22 09:48:26 -04:00
dependabot[bot]
e3fbfbfd4d Bump golang.org/x/net from 0.14.0 to 0.15.0 (#977)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.14.0 to 0.15.0.
- [Commits](https://github.com/golang/net/compare/v0.14.0...v0.15.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-22 09:47:45 -04:00
dependabot[bot]
282ca4368e Bump golang.org/x/crypto from 0.12.0 to 0.13.0 (#976)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.12.0 to 0.13.0.
- [Commits](https://github.com/golang/crypto/compare/v0.12.0...v0.13.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-22 09:47:00 -04:00
Wade Simmons
280fa026ea smoke-test: don't assume docker needs sudo (#958)
Let the host deal with this detail if necessary
2023-09-07 13:57:41 -04:00
Lars Lehtonen
dbdb48f182 cert: fix dropped errors (#961) 2023-09-07 13:54:01 -04:00
Nate Brown
f7e392995a Fix rebind to not put the socket in blocking mode (#972) 2023-09-07 11:56:09 -05:00
dependabot[bot]
d271df8da8 Bump golang.org/x/term from 0.11.0 to 0.12.0 (#967)
Bumps [golang.org/x/term](https://github.com/golang/term) from 0.11.0 to 0.12.0.
- [Commits](https://github.com/golang/term/compare/v0.11.0...v0.12.0)

---
updated-dependencies:
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 12:47:55 -04:00
dependabot[bot]
eea5e6a5df Bump actions/checkout from 3 to 4 (#969)
Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 11:43:56 -04:00
dependabot[bot]
790268a176 Bump golang.org/x/sys from 0.11.0 to 0.12.0 (#968)
Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.11.0 to 0.12.0.
- [Commits](https://github.com/golang/sys/compare/v0.11.0...v0.12.0)

---
updated-dependencies:
- dependency-name: golang.org/x/sys
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 11:42:08 -04:00
brad-defined
06b480e177 Fix relay migration (#964)
* Fix for a relay migration issue on rehandshake. On rehandshake, the relay tunnel doesn't migrate to the new hostinfo object correctly, due to an incorrect Nebula IP sent in the CreateRelayRequest message.
* Add a test for this case

---------

Co-authored-by: Nate Brown <nbrown.us@gmail.com>
2023-09-05 09:29:27 -04:00
Nate Brown
076ebc6c6e Simplify getting a hostinfo or starting a handshake with one (#954) 2023-08-21 18:51:45 -05:00
Nate Brown
7edcf620c0 We only need the certificate in ConnectionState (#953) 2023-08-21 14:11:06 -05:00
Nate Brown
5a131b2975 Combine ca, cert, and key handling (#952) 2023-08-14 21:32:40 -05:00
Nate Brown
223cc6e660 Limit how often a busy tunnel can requery the lighthouse (#940)
Co-authored-by: Wade Simmons <wadey@slack-corp.com>
2023-08-08 13:26:41 -05:00
Wade Simmons
5671c6607c dependabot: group together common deps (#950)
Group together deps that are often updated together.

- https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#groups
2023-08-08 13:15:42 -04:00
dependabot[bot]
7ecafbe61d Bump golang.org/x/net from 0.13.0 to 0.14.0 (#947)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.13.0 to 0.14.0.
- [Commits](https://github.com/golang/net/compare/v0.13.0...v0.14.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-08 10:04:46 -05:00
dependabot[bot]
546eb3bfbc Bump golang.org/x/crypto from 0.11.0 to 0.12.0 (#949)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.11.0 to 0.12.0.
- [Commits](https://github.com/golang/crypto/compare/v0.11.0...v0.12.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-07 21:28:06 -05:00
dependabot[bot]
7364d99e34 Bump golang.org/x/term from 0.10.0 to 0.11.0 (#946)
Bumps [golang.org/x/term](https://github.com/golang/term) from 0.10.0 to 0.11.0.
- [Commits](https://github.com/golang/term/compare/v0.10.0...v0.11.0)

---
updated-dependencies:
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-07 21:07:30 -05:00
dependabot[bot]
83b6dc7b16 Bump golang.org/x/net from 0.12.0 to 0.13.0 (#943)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.12.0 to 0.13.0.
- [Commits](https://github.com/golang/net/compare/v0.12.0...v0.13.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-02 14:28:32 -04:00
Wade Simmons
3d0da7c859 update mergo to 1.0.0 (#941)
The mergo package has moved to a vanity URL. This causes fun issues with
dependabot. Update to the new release:

- https://github.com/darccio/mergo/releases/tag/v1.0.0
- https://github.com/darccio/mergo/compare/v0.3.15...v1.0.0
2023-08-02 14:00:20 -04:00
Caleb Jasik
ed00f5d530 Remove unused config code (last edited 4yrs ago) (#938) 2023-07-31 15:59:20 -05:00
dependabot[bot]
38e56a4858 Bump golang.org/x/net from 0.9.0 to 0.12.0 (#931) 2023-07-27 15:43:16 -05:00
dependabot[bot]
fce93ccb54 Bump google.golang.org/protobuf from 1.30.0 to 1.31.0 (#930) 2023-07-27 15:42:33 -05:00
dependabot[bot]
0d715effbc Bump Apple-Actions/import-codesign-certs from 1 to 2 (#923) 2023-07-27 15:31:36 -05:00
dependabot[bot]
0c003b64f1 Bump golang.org/x/term from 0.8.0 to 0.10.0 (#928) 2023-07-27 14:38:36 -05:00
Nate Brown
14d0106716 Send the lh update worker into its own routine instead of taking over the reload routine (#935) 2023-07-27 14:38:10 -05:00
dependabot[bot]
959b015b3b Bump github.com/sirupsen/logrus from 1.9.0 to 1.9.3 (#933) 2023-07-27 14:36:36 -05:00
Nate Brown
0bffa76b5e Build for openbsd (#812) 2023-07-27 14:27:35 -05:00
c0repwn3r
03e70210a5 Add support for NetBSD (#916) 2023-07-27 13:44:47 -05:00
Nate Brown
9c6592b159 Guard e2e udp and tun channels when closed (#934) 2023-07-26 12:52:14 -05:00
dependabot[bot]
e5af94e27a Bump github.com/prometheus/client_golang from 1.15.1 to 1.16.0 (#927)
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.15.1 to 1.16.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.15.1...v1.16.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-24 13:56:09 -04:00
dependabot[bot]
96f51f78ea Bump golang.org/x/sys from 0.8.0 to 0.10.0 (#926)
Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.8.0 to 0.10.0.
- [Commits](https://github.com/golang/sys/compare/v0.8.0...v0.10.0)

---
updated-dependencies:
- dependency-name: golang.org/x/sys
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-24 13:53:39 -04:00
Nate Brown
a10baeee92 Pull hostmap and pending hostmap apart, remove unused functions (#843) 2023-07-24 12:37:52 -05:00
dependabot[bot]
52c9e360e7 Bump github.com/miekg/dns from 1.1.54 to 1.1.55 (#925)
Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.54 to 1.1.55.
- [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release)
- [Commits](https://github.com/miekg/dns/compare/v1.1.54...v1.1.55)

---
updated-dependencies:
- dependency-name: github.com/miekg/dns
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-24 12:52:29 -04:00
dependabot[bot]
8caaff7109 Bump github.com/stretchr/testify from 1.8.2 to 1.8.4 (#924)
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.8.2 to 1.8.4.
- [Release notes](https://github.com/stretchr/testify/releases)
- [Commits](https://github.com/stretchr/testify/compare/v1.8.2...v1.8.4)

---
updated-dependencies:
- dependency-name: github.com/stretchr/testify
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-24 12:51:31 -04:00
Nate Brown
1e3c155896 Attempt to notify systemd of service readiness on linux (#929) 2023-07-24 11:30:18 -05:00
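For readers unfamiliar with the mechanism referenced in #929: systemd exports the notification socket path in `NOTIFY_SOCKET`, and a `Type=notify` service reports readiness by writing `READY=1` to that datagram socket. The sketch below is only an illustration of that protocol, not the code merged in #929.
```go
package main

import (
	"net"
	"os"
)

// notifyReady reports readiness to systemd by writing "READY=1" to the
// datagram socket named in NOTIFY_SOCKET. Illustrative sketch only.
func notifyReady() error {
	sock := os.Getenv("NOTIFY_SOCKET")
	if sock == "" {
		// Not started with Type=notify; nothing to do.
		return nil
	}
	conn, err := net.DialUnix("unixgram", nil, &net.UnixAddr{Name: sock, Net: "unixgram"})
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = conn.Write([]byte("READY=1"))
	return err
}

func main() {
	// Call once the tun device and listeners are up.
	_ = notifyReady()
}
```
The matching unit file must declare `Type=notify`, otherwise systemd does not wait for the message.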
Wade Simmons
f5db03c834 add dependabot config (#922)
This should give us PRs weekly with dependency updates, and also let us
manually check for updates when needed.

- https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
2023-07-21 17:21:58 -04:00
Nate Brown
c5ce945852 Update README to include a link to go install docs (#919) 2023-07-20 21:30:38 -05:00
John Maguire
7e380bde7e Document new DNS config options (#879) 2023-07-10 15:19:05 -04:00
Nate Brown
a3e59a38ef Use registered io on Windows when possible (#905) 2023-07-10 12:43:48 -05:00
John Maguire
8ba5d64dbc Add support for naming FreeBSD tun devices (#903) 2023-06-22 12:13:31 -04:00
Nate Brown
3bbf5f4e67 Use an interface for udp conns (#901) 2023-06-14 10:48:52 -05:00
Wade Simmons
928731acfe fix up the release workflow (#891)
actions/create-release is deprecated, just switch to using `gh` cli.
This is actually much easier anyway!
2023-06-14 11:45:01 -04:00
Nate Brown
57eb80e9fb v1.7.2 (#887)
Update CHANGELOG for Nebula v1.7.2
2023-06-01 11:05:07 -04:00
brad-defined
96f4dcaab8 Fix reconfig freeze attempting to send to an unbuffered, unread channel (#886)
* Fixes a reconfig freeze where the reconfig attempts to send to an unbuffered channel with no readers.
Only create stop channel when a DNS goroutine is created, and only send when the channel exists.
Buffer to size 1 so that the stop message can be immediately sent even if the goroutine is busy doing DNS lookups.
2023-05-31 16:05:46 -04:00
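A minimal sketch of the pattern that commit describes: create the stop channel only when the DNS goroutine exists, and give it a buffer of one so the reload path never blocks. The names below are hypothetical; this is not the code from #886.
```go
package main

import "time"

type dnsServer struct {
	stop chan struct{}
}

func (d *dnsServer) start() {
	// Buffer of 1: a reload can queue the stop signal even while the
	// goroutine is busy handling a lookup, so the sender never blocks.
	d.stop = make(chan struct{}, 1)
	go func() {
		ticker := time.NewTicker(time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-d.stop:
				return
			case <-ticker.C:
				// handle queued DNS lookups here
			}
		}
	}()
}

func (d *dnsServer) shutdown() {
	// Only send if the goroutine was ever started; an unbuffered send with
	// no reader is exactly the freeze the fix removes.
	if d.stop != nil {
		d.stop <- struct{}{}
	}
}

func main() {
	d := &dnsServer{}
	d.start()
	d.shutdown()
}
```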
Wade Simmons
6d8c5f437c GitHub actions update setup-go (#881)
This does caching for us, so we can remove our manual caching of modules
2023-05-23 13:24:33 -04:00
John Maguire
165b671e70 v1.7.1 (#878)
Update CHANGELOG for Nebula v1.7.1
2023-05-18 15:39:24 -04:00
brad-defined
6be0bad68a Fix static_host_map DNS lookup Linux issue - put v4 addr into v6 slice (#877) 2023-05-18 14:13:32 -04:00
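The pitfall behind that fix is a common Go one: an IPv4 address returned by the resolver is often held in 16-byte (IPv4-in-IPv6) form, so bucketing addresses by slice length files it under IPv6. A hedged sketch of the correct classification, not the actual patch from #877:
```go
package main

import (
	"fmt"
	"net"
)

// splitAddrs buckets resolved addresses into v4 and v6. Checking To4() first
// matters: a v4 address may arrive as a 16-byte IPv4-in-IPv6 value, and
// sorting by len(ip) would misfile it as IPv6.
func splitAddrs(ips []net.IP) (v4, v6 []net.IP) {
	for _, ip := range ips {
		if ip4 := ip.To4(); ip4 != nil {
			v4 = append(v4, ip4) // normalize to the 4-byte form
		} else {
			v6 = append(v6, ip)
		}
	}
	return v4, v6
}

func main() {
	ips, err := net.LookupIP("lighthouse.example.com") // hypothetical host
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	v4, v6 := splitAddrs(ips)
	fmt.Println("v4:", v4, "v6:", v6)
}
```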
162 changed files with 8854 additions and 5833 deletions


@@ -14,7 +14,7 @@ body:
   - type: input
     id: version
     attributes:
-      label: What version of `nebula` are you using?
+      label: What version of `nebula` are you using? (`nebula -version`)
       placeholder: 0.0.0
     validations:
       required: true
@@ -41,10 +41,17 @@ body:
     attributes:
       label: Logs from affected hosts
       description: |
-        Provide logs from all affected hosts during the time of the issue.
+        Please provide logs from ALL affected hosts during the time of the issue. If you do not provide logs we will be unable to assist you!
+        [Learn how to find Nebula logs here.](https://nebula.defined.net/docs/guides/viewing-nebula-logs/)
         Improve formatting by using <code>```</code> at the beginning and end of each log block.
+      value: |
+        ```
+        ```
     validations:
-      required: false
+      required: true
   - type: textarea
     id: configs
@@ -52,6 +59,11 @@ body:
       label: Config files from affected hosts
       description: |
         Provide config files for all affected hosts.
         Improve formatting by using <code>```</code> at the beginning and end of each config file.
+      value: |
+        ```
+        ```
     validations:
-      required: false
+      required: true

.github/dependabot.yml (new file)

@@ -0,0 +1,22 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "weekly"
groups:
golang-x-dependencies:
patterns:
- "golang.org/x/*"
zx2c4-dependencies:
patterns:
- "golang.zx2c4.com/*"
protobuf-dependencies:
patterns:
- "github.com/golang/protobuf"
- "google.golang.org/protobuf"


@@ -14,21 +14,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
-        id: go
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-      - uses: actions/cache@v2
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-gofmt1.20-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-gofmt1.20-
+          go-version: '1.22'
+          check-latest: true
       - name: Install goimports
         run: |


@@ -7,25 +7,24 @@ name: Create release and upload binaries
 jobs:
   build-linux:
-    name: Build Linux All
+    name: Build Linux/BSD All
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
-      - name: Checkout code
-        uses: actions/checkout@v2
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: '1.22'
+          check-latest: true
       - name: Build
         run: |
-          make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd
+          make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd release-openbsd release-netbsd
           mkdir release
           mv build/*.tar.gz release
       - name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
         with:
           name: linux-latest
           path: release
@@ -34,13 +33,12 @@ jobs:
     name: Build Windows
     runs-on: windows-latest
     steps:
-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
-      - name: Checkout code
-        uses: actions/checkout@v2
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: '1.22'
+          check-latest: true
       - name: Build
         run: |
@@ -57,7 +55,7 @@ jobs:
           mv dist\windows\wintun build\dist\windows\
       - name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
        with:
          name: windows-latest
          path: build
@@ -66,19 +64,18 @@ jobs:
     name: Build Universal Darwin
     env:
       HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }}
-    runs-on: macos-11
+    runs-on: macos-latest
     steps:
-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
-      - name: Checkout code
-        uses: actions/checkout@v2
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
+        with:
+          go-version: '1.22'
+          check-latest: true
       - name: Import certificates
         if: env.HAS_SIGNING_CREDS == 'true'
-        uses: Apple-Actions/import-codesign-certs@v1
+        uses: Apple-Actions/import-codesign-certs@v3
        with:
          p12-file-base64: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_P12_BASE64 }}
          p12-password: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_PASSWORD }}
@@ -107,22 +104,72 @@ jobs:
           fi
       - name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
        with:
          name: darwin-latest
          path: ./release/*
build-docker:
name: Create and Upload Docker Images
# Technically we only need build-linux to succeed, but if any platforms fail we'll
# want to investigate and restart the build
needs: [build-linux, build-darwin, build-windows]
runs-on: ubuntu-latest
env:
HAS_DOCKER_CREDS: ${{ vars.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
# XXX It's not possible to write a conditional here, so instead we do it on every step
#if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
steps:
# Be sure to checkout the code before downloading artifacts, or they will
# be overwritten
- name: Checkout code
if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
uses: actions/checkout@v4
- name: Download artifacts
if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
uses: actions/download-artifact@v4
with:
name: linux-latest
path: artifacts
- name: Login to Docker Hub
if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
uses: docker/login-action@v3
with:
username: ${{ vars.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Set up Docker Buildx
if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
uses: docker/setup-buildx-action@v3
- name: Build and push images
if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
env:
DOCKER_IMAGE_REPO: ${{ vars.DOCKER_IMAGE_REPO || 'nebulaoss/nebula' }}
DOCKER_IMAGE_TAG: ${{ vars.DOCKER_IMAGE_TAG || 'latest' }}
run: |
mkdir -p build/linux-{amd64,arm64}
tar -zxvf artifacts/nebula-linux-amd64.tar.gz -C build/linux-amd64/
tar -zxvf artifacts/nebula-linux-arm64.tar.gz -C build/linux-arm64/
docker buildx build . --push -f docker/Dockerfile --platform linux/amd64,linux/arm64 --tag "${DOCKER_IMAGE_REPO}:${DOCKER_IMAGE_TAG}" --tag "${DOCKER_IMAGE_REPO}:${GITHUB_REF#refs/tags/v}"
   release:
     name: Create and Upload Release
     needs: [build-linux, build-darwin, build-windows]
     runs-on: ubuntu-latest
     steps:
+      - uses: actions/checkout@v4
       - name: Download artifacts
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v4
+        with:
+          path: artifacts
       - name: Zip Windows
         run: |
-          cd windows-latest
+          cd artifacts/windows-latest
           cp windows-amd64/* .
           zip -r nebula-windows-amd64.zip nebula.exe nebula-cert.exe dist
           cp windows-arm64/* .
@@ -130,6 +177,7 @@ jobs:
       - name: Create sha256sum
         run: |
+          cd artifacts
           for dir in linux-latest darwin-latest windows-latest
           do
             (
@@ -159,195 +207,12 @@ jobs:
       - name: Create Release
         id: create_release
-        uses: actions/create-release@v1
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          tag_name: ${{ github.ref }}
-          release_name: Release ${{ github.ref }}
-          draft: false
-          prerelease: false
+        run: |
+          cd artifacts
+          gh release create \
+            --verify-tag \
+            --title "Release ${{ github.ref_name }}" \
+            "${{ github.ref_name }}" \
+            *-latest/*.zip *-latest/*.tar.gz
-        ## SHASUM256.txt
-        ## Upload assets (I wish we could just upload the whole folder at once...
-        ##
- name: Upload SHASUM256.txt
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./SHASUM256.txt
asset_name: SHASUM256.txt
asset_content_type: text/plain
- name: Upload darwin zip
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./darwin-latest/nebula-darwin.zip
asset_name: nebula-darwin.zip
asset_content_type: application/zip
- name: Upload windows-amd64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./windows-latest/nebula-windows-amd64.zip
asset_name: nebula-windows-amd64.zip
asset_content_type: application/zip
- name: Upload windows-arm64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./windows-latest/nebula-windows-arm64.zip
asset_name: nebula-windows-arm64.zip
asset_content_type: application/zip
- name: Upload linux-amd64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-amd64.tar.gz
asset_name: nebula-linux-amd64.tar.gz
asset_content_type: application/gzip
- name: Upload linux-386
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-386.tar.gz
asset_name: nebula-linux-386.tar.gz
asset_content_type: application/gzip
- name: Upload linux-ppc64le
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-ppc64le.tar.gz
asset_name: nebula-linux-ppc64le.tar.gz
asset_content_type: application/gzip
- name: Upload linux-arm-5
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-arm-5.tar.gz
asset_name: nebula-linux-arm-5.tar.gz
asset_content_type: application/gzip
- name: Upload linux-arm-6
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-arm-6.tar.gz
asset_name: nebula-linux-arm-6.tar.gz
asset_content_type: application/gzip
- name: Upload linux-arm-7
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-arm-7.tar.gz
asset_name: nebula-linux-arm-7.tar.gz
asset_content_type: application/gzip
- name: Upload linux-arm64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-arm64.tar.gz
asset_name: nebula-linux-arm64.tar.gz
asset_content_type: application/gzip
- name: Upload linux-mips
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-mips.tar.gz
asset_name: nebula-linux-mips.tar.gz
asset_content_type: application/gzip
- name: Upload linux-mipsle
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-mipsle.tar.gz
asset_name: nebula-linux-mipsle.tar.gz
asset_content_type: application/gzip
- name: Upload linux-mips64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-mips64.tar.gz
asset_name: nebula-linux-mips64.tar.gz
asset_content_type: application/gzip
- name: Upload linux-mips64le
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-mips64le.tar.gz
asset_name: nebula-linux-mips64le.tar.gz
asset_content_type: application/gzip
- name: Upload linux-mips-softfloat
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-mips-softfloat.tar.gz
asset_name: nebula-linux-mips-softfloat.tar.gz
asset_content_type: application/gzip
- name: Upload linux-riscv64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-riscv64.tar.gz
asset_name: nebula-linux-riscv64.tar.gz
asset_content_type: application/gzip
- name: Upload freebsd-amd64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-freebsd-amd64.tar.gz
asset_name: nebula-freebsd-amd64.tar.gz
asset_content_type: application/gzip

.github/workflows/smoke-extra.yml (new file)

@@ -0,0 +1,48 @@
name: smoke-extra
on:
push:
branches:
- master
pull_request:
types: [opened, synchronize, labeled, reopened]
paths:
- '.github/workflows/smoke**'
- '**Makefile'
- '**.go'
- '**.proto'
- 'go.mod'
- 'go.sum'
jobs:
smoke-extra:
if: github.ref == 'refs/heads/master' || contains(github.event.pull_request.labels.*.name, 'smoke-test-extra')
name: Run extra smoke tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
check-latest: true
- name: install vagrant
run: sudo apt-get update && sudo apt-get install -y vagrant virtualbox
- name: freebsd-amd64
run: make smoke-vagrant/freebsd-amd64
- name: openbsd-amd64
run: make smoke-vagrant/openbsd-amd64
- name: netbsd-amd64
run: make smoke-vagrant/netbsd-amd64
- name: linux-386
run: make smoke-vagrant/linux-386
- name: linux-amd64-ipv6disable
run: make smoke-vagrant/linux-amd64-ipv6disable
timeout-minutes: 30


@@ -18,24 +18,15 @@ jobs:
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
-        id: go
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-      - uses: actions/cache@v2
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go1.20-
+          go-version: '1.22'
+          check-latest: true
       - name: build
-        run: make bin-docker
+        run: make bin-docker CGO_ENABLED=1 BUILD_ARGS=-race
       - name: setup docker image
         working-directory: ./.github/workflows/smoke


@@ -41,4 +41,4 @@ EOF
 ../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
 )
-sudo docker build -t nebula:smoke-relay .
+docker build -t nebula:smoke-relay .


@@ -11,6 +11,11 @@ mkdir ./build
 cp ../../../../build/linux-amd64/nebula .
 cp ../../../../build/linux-amd64/nebula-cert .
+if [ "$1" ]
+then
+  cp "../../../../build/$1/nebula" "$1-nebula"
+fi
 HOST="lighthouse1" \
   AM_LIGHTHOUSE=true \
   ../genconfig.sh >lighthouse1.yml
@@ -36,4 +41,4 @@ mkdir ./build
 ../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
 )
-sudo docker build -t "nebula:${NAME:-smoke}" .
+docker build -t "nebula:${NAME:-smoke}" .


@@ -47,7 +47,7 @@ listen:
   port: ${LISTEN_PORT:-4242}
 tun:
-  dev: ${TUN_DEV:-nebula1}
+  dev: ${TUN_DEV:-tun0}
 firewall:
   inbound_action: reject


@@ -14,24 +14,24 @@ cleanup() {
set +e set +e
if [ "$(jobs -r)" ] if [ "$(jobs -r)" ]
then then
sudo docker kill lighthouse1 host2 host3 host4 docker kill lighthouse1 host2 host3 host4
fi fi
} }
trap cleanup EXIT trap cleanup EXIT
sudo docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test
sudo docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test
sudo docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test
sudo docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' & docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
sleep 1 sleep 1
sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' & docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
sleep 1 sleep 1
sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' & docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
sleep 1 sleep 1
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' & docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
sleep 1 sleep 1
set +x set +x
@@ -39,44 +39,44 @@ echo
echo " *** Testing ping from lighthouse1" echo " *** Testing ping from lighthouse1"
echo echo
set -x set -x
sudo docker exec lighthouse1 ping -c1 192.168.100.2 docker exec lighthouse1 ping -c1 192.168.100.2
sudo docker exec lighthouse1 ping -c1 192.168.100.3 docker exec lighthouse1 ping -c1 192.168.100.3
sudo docker exec lighthouse1 ping -c1 192.168.100.4 docker exec lighthouse1 ping -c1 192.168.100.4
set +x set +x
echo echo
echo " *** Testing ping from host2" echo " *** Testing ping from host2"
echo echo
set -x set -x
sudo docker exec host2 ping -c1 192.168.100.1 docker exec host2 ping -c1 192.168.100.1
# Should fail because no relay configured in this direction # Should fail because no relay configured in this direction
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1 ! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
! sudo docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1 ! docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1
set +x set +x
echo echo
echo " *** Testing ping from host3" echo " *** Testing ping from host3"
echo echo
set -x set -x
sudo docker exec host3 ping -c1 192.168.100.1 docker exec host3 ping -c1 192.168.100.1
sudo docker exec host3 ping -c1 192.168.100.2 docker exec host3 ping -c1 192.168.100.2
sudo docker exec host3 ping -c1 192.168.100.4 docker exec host3 ping -c1 192.168.100.4
set +x set +x
echo echo
echo " *** Testing ping from host4" echo " *** Testing ping from host4"
echo echo
set -x set -x
sudo docker exec host4 ping -c1 192.168.100.1 docker exec host4 ping -c1 192.168.100.1
# Should fail because relays not allowed # Should fail because relays not allowed
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1 ! docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
sudo docker exec host4 ping -c1 192.168.100.3 docker exec host4 ping -c1 192.168.100.3
sudo docker exec host4 sh -c 'kill 1' docker exec host4 sh -c 'kill 1'
sudo docker exec host3 sh -c 'kill 1' docker exec host3 sh -c 'kill 1'
sudo docker exec host2 sh -c 'kill 1' docker exec host2 sh -c 'kill 1'
sudo docker exec lighthouse1 sh -c 'kill 1' docker exec lighthouse1 sh -c 'kill 1'
sleep 1 sleep 5
if [ "$(jobs -r)" ] if [ "$(jobs -r)" ]
then then

.github/workflows/smoke/smoke-vagrant.sh (new executable file)

@@ -0,0 +1,105 @@
#!/bin/bash
set -e -x
set -o pipefail
export VAGRANT_CWD="$PWD/vagrant-$1"
mkdir -p logs
cleanup() {
echo
echo " *** cleanup"
echo
set +e
if [ "$(jobs -r)" ]
then
docker kill lighthouse1 host2
fi
vagrant destroy -f
}
trap cleanup EXIT
CONTAINER="nebula:${NAME:-smoke}"
docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
vagrant up
vagrant ssh -c "cd /nebula && /nebula/$1-nebula -config host3.yml -test"
docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
sleep 1
docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
sleep 1
vagrant ssh -c "cd /nebula && sudo sh -c 'echo \$\$ >/nebula/pid && exec /nebula/$1-nebula -config host3.yml'" &
sleep 15
# grab tcpdump pcaps for debugging
docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
# vagrant ssh -c "tcpdump -i nebula1 -q -w - -U" 2>logs/host3.inside.log >logs/host3.inside.pcap &
# vagrant ssh -c "tcpdump -i eth0 -q -w - -U" 2>logs/host3.outside.log >logs/host3.outside.pcap &
docker exec host2 ncat -nklv 0.0.0.0 2000 &
vagrant ssh -c "ncat -nklv 0.0.0.0 2000" &
#docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
#vagrant ssh -c "ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000" &
set +x
echo
echo " *** Testing ping from lighthouse1"
echo
set -x
docker exec lighthouse1 ping -c1 192.168.100.2
docker exec lighthouse1 ping -c1 192.168.100.3
set +x
echo
echo " *** Testing ping from host2"
echo
set -x
docker exec host2 ping -c1 192.168.100.1
# Should fail because not allowed by host3 inbound firewall
! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
set +x
echo
echo " *** Testing ncat from host2"
echo
set -x
# Should fail because not allowed by host3 inbound firewall
#! docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
#! docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
set +x
echo
echo " *** Testing ping from host3"
echo
set -x
vagrant ssh -c "ping -c1 192.168.100.1"
vagrant ssh -c "ping -c1 192.168.100.2"
set +x
echo
echo " *** Testing ncat from host3"
echo
set -x
#vagrant ssh -c "ncat -nzv -w5 192.168.100.2 2000"
#vagrant ssh -c "ncat -nzuv -w5 192.168.100.2 3000" | grep -q host2
vagrant ssh -c "sudo xargs kill </nebula/pid"
docker exec host2 sh -c 'kill 1'
docker exec lighthouse1 sh -c 'kill 1'
sleep 1
if [ "$(jobs -r)" ]
then
echo "nebula still running after SIGTERM sent" >&2
exit 1
fi


@@ -14,7 +14,7 @@ cleanup() {
set +e set +e
if [ "$(jobs -r)" ] if [ "$(jobs -r)" ]
then then
sudo docker kill lighthouse1 host2 host3 host4 docker kill lighthouse1 host2 host3 host4
fi fi
} }
@@ -22,51 +22,51 @@ trap cleanup EXIT
CONTAINER="nebula:${NAME:-smoke}" CONTAINER="nebula:${NAME:-smoke}"
sudo docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
sudo docker run --name host2 --rm "$CONTAINER" -config host2.yml -test docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
sudo docker run --name host3 --rm "$CONTAINER" -config host3.yml -test docker run --name host3 --rm "$CONTAINER" -config host3.yml -test
sudo docker run --name host4 --rm "$CONTAINER" -config host4.yml -test docker run --name host4 --rm "$CONTAINER" -config host4.yml -test
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' & docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
sleep 1 sleep 1
sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' & docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
sleep 1 sleep 1
sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' & docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
sleep 1 sleep 1
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' & docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
sleep 1 sleep 1
# grab tcpdump pcaps for debugging # grab tcpdump pcaps for debugging
sudo docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap & docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
sudo docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap & docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
sudo docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap & docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
sudo docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap & docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
sudo docker exec host3 tcpdump -i nebula1 -q -w - -U 2>logs/host3.inside.log >logs/host3.inside.pcap & docker exec host3 tcpdump -i nebula1 -q -w - -U 2>logs/host3.inside.log >logs/host3.inside.pcap &
sudo docker exec host3 tcpdump -i eth0 -q -w - -U 2>logs/host3.outside.log >logs/host3.outside.pcap & docker exec host3 tcpdump -i eth0 -q -w - -U 2>logs/host3.outside.log >logs/host3.outside.pcap &
sudo docker exec host4 tcpdump -i nebula1 -q -w - -U 2>logs/host4.inside.log >logs/host4.inside.pcap & docker exec host4 tcpdump -i nebula1 -q -w - -U 2>logs/host4.inside.log >logs/host4.inside.pcap &
sudo docker exec host4 tcpdump -i eth0 -q -w - -U 2>logs/host4.outside.log >logs/host4.outside.pcap & docker exec host4 tcpdump -i eth0 -q -w - -U 2>logs/host4.outside.log >logs/host4.outside.pcap &
sudo docker exec host2 ncat -nklv 0.0.0.0 2000 & docker exec host2 ncat -nklv 0.0.0.0 2000 &
sudo docker exec host3 ncat -nklv 0.0.0.0 2000 & docker exec host3 ncat -nklv 0.0.0.0 2000 &
sudo docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 & docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
sudo docker exec host3 ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000 & docker exec host3 ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000 &
set +x set +x
echo echo
echo " *** Testing ping from lighthouse1" echo " *** Testing ping from lighthouse1"
echo echo
set -x set -x
sudo docker exec lighthouse1 ping -c1 192.168.100.2 docker exec lighthouse1 ping -c1 192.168.100.2
sudo docker exec lighthouse1 ping -c1 192.168.100.3 docker exec lighthouse1 ping -c1 192.168.100.3
set +x set +x
echo echo
echo " *** Testing ping from host2" echo " *** Testing ping from host2"
echo echo
set -x set -x
sudo docker exec host2 ping -c1 192.168.100.1 docker exec host2 ping -c1 192.168.100.1
# Should fail because not allowed by host3 inbound firewall # Should fail because not allowed by host3 inbound firewall
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1 ! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
set +x set +x
echo echo
@@ -74,34 +74,34 @@ echo " *** Testing ncat from host2"
echo echo
set -x set -x
# Should fail because not allowed by host3 inbound firewall # Should fail because not allowed by host3 inbound firewall
! sudo docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1 ! docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
! sudo docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1 ! docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
set +x set +x
echo echo
echo " *** Testing ping from host3" echo " *** Testing ping from host3"
echo echo
set -x set -x
sudo docker exec host3 ping -c1 192.168.100.1 docker exec host3 ping -c1 192.168.100.1
sudo docker exec host3 ping -c1 192.168.100.2 docker exec host3 ping -c1 192.168.100.2
set +x set +x
echo echo
echo " *** Testing ncat from host3" echo " *** Testing ncat from host3"
echo echo
set -x set -x
sudo docker exec host3 ncat -nzv -w5 192.168.100.2 2000 docker exec host3 ncat -nzv -w5 192.168.100.2 2000
sudo docker exec host3 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 docker exec host3 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2
set +x set +x
echo echo
echo " *** Testing ping from host4" echo " *** Testing ping from host4"
echo echo
set -x set -x
sudo docker exec host4 ping -c1 192.168.100.1 docker exec host4 ping -c1 192.168.100.1
# Should fail because not allowed by host4 outbound firewall # Should fail because not allowed by host4 outbound firewall
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1 ! docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
! sudo docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1 ! docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
set +x set +x
echo echo
@@ -109,10 +109,10 @@ echo " *** Testing ncat from host4"
echo echo
set -x set -x
# Should fail because not allowed by host4 outbound firewall # Should fail because not allowed by host4 outbound firewall
! sudo docker exec host4 ncat -nzv -w5 192.168.100.2 2000 || exit 1 ! docker exec host4 ncat -nzv -w5 192.168.100.2 2000 || exit 1
! sudo docker exec host4 ncat -nzv -w5 192.168.100.3 2000 || exit 1 ! docker exec host4 ncat -nzv -w5 192.168.100.3 2000 || exit 1
! sudo docker exec host4 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 || exit 1 ! docker exec host4 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 || exit 1
! sudo docker exec host4 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1 ! docker exec host4 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
set +x set +x
echo echo
@@ -120,16 +120,16 @@ echo " *** Testing conntrack"
echo echo
set -x set -x
# host2 can ping host3 now that host3 pinged it first # host2 can ping host3 now that host3 pinged it first
sudo docker exec host2 ping -c1 192.168.100.3 docker exec host2 ping -c1 192.168.100.3
# host4 can ping host2 once conntrack established # host4 can ping host2 once conntrack established
sudo docker exec host2 ping -c1 192.168.100.4 docker exec host2 ping -c1 192.168.100.4
sudo docker exec host4 ping -c1 192.168.100.2 docker exec host4 ping -c1 192.168.100.2
sudo docker exec host4 sh -c 'kill 1' docker exec host4 sh -c 'kill 1'
sudo docker exec host3 sh -c 'kill 1' docker exec host3 sh -c 'kill 1'
sudo docker exec host2 sh -c 'kill 1' docker exec host2 sh -c 'kill 1'
sudo docker exec lighthouse1 sh -c 'kill 1' docker exec lighthouse1 sh -c 'kill 1'
sleep 1 sleep 5
if [ "$(jobs -r)" ] if [ "$(jobs -r)" ]
then then


@@ -0,0 +1,7 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
config.vm.box = "generic/freebsd14"
config.vm.synced_folder "../build", "/nebula", type: "rsync"
end


@@ -0,0 +1,7 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/xenial32"
config.vm.synced_folder "../build", "/nebula"
end


@@ -0,0 +1,16 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
config.vm.box = "ubuntu/jammy64"
config.vm.synced_folder "../build", "/nebula"
config.vm.provision :shell do |shell|
shell.inline = <<-EOF
sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="ipv6.disable=1"/' /etc/default/grub
update-grub
EOF
shell.privileged = true
shell.reboot = true
end
end


@@ -0,0 +1,7 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
config.vm.box = "generic/netbsd9"
config.vm.synced_folder "../build", "/nebula", type: "rsync"
end


@@ -0,0 +1,7 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
Vagrant.configure("2") do |config|
config.vm.box = "generic/openbsd7"
config.vm.synced_folder "../build", "/nebula", type: "rsync"
end


@@ -18,21 +18,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
-        id: go
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-      - uses: actions/cache@v2
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go1.20-
+          go-version: '1.22'
+          check-latest: true
       - name: Build
         run: make all
@@ -46,10 +37,13 @@ jobs:
       - name: End 2 end
         run: make e2evv
-      - uses: actions/upload-artifact@v3
+      - name: Build test mobile
+        run: make build-test-mobile
+      - uses: actions/upload-artifact@v4
         with:
-          name: e2e packet flow
-          path: e2e/mermaid/
+          name: e2e packet flow linux-latest
+          path: e2e/mermaid/linux-latest
           if-no-files-found: warn
   test-linux-boringcrypto:
@@ -57,21 +51,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
-        id: go
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-      - uses: actions/cache@v2
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go1.20-
+          go-version: '1.22'
+          check-latest: true
       - name: Build
         run: make bin-boringcrypto
@@ -87,24 +72,15 @@ jobs:
     runs-on: ${{ matrix.os }}
     strategy:
       matrix:
-        os: [windows-latest, macos-11]
+        os: [windows-latest, macos-latest]
     steps:
-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
-        id: go
-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-      - uses: actions/cache@v2
+      - uses: actions/checkout@v4
+      - uses: actions/setup-go@v5
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go1.20-
+          go-version: '1.22'
+          check-latest: true
       - name: Build nebula
         run: go build ./cmd/nebula
@@ -121,8 +97,8 @@ jobs:
       - name: End 2 end
         run: make e2evv
-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
         with:
-          name: e2e packet flow
-          path: e2e/mermaid/
+          name: e2e packet flow ${{ matrix.os }}
+          path: e2e/mermaid/${{ matrix.os }}
           if-no-files-found: warn


@@ -7,6 +7,234 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
### Changed
- Various dependency updates.
## [1.9.7] - 2025-10-10
### Security
- Fix an issue where Nebula could incorrectly accept and process a packet from an erroneous source IP when the sender's
certificate is configured with unsafe_routes (cert v1/v2) or multiple IPs (cert v2). (#1494)
### Changed
- Disable sending `recv_error` messages when a packet is received outside the allowable counter window. (#1459)
- Improve error messages and remove some unnecessary fatal conditions in the Windows and generic udp listener. (#1543)
## [1.9.6] - 2025-7-15
### Added
- Support dropping inactive tunnels. This is disabled by default in this release but can be enabled with `tunnels.drop_inactive`. See example config for more details. (#1413)
### Fixed
- Fix Darwin freeze due to presence of some Network Extensions (#1426)
- Ensure the same relay tunnel is always used when multiple relay tunnels are present (#1422)
- Fix Windows freeze due to ICMP error handling (#1412)
- Fix relay migration panic (#1403)
## [1.9.5] - 2024-12-05
### Added
- Gracefully ignore v2 certificates. (#1282)
### Fixed
- Fix relays that refuse to re-establish after one of the remote tunnel pairs breaks. (#1277)
## [1.9.4] - 2024-09-09
### Added
- Support UDP dialing with gVisor. (#1181)
### Changed
- Make some Nebula state programmatically available via control object. (#1188)
- Switch internal representation of IPs to netip, to prepare for IPv6 support
in the overlay. (#1173)
- Minor build and cleanup changes. (#1171, #1164, #1162)
- Various dependency updates. (#1195, #1190, #1174, #1168, #1167, #1161, #1147, #1146)
### Fixed
- Fix a bug on big endian hosts, like mips. (#1194)
- Fix a rare panic if a local index collision happens. (#1191)
- Fix integer wraparound in the calculation of handshake timeouts on 32-bit targets. (#1185)
## [1.9.3] - 2024-06-06
### Fixed
- Initialize messageCounter to 2 instead of verifying later. (#1156)
## [1.9.2] - 2024-06-03
### Fixed
- Ensure messageCounter is set before handshake is complete. (#1154)
## [1.9.1] - 2024-05-29
### Fixed
- Fixed a potential deadlock in GetOrHandshake. (#1151)
## [1.9.0] - 2024-05-07
### Deprecated
- This release adds a new setting `default_local_cidr_any` that defaults to
true to match previous behavior, but will default to false in the next
release (1.10). When set to false, `local_cidr` is matched correctly for
firewall rules on hosts acting as unsafe routers, and should be set for any
firewall rules you want to allow unsafe route hosts to access. See the issue
and example config for more details. (#1071, #1099)
### Added
- Nebula now has an official Docker image `nebulaoss/nebula` that is
distroless and contains just the `nebula` and `nebula-cert` binaries. You
can find it here: https://hub.docker.com/r/nebulaoss/nebula (#1037)
- Experimental binaries for `loong64` are now provided. (#1003)
- Added example service script for OpenRC. (#711)
- The SSH daemon now supports inlined host keys. (#1054)
- The SSH daemon now supports certificates with `sshd.trusted_cas`. (#1098)
### Changed
- Config setting `tun.unsafe_routes` is now reloadable. (#1083)
- Small documentation and internal improvements. (#1065, #1067, #1069, #1108,
#1109, #1111, #1135)
- Various dependency updates. (#1139, #1138, #1134, #1133, #1126, #1123, #1110,
#1094, #1092, #1087, #1086, #1085, #1072, #1063, #1059, #1055, #1053, #1047,
#1046, #1034, #1022)
### Removed
- Support for the deprecated `local_range` option has been removed. Please
change to `preferred_ranges` (which is also now reloadable). (#1043)
- We are now building with go1.22, which means that for Windows you need at
least Windows 10 or Windows Server 2016. This is because support for earlier
versions was removed in Go 1.21. See https://go.dev/doc/go1.21#windows (#981)
- Removed vagrant example, as it was unmaintained. (#1129)
- Removed Fedora and Arch nebula.service files, as they are maintained in the
upstream repos. (#1128, #1132)
- Remove the TCP round trip tracking metrics, as they never had correct data
and were an experiment to begin with. (#1114)
### Fixed
- Fixed a potential deadlock introduced in 1.8.1. (#1112)
- Fixed support for Linux when IPv6 has been disabled at the OS level. (#787)
- DNS will return NXDOMAIN now when there are no results. (#845)
- Allow `::` in `lighthouse.dns.host`. (#1115)
- Capitalization of `NotAfter` fixed in DNS TXT response. (#1127)
- Don't log invalid certificates. It is untrusted data and can cause a large
volume of logs. (#1116)
## [1.8.2] - 2024-01-08
### Fixed
- Fix multiple routines when listen.port is zero. This was a regression
introduced in v1.6.0. (#1057)
### Changed
- Small dependency update for Noise. (#1038)
## [1.8.1] - 2023-12-19
### Security
- Update `golang.org/x/crypto`, which includes a fix for CVE-2023-48795. (#1048)
### Fixed
- Fix a deadlock introduced in v1.8.0 that could occur during handshakes. (#1044)
- Fix mobile builds. (#1035)
## [1.8.0] - 2023-12-06
### Deprecated
- The next minor release of Nebula, 1.9.0, will require at least Windows 10 or
Windows Server 2016. This is because support for earlier versions was removed
in Go 1.21. See https://go.dev/doc/go1.21#windows
### Added
- Linux: Notify systemd of service readiness. This should resolve timing issues
with services that depend on Nebula being active. For an example of how to
enable this, see: `examples/service_scripts/nebula.service`. (#929)
- Windows: Use Registered IO (RIO) when possible. Testing on a Windows 11
machine shows ~50x improvement in throughput. (#905)
- NetBSD, OpenBSD: Added rudimentary support. (#916, #812)
- FreeBSD: Add support for naming tun devices. (#903)
### Changed
- `pki.disconnect_invalid` will now default to true. This means that once a
certificate expires, the tunnel will be disconnected. If you use SIGHUP to
reload certificates without restarting Nebula, you should ensure all of your
clients are on 1.7.0 or newer before you enable this feature. (#859)
- Limit how often a busy tunnel can requery the lighthouse. The new config
option `timers.requery_wait_duration` defaults to `60s`. (#940)
- The internal structures for hostmaps were refactored to reduce memory usage
and the potential for subtle bugs. (#843, #938, #953, #954, #955)
- Lots of dependency updates.
### Fixed
- Windows: Retry wintun device creation if it fails the first time. (#985)
- Fix issues with firewall reject packets that could cause panics. (#957)
- Fix relay migration during re-handshakes. (#964)
- Various other refactors and fixes. (#935, #952, #972, #961, #996, #1002,
#987, #1004, #1030, #1032, ...)
## [1.7.2] - 2023-06-01
### Fixed
- Fix a freeze during config reload if the `static_host_map` config was changed. (#886)
## [1.7.1] - 2023-05-18
### Fixed
- Fix IPv4 addresses returned by `static_host_map` DNS lookup queries being
treated as IPv6 addresses. (#877)
## [1.7.0] - 2023-05-17
### Added
@@ -475,7 +703,20 @@ created.)
- Initial public release. - Initial public release.
-[Unreleased]: https://github.com/slackhq/nebula/compare/v1.7.0...HEAD
+[Unreleased]: https://github.com/slackhq/nebula/compare/v1.9.7...HEAD
[1.9.7]: https://github.com/slackhq/nebula/releases/tag/v1.9.7
[1.9.6]: https://github.com/slackhq/nebula/releases/tag/v1.9.6
[1.9.5]: https://github.com/slackhq/nebula/releases/tag/v1.9.5
[1.9.4]: https://github.com/slackhq/nebula/releases/tag/v1.9.4
[1.9.3]: https://github.com/slackhq/nebula/releases/tag/v1.9.3
[1.9.2]: https://github.com/slackhq/nebula/releases/tag/v1.9.2
[1.9.1]: https://github.com/slackhq/nebula/releases/tag/v1.9.1
[1.9.0]: https://github.com/slackhq/nebula/releases/tag/v1.9.0
[1.8.2]: https://github.com/slackhq/nebula/releases/tag/v1.8.2
[1.8.1]: https://github.com/slackhq/nebula/releases/tag/v1.8.1
[1.8.0]: https://github.com/slackhq/nebula/releases/tag/v1.8.0
[1.7.2]: https://github.com/slackhq/nebula/releases/tag/v1.7.2
[1.7.1]: https://github.com/slackhq/nebula/releases/tag/v1.7.1
[1.7.0]: https://github.com/slackhq/nebula/releases/tag/v1.7.0
[1.6.1]: https://github.com/slackhq/nebula/releases/tag/v1.6.1
[1.6.0]: https://github.com/slackhq/nebula/releases/tag/v1.6.0


@@ -33,6 +33,5 @@ l.WithError(err).
   WithField("vpnIp", IntIp(hostinfo.hostId)).
   WithField("udpAddr", addr).
   WithField("handshake", m{"stage": 1, "style": "ix"}).
-  WithField("cert", remoteCert).
   Info("Invalid certificate from host")
```


@@ -1,20 +1,14 @@
GOMINVERSION = 1.20
NEBULA_CMD_PATH = "./cmd/nebula" NEBULA_CMD_PATH = "./cmd/nebula"
GO111MODULE = on
export GO111MODULE
CGO_ENABLED = 0 CGO_ENABLED = 0
export CGO_ENABLED export CGO_ENABLED
# Set up OS specific bits # Set up OS specific bits
ifeq ($(OS),Windows_NT) ifeq ($(OS),Windows_NT)
#TODO: we should be able to ditch awk as well
GOVERSION := $(shell go version | awk "{print substr($$3, 3)}")
GOISMIN := $(shell IF "$(GOVERSION)" GEQ "$(GOMINVERSION)" ECHO 1)
NEBULA_CMD_SUFFIX = .exe NEBULA_CMD_SUFFIX = .exe
NULL_FILE = nul NULL_FILE = nul
# RIO on windows does pointer stuff that makes go vet angry
VET_FLAGS = -unsafeptr=false
else else
GOVERSION := $(shell go version | awk '{print substr($$3, 3)}')
GOISMIN := $(shell expr "$(GOVERSION)" ">=" "$(GOMINVERSION)")
NEBULA_CMD_SUFFIX = NEBULA_CMD_SUFFIX =
NULL_FILE = /dev/null NULL_FILE = /dev/null
endif endif
@@ -28,6 +22,9 @@ ifndef BUILD_NUMBER
endif endif
endif endif
DOCKER_IMAGE_REPO ?= nebulaoss/nebula
DOCKER_IMAGE_TAG ?= latest
LDFLAGS = -X main.Build=$(BUILD_NUMBER) LDFLAGS = -X main.Build=$(BUILD_NUMBER)
ALL_LINUX = linux-amd64 \ ALL_LINUX = linux-amd64 \
@@ -42,12 +39,24 @@ ALL_LINUX = linux-amd64 \
linux-mips64 \ linux-mips64 \
linux-mips64le \ linux-mips64le \
linux-mips-softfloat \ linux-mips-softfloat \
linux-riscv64 linux-riscv64 \
linux-loong64
ALL_FREEBSD = freebsd-amd64 \
freebsd-arm64
ALL_OPENBSD = openbsd-amd64 \
openbsd-arm64
ALL_NETBSD = netbsd-amd64 \
netbsd-arm64
ALL = $(ALL_LINUX) \ ALL = $(ALL_LINUX) \
$(ALL_FREEBSD) \
$(ALL_OPENBSD) \
$(ALL_NETBSD) \
darwin-amd64 \ darwin-amd64 \
darwin-arm64 \ darwin-arm64 \
freebsd-amd64 \
windows-amd64 \ windows-amd64 \
windows-arm64 windows-arm64
@@ -69,13 +78,21 @@ e2evvvv: e2ev
e2e-bench: TEST_FLAGS = -bench=. -benchmem -run=^$ e2e-bench: TEST_FLAGS = -bench=. -benchmem -run=^$
e2e-bench: e2e e2e-bench: e2e
DOCKER_BIN = build/linux-amd64/nebula build/linux-amd64/nebula-cert
all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert) all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert)
docker: docker/linux-$(shell go env GOARCH)
release: $(ALL:%=build/nebula-%.tar.gz) release: $(ALL:%=build/nebula-%.tar.gz)
release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz) release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz)
release-freebsd: build/nebula-freebsd-amd64.tar.gz release-freebsd: $(ALL_FREEBSD:%=build/nebula-%.tar.gz)
release-openbsd: $(ALL_OPENBSD:%=build/nebula-%.tar.gz)
release-netbsd: $(ALL_NETBSD:%=build/nebula-%.tar.gz)
release-boringcrypto: build/nebula-linux-$(shell go env GOARCH)-boringcrypto.tar.gz release-boringcrypto: build/nebula-linux-$(shell go env GOARCH)-boringcrypto.tar.gz
@@ -93,6 +110,9 @@ bin-darwin: build/darwin-amd64/nebula build/darwin-amd64/nebula-cert
bin-freebsd: build/freebsd-amd64/nebula build/freebsd-amd64/nebula-cert bin-freebsd: build/freebsd-amd64/nebula build/freebsd-amd64/nebula-cert
mv $? . mv $? .
bin-freebsd-arm64: build/freebsd-arm64/nebula build/freebsd-arm64/nebula-cert
mv $? .
bin-boringcrypto: build/linux-$(shell go env GOARCH)-boringcrypto/nebula build/linux-$(shell go env GOARCH)-boringcrypto/nebula-cert bin-boringcrypto: build/linux-$(shell go env GOARCH)-boringcrypto/nebula build/linux-$(shell go env GOARCH)-boringcrypto/nebula-cert
mv $? . mv $? .
@@ -113,6 +133,8 @@ build/linux-mips-softfloat/%: LDFLAGS += -s -w
# boringcrypto # boringcrypto
build/linux-amd64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1 build/linux-amd64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
build/linux-arm64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1 build/linux-arm64-boringcrypto/%: GOENV += GOEXPERIMENT=boringcrypto CGO_ENABLED=1
build/linux-amd64-boringcrypto/%: LDFLAGS += -checklinkname=0
build/linux-arm64-boringcrypto/%: LDFLAGS += -checklinkname=0
build/%/nebula: .FORCE build/%/nebula: .FORCE
GOOS=$(firstword $(subst -, , $*)) \ GOOS=$(firstword $(subst -, , $*)) \
@@ -136,19 +158,28 @@ build/nebula-%.tar.gz: build/%/nebula build/%/nebula-cert
build/nebula-%.zip: build/%/nebula.exe build/%/nebula-cert.exe build/nebula-%.zip: build/%/nebula.exe build/%/nebula-cert.exe
cd build/$* && zip ../nebula-$*.zip nebula.exe nebula-cert.exe cd build/$* && zip ../nebula-$*.zip nebula.exe nebula-cert.exe
docker/%: build/%/nebula build/%/nebula-cert
docker build . $(DOCKER_BUILD_ARGS) -f docker/Dockerfile --platform "$(subst -,/,$*)" --tag "${DOCKER_IMAGE_REPO}:${DOCKER_IMAGE_TAG}" --tag "${DOCKER_IMAGE_REPO}:$(BUILD_NUMBER)"
vet: vet:
go vet -v ./... go vet $(VET_FLAGS) -v ./...
test: test:
go test -v ./... go test -v ./...
test-boringcrypto: test-boringcrypto:
GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go test -v ./... GOEXPERIMENT=boringcrypto CGO_ENABLED=1 go test -ldflags "-checklinkname=0" -v ./...
test-cov-html: test-cov-html:
go test -coverprofile=coverage.out go test -coverprofile=coverage.out
go tool cover -html=coverage.out go tool cover -html=coverage.out
build-test-mobile:
GOARCH=amd64 GOOS=ios go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
GOARCH=arm64 GOOS=ios go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
GOARCH=amd64 GOOS=android go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
GOARCH=arm64 GOOS=android go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
bench: bench:
go test -bench=. go test -bench=.
@@ -190,8 +221,13 @@ smoke-relay-docker: bin-docker
cd .github/workflows/smoke/ && ./smoke-relay.sh cd .github/workflows/smoke/ && ./smoke-relay.sh
smoke-docker-race: BUILD_ARGS = -race smoke-docker-race: BUILD_ARGS = -race
smoke-docker-race: CGO_ENABLED = 1
smoke-docker-race: smoke-docker smoke-docker-race: smoke-docker
smoke-vagrant/%: bin-docker build/%/nebula
cd .github/workflows/smoke/ && ./build.sh $*
cd .github/workflows/smoke/ && ./smoke-vagrant.sh $*
.FORCE: .FORCE:
.PHONY: e2e e2ev e2evv e2evvv e2evvvv test test-cov-html bench bench-cpu bench-cpu-long bin proto release service smoke-docker smoke-docker-race .PHONY: bench bench-cpu bench-cpu-long bin build-test-mobile e2e e2ev e2evv e2evvv e2evvvv proto release service smoke-docker smoke-docker-race test test-cov-html smoke-vagrant/%
.DEFAULT_GOAL := bin .DEFAULT_GOAL := bin


@@ -27,20 +27,36 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for
 #### Distribution Packages
-- [Arch Linux](https://archlinux.org/packages/community/x86_64/nebula/)
+- [Arch Linux](https://archlinux.org/packages/extra/x86_64/nebula/)
   ```
   $ sudo pacman -S nebula
   ```
 - [Fedora Linux](https://src.fedoraproject.org/rpms/nebula)
   ```
   $ sudo dnf install nebula
   ```
+- [Debian Linux](https://packages.debian.org/source/stable/nebula)
+  ```
+  $ sudo apt install nebula
+  ```
+- [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=nebula)
+  ```
+  $ sudo apk add nebula
+  ```
 - [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/nebula.rb)
   ```
   $ brew install nebula
   ```
+- [Docker](https://hub.docker.com/r/nebulaoss/nebula)
+  ```
+  $ docker pull nebulaoss/nebula
+  ```
 #### Mobile
 - [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&amp;itscg=30200)
@@ -108,7 +124,7 @@ For each host, copy the nebula binary to the host, along with `config.yml` from
 ## Building Nebula from source
-Download go and clone this repo. Change to the nebula directory.
+Make sure you have [go](https://go.dev/doc/install) installed and clone this repo. Change to the nebula directory.
 To build nebula for all platforms:
 `make all`

View File

@@ -2,17 +2,16 @@ package nebula
import ( import (
"fmt" "fmt"
"net" "net/netip"
"regexp" "regexp"
"github.com/slackhq/nebula/cidr" "github.com/gaissmai/bart"
"github.com/slackhq/nebula/config" "github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/iputil"
) )
type AllowList struct { type AllowList struct {
// The values of this cidrTree are `bool`, signifying allow/deny // The values of this cidrTree are `bool`, signifying allow/deny
cidrTree *cidr.Tree6 cidrTree *bart.Table[bool]
} }
type RemoteAllowList struct { type RemoteAllowList struct {
@@ -20,7 +19,7 @@ type RemoteAllowList struct {
// Inside Range Specific, keys of this tree are inside CIDRs and values // Inside Range Specific, keys of this tree are inside CIDRs and values
// are *AllowList // are *AllowList
insideAllowLists *cidr.Tree6 insideAllowLists *bart.Table[*AllowList]
} }
type LocalAllowList struct { type LocalAllowList struct {
@@ -88,7 +87,7 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw) return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw)
} }
tree := cidr.NewTree6() tree := new(bart.Table[bool])
// Keep track of the rules we have added for both ipv4 and ipv6 // Keep track of the rules we have added for both ipv4 and ipv6
type allowListRules struct { type allowListRules struct {
@@ -122,18 +121,20 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
return nil, fmt.Errorf("config `%s` has invalid value (type %T): %v", k, rawValue, rawValue) return nil, fmt.Errorf("config `%s` has invalid value (type %T): %v", k, rawValue, rawValue)
} }
_, ipNet, err := net.ParseCIDR(rawCIDR) ipNet, err := netip.ParsePrefix(rawCIDR)
if err != nil { if err != nil {
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR) return nil, fmt.Errorf("config `%s` has invalid CIDR: %s. %w", k, rawCIDR, err)
} }
// TODO: should we error on duplicate CIDRs in the config? ipNet = netip.PrefixFrom(ipNet.Addr().Unmap(), ipNet.Bits())
tree.AddCIDR(ipNet, value)
maskBits, maskSize := ipNet.Mask.Size() // TODO: should we error on duplicate CIDRs in the config?
tree.Insert(ipNet, value)
maskBits := ipNet.Bits()
var rules *allowListRules var rules *allowListRules
if maskSize == 32 { if ipNet.Addr().Is4() {
rules = &rules4 rules = &rules4
} else { } else {
rules = &rules6 rules = &rules6
@@ -156,8 +157,7 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
if !rules4.defaultSet { if !rules4.defaultSet {
if rules4.allValuesMatch { if rules4.allValuesMatch {
_, zeroCIDR, _ := net.ParseCIDR("0.0.0.0/0") tree.Insert(netip.PrefixFrom(netip.IPv4Unspecified(), 0), !rules4.allValues)
tree.AddCIDR(zeroCIDR, !rules4.allValues)
} else { } else {
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for 0.0.0.0/0", k) return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for 0.0.0.0/0", k)
} }
@@ -165,8 +165,7 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
if !rules6.defaultSet { if !rules6.defaultSet {
if rules6.allValuesMatch { if rules6.allValuesMatch {
_, zeroCIDR, _ := net.ParseCIDR("::/0") tree.Insert(netip.PrefixFrom(netip.IPv6Unspecified(), 0), !rules6.allValues)
tree.AddCIDR(zeroCIDR, !rules6.allValues)
} else { } else {
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for ::/0", k) return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for ::/0", k)
} }
@@ -218,13 +217,13 @@ func getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error
return nameRules, nil return nameRules, nil
} }
func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6, error) { func getRemoteAllowRanges(c *config.C, k string) (*bart.Table[*AllowList], error) {
value := c.Get(k) value := c.Get(k)
if value == nil { if value == nil {
return nil, nil return nil, nil
} }
remoteAllowRanges := cidr.NewTree6() remoteAllowRanges := new(bart.Table[*AllowList])
rawMap, ok := value.(map[interface{}]interface{}) rawMap, ok := value.(map[interface{}]interface{})
if !ok { if !ok {
@@ -241,60 +240,27 @@ func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6, error) {
return nil, err return nil, err
} }
_, ipNet, err := net.ParseCIDR(rawCIDR) ipNet, err := netip.ParsePrefix(rawCIDR)
if err != nil { if err != nil {
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR) return nil, fmt.Errorf("config `%s` has invalid CIDR: %s. %w", k, rawCIDR, err)
} }
remoteAllowRanges.AddCIDR(ipNet, allowList) remoteAllowRanges.Insert(netip.PrefixFrom(ipNet.Addr().Unmap(), ipNet.Bits()), allowList)
} }
return remoteAllowRanges, nil return remoteAllowRanges, nil
} }
func (al *AllowList) Allow(ip net.IP) bool { func (al *AllowList) Allow(ip netip.Addr) bool {
if al == nil { if al == nil {
return true return true
} }
result := al.cidrTree.MostSpecificContains(ip) result, _ := al.cidrTree.Lookup(ip)
switch v := result.(type) { return result
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
} }
func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool { func (al *LocalAllowList) Allow(ip netip.Addr) bool {
if al == nil {
return true
}
result := al.cidrTree.MostSpecificContainsIpV4(ip)
switch v := result.(type) {
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
}
func (al *AllowList) AllowIpV6(hi, lo uint64) bool {
if al == nil {
return true
}
result := al.cidrTree.MostSpecificContainsIpV6(hi, lo)
switch v := result.(type) {
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
}
func (al *LocalAllowList) Allow(ip net.IP) bool {
if al == nil { if al == nil {
return true return true
} }
@@ -316,45 +282,25 @@ func (al *LocalAllowList) AllowName(name string) bool {
return !al.nameRules[0].Allow return !al.nameRules[0].Allow
} }
func (al *RemoteAllowList) AllowUnknownVpnIp(ip net.IP) bool { func (al *RemoteAllowList) AllowUnknownVpnIp(ip netip.Addr) bool {
if al == nil { if al == nil {
return true return true
} }
return al.AllowList.Allow(ip) return al.AllowList.Allow(ip)
} }
func (al *RemoteAllowList) Allow(vpnIp iputil.VpnIp, ip net.IP) bool { func (al *RemoteAllowList) Allow(vpnIp netip.Addr, ip netip.Addr) bool {
if !al.getInsideAllowList(vpnIp).Allow(ip) { if !al.getInsideAllowList(vpnIp).Allow(ip) {
return false return false
} }
return al.AllowList.Allow(ip) return al.AllowList.Allow(ip)
} }
func (al *RemoteAllowList) AllowIpV4(vpnIp iputil.VpnIp, ip iputil.VpnIp) bool { func (al *RemoteAllowList) getInsideAllowList(vpnIp netip.Addr) *AllowList {
if al == nil {
return true
}
if !al.getInsideAllowList(vpnIp).AllowIpV4(ip) {
return false
}
return al.AllowList.AllowIpV4(ip)
}
func (al *RemoteAllowList) AllowIpV6(vpnIp iputil.VpnIp, hi, lo uint64) bool {
if al == nil {
return true
}
if !al.getInsideAllowList(vpnIp).AllowIpV6(hi, lo) {
return false
}
return al.AllowList.AllowIpV6(hi, lo)
}
func (al *RemoteAllowList) getInsideAllowList(vpnIp iputil.VpnIp) *AllowList {
if al.insideAllowLists != nil { if al.insideAllowLists != nil {
inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp) inside, ok := al.insideAllowLists.Lookup(vpnIp)
if inside != nil { if ok {
return inside.(*AllowList) return inside
} }
} }
return nil return nil

View File
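The allowlist change above replaces the bespoke `cidr.Tree6` with `github.com/gaissmai/bart`, collapsing the separate IPv4/IPv6 (and hi/lo uint64) lookup paths into a single longest-prefix-match table keyed by `netip.Prefix`. A minimal sketch of the new lookup semantics, not the repository's exact code, using only the `Insert` and `Lookup` calls visible in the diff:

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	// Values are bool: true = allow, false = deny.
	tree := new(bart.Table[bool])
	tree.Insert(netip.MustParsePrefix("0.0.0.0/0"), true)    // default allow
	tree.Insert(netip.MustParsePrefix("10.0.0.0/8"), false)  // deny 10.0.0.0/8
	tree.Insert(netip.MustParsePrefix("10.42.0.0/16"), true) // re-allow a carve-out

	for _, s := range []string{"1.1.1.1", "10.0.0.4", "10.42.42.42"} {
		ip := netip.MustParseAddr(s)
		// Lookup is a longest-prefix match; ok is false when no prefix covers ip.
		allowed, ok := tree.Lookup(ip)
		fmt.Println(s, allowed, ok)
	}
}
```

Because a miss returns the zero value (`false`), the rewritten `AllowList.Allow` can return the lookup result directly instead of type-switching and panicking. Note also that prefixes are `Unmap()`ed before insertion, presumably so that 4-mapped-in-6 addresses land in the IPv4 side of the table.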

@@ -1,11 +1,11 @@
package nebula package nebula
import ( import (
"net" "net/netip"
"regexp" "regexp"
"testing" "testing"
"github.com/slackhq/nebula/cidr" "github.com/gaissmai/bart"
"github.com/slackhq/nebula/config" "github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/test" "github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -18,7 +18,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
"192.168.0.0": true, "192.168.0.0": true,
} }
r, err := newAllowListFromConfig(c, "allowlist", nil) r, err := newAllowListFromConfig(c, "allowlist", nil)
assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0") assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0. netip.ParsePrefix(\"192.168.0.0\"): no '/'")
assert.Nil(t, r) assert.Nil(t, r)
c.Settings["allowlist"] = map[interface{}]interface{}{ c.Settings["allowlist"] = map[interface{}]interface{}{
@@ -98,26 +98,26 @@ func TestNewAllowListFromConfig(t *testing.T) {
} }
func TestAllowList_Allow(t *testing.T) { func TestAllowList_Allow(t *testing.T) {
assert.Equal(t, true, ((*AllowList)(nil)).Allow(net.ParseIP("1.1.1.1"))) assert.Equal(t, true, ((*AllowList)(nil)).Allow(netip.MustParseAddr("1.1.1.1")))
tree := cidr.NewTree6() tree := new(bart.Table[bool])
tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true) tree.Insert(netip.MustParsePrefix("0.0.0.0/0"), true)
tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false) tree.Insert(netip.MustParsePrefix("10.0.0.0/8"), false)
tree.AddCIDR(cidr.Parse("10.42.42.42/32"), true) tree.Insert(netip.MustParsePrefix("10.42.42.42/32"), true)
tree.AddCIDR(cidr.Parse("10.42.0.0/16"), true) tree.Insert(netip.MustParsePrefix("10.42.0.0/16"), true)
tree.AddCIDR(cidr.Parse("10.42.42.0/24"), true) tree.Insert(netip.MustParsePrefix("10.42.42.0/24"), true)
tree.AddCIDR(cidr.Parse("10.42.42.0/24"), false) tree.Insert(netip.MustParsePrefix("10.42.42.0/24"), false)
tree.AddCIDR(cidr.Parse("::1/128"), true) tree.Insert(netip.MustParsePrefix("::1/128"), true)
tree.AddCIDR(cidr.Parse("::2/128"), false) tree.Insert(netip.MustParsePrefix("::2/128"), false)
al := &AllowList{cidrTree: tree} al := &AllowList{cidrTree: tree}
assert.Equal(t, true, al.Allow(net.ParseIP("1.1.1.1"))) assert.Equal(t, true, al.Allow(netip.MustParseAddr("1.1.1.1")))
assert.Equal(t, false, al.Allow(net.ParseIP("10.0.0.4"))) assert.Equal(t, false, al.Allow(netip.MustParseAddr("10.0.0.4")))
assert.Equal(t, true, al.Allow(net.ParseIP("10.42.42.42"))) assert.Equal(t, true, al.Allow(netip.MustParseAddr("10.42.42.42")))
assert.Equal(t, false, al.Allow(net.ParseIP("10.42.42.41"))) assert.Equal(t, false, al.Allow(netip.MustParseAddr("10.42.42.41")))
assert.Equal(t, true, al.Allow(net.ParseIP("10.42.0.1"))) assert.Equal(t, true, al.Allow(netip.MustParseAddr("10.42.0.1")))
assert.Equal(t, true, al.Allow(net.ParseIP("::1"))) assert.Equal(t, true, al.Allow(netip.MustParseAddr("::1")))
assert.Equal(t, false, al.Allow(net.ParseIP("::2"))) assert.Equal(t, false, al.Allow(netip.MustParseAddr("::2")))
} }
func TestLocalAllowList_AllowName(t *testing.T) { func TestLocalAllowList_AllowName(t *testing.T) {

View File

@@ -1,41 +1,36 @@
package nebula package nebula
import ( import (
"encoding/binary"
"fmt" "fmt"
"math" "math"
"net" "net"
"net/netip"
"strconv" "strconv"
"github.com/slackhq/nebula/cidr" "github.com/gaissmai/bart"
"github.com/slackhq/nebula/config" "github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/iputil"
) )
// This allows us to "guess" what the remote might be for a host while we wait // This allows us to "guess" what the remote might be for a host while we wait
// for the lighthouse response. See "lighthouse.calculated_remotes" in the // for the lighthouse response. See "lighthouse.calculated_remotes" in the
// example config file. // example config file.
type calculatedRemote struct { type calculatedRemote struct {
ipNet net.IPNet ipNet netip.Prefix
maskIP iputil.VpnIp mask netip.Prefix
mask iputil.VpnIp port uint32
port uint32
} }
func newCalculatedRemote(ipNet *net.IPNet, port int) (*calculatedRemote, error) { func newCalculatedRemote(maskCidr netip.Prefix, port int) (*calculatedRemote, error) {
// Ensure this is an IPv4 mask that we expect masked := maskCidr.Masked()
ones, bits := ipNet.Mask.Size()
if ones == 0 || bits != 32 {
return nil, fmt.Errorf("invalid mask: %v", ipNet)
}
if port < 0 || port > math.MaxUint16 { if port < 0 || port > math.MaxUint16 {
return nil, fmt.Errorf("invalid port: %d", port) return nil, fmt.Errorf("invalid port: %d", port)
} }
return &calculatedRemote{ return &calculatedRemote{
ipNet: *ipNet, ipNet: maskCidr,
maskIP: iputil.Ip2VpnIp(ipNet.IP), mask: masked,
mask: iputil.Ip2VpnIp(ipNet.Mask), port: uint32(port),
port: uint32(port),
}, nil }, nil
} }
@@ -43,21 +38,41 @@ func (c *calculatedRemote) String() string {
return fmt.Sprintf("CalculatedRemote(mask=%v port=%d)", c.ipNet, c.port) return fmt.Sprintf("CalculatedRemote(mask=%v port=%d)", c.ipNet, c.port)
} }
func (c *calculatedRemote) Apply(ip iputil.VpnIp) *Ip4AndPort { func (c *calculatedRemote) Apply(ip netip.Addr) *Ip4AndPort {
// Combine the masked bytes of the "mask" IP with the unmasked bytes // Combine the masked bytes of the "mask" IP with the unmasked bytes
// of the overlay IP // of the overlay IP
masked := (c.maskIP & c.mask) | (ip & ^c.mask) if c.ipNet.Addr().Is4() {
return c.apply4(ip)
return &Ip4AndPort{Ip: uint32(masked), Port: c.port} }
return c.apply6(ip)
} }
func NewCalculatedRemotesFromConfig(c *config.C, k string) (*cidr.Tree4, error) { func (c *calculatedRemote) apply4(ip netip.Addr) *Ip4AndPort {
//TODO: IPV6-WORK this can be less crappy
maskb := net.CIDRMask(c.mask.Bits(), c.mask.Addr().BitLen())
mask := binary.BigEndian.Uint32(maskb[:])
b := c.mask.Addr().As4()
maskIp := binary.BigEndian.Uint32(b[:])
b = ip.As4()
intIp := binary.BigEndian.Uint32(b[:])
return &Ip4AndPort{(maskIp & mask) | (intIp & ^mask), c.port}
}
func (c *calculatedRemote) apply6(ip netip.Addr) *Ip4AndPort {
//TODO: IPV6-WORK
panic("Can not calculate ipv6 remote addresses")
}
func NewCalculatedRemotesFromConfig(c *config.C, k string) (*bart.Table[[]*calculatedRemote], error) {
value := c.Get(k) value := c.Get(k)
if value == nil { if value == nil {
return nil, nil return nil, nil
} }
calculatedRemotes := cidr.NewTree4() calculatedRemotes := new(bart.Table[[]*calculatedRemote])
rawMap, ok := value.(map[any]any) rawMap, ok := value.(map[any]any)
if !ok { if !ok {
@@ -69,17 +84,18 @@ func NewCalculatedRemotesFromConfig(c *config.C, k string) (*cidr.Tree4, error)
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey) return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
} }
_, ipNet, err := net.ParseCIDR(rawCIDR) cidr, err := netip.ParsePrefix(rawCIDR)
if err != nil { if err != nil {
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR) return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
} }
//TODO: IPV6-WORK this does not verify that rawValue contains the same bits as cidr here
entry, err := newCalculatedRemotesListFromConfig(rawValue) entry, err := newCalculatedRemotesListFromConfig(rawValue)
if err != nil { if err != nil {
return nil, fmt.Errorf("config '%s.%s': %w", k, rawCIDR, err) return nil, fmt.Errorf("config '%s.%s': %w", k, rawCIDR, err)
} }
calculatedRemotes.AddCIDR(ipNet, entry) calculatedRemotes.Insert(cidr, entry)
} }
return calculatedRemotes, nil return calculatedRemotes, nil
@@ -117,7 +133,7 @@ func newCalculatedRemotesEntryFromConfig(raw any) (*calculatedRemote, error) {
if !ok { if !ok {
return nil, fmt.Errorf("invalid mask (type %T): %v", rawValue, rawValue) return nil, fmt.Errorf("invalid mask (type %T): %v", rawValue, rawValue)
} }
_, ipNet, err := net.ParseCIDR(rawMask) maskCidr, err := netip.ParsePrefix(rawMask)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid mask: %s", rawMask) return nil, fmt.Errorf("invalid mask: %s", rawMask)
} }
@@ -139,5 +155,5 @@ func newCalculatedRemotesEntryFromConfig(raw any) (*calculatedRemote, error) {
return nil, fmt.Errorf("invalid port (type %T): %v", rawValue, rawValue) return nil, fmt.Errorf("invalid port (type %T): %v", rawValue, rawValue)
} }
return newCalculatedRemote(ipNet, port) return newCalculatedRemote(maskCidr, port)
} }

View File
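The `apply4` path above keeps the old mask-and-merge arithmetic; it just round-trips through `netip.Addr` now. Below is a self-contained illustration of that combination using a hypothetical `combine4` helper rather than the repository's method; the updated test that follows exercises the same 192.168.1.0/24 plus 10.0.10.182 case.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"
	"net/netip"
)

// combine4 keeps the masked bits of the configured "mask" prefix and fills
// the remaining host bits from the overlay (VPN) address, mirroring apply4.
func combine4(mask netip.Prefix, overlay netip.Addr) netip.Addr {
	maskBytes := net.CIDRMask(mask.Bits(), 32)
	m := binary.BigEndian.Uint32(maskBytes)

	a := mask.Addr().As4()
	maskIP := binary.BigEndian.Uint32(a[:])

	a = overlay.As4()
	overlayIP := binary.BigEndian.Uint32(a[:])

	var out [4]byte
	binary.BigEndian.PutUint32(out[:], (maskIP&m)|(overlayIP&^m))
	return netip.AddrFrom4(out)
}

func main() {
	mask := netip.MustParsePrefix("192.168.1.0/24")
	overlay := netip.MustParseAddr("10.0.10.182")
	fmt.Println(combine4(mask, overlay)) // 192.168.1.182, matching the updated test
}
```

The `apply6` stub still panics, which lines up with the `//TODO: IPV6-WORK` markers elsewhere in this change.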

@@ -1,27 +1,25 @@
package nebula package nebula
import ( import (
"net" "net/netip"
"testing" "testing"
"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
func TestCalculatedRemoteApply(t *testing.T) { func TestCalculatedRemoteApply(t *testing.T) {
_, ipNet, err := net.ParseCIDR("192.168.1.0/24") ipNet, err := netip.ParsePrefix("192.168.1.0/24")
require.NoError(t, err) require.NoError(t, err)
c, err := newCalculatedRemote(ipNet, 4242) c, err := newCalculatedRemote(ipNet, 4242)
require.NoError(t, err) require.NoError(t, err)
input := iputil.Ip2VpnIp([]byte{10, 0, 10, 182}) input, err := netip.ParseAddr("10.0.10.182")
assert.NoError(t, err)
expected := &Ip4AndPort{ expected, err := netip.ParseAddr("192.168.1.182")
Ip: uint32(iputil.Ip2VpnIp([]byte{192, 168, 1, 182})), assert.NoError(t, err)
Port: 4242,
}
assert.Equal(t, expected, c.Apply(input)) assert.Equal(t, NewIp4AndPortFromNetIP(expected, 4242), c.Apply(input))
} }

cert.go (163 lines removed)

View File

@@ -1,163 +0,0 @@
package nebula
import (
"errors"
"fmt"
"io/ioutil"
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
)
type CertState struct {
certificate *cert.NebulaCertificate
rawCertificate []byte
rawCertificateNoKey []byte
publicKey []byte
privateKey []byte
}
func NewCertState(certificate *cert.NebulaCertificate, privateKey []byte) (*CertState, error) {
// Marshal the certificate to ensure it is valid
rawCertificate, err := certificate.Marshal()
if err != nil {
return nil, fmt.Errorf("invalid nebula certificate on interface: %s", err)
}
publicKey := certificate.Details.PublicKey
cs := &CertState{
rawCertificate: rawCertificate,
certificate: certificate, // PublicKey has been set to nil above
privateKey: privateKey,
publicKey: publicKey,
}
cs.certificate.Details.PublicKey = nil
rawCertNoKey, err := cs.certificate.Marshal()
if err != nil {
return nil, fmt.Errorf("error marshalling certificate no key: %s", err)
}
cs.rawCertificateNoKey = rawCertNoKey
// put public key back
cs.certificate.Details.PublicKey = cs.publicKey
return cs, nil
}
func NewCertStateFromConfig(c *config.C) (*CertState, error) {
var pemPrivateKey []byte
var err error
privPathOrPEM := c.GetString("pki.key", "")
if privPathOrPEM == "" {
return nil, errors.New("no pki.key path or PEM data provided")
}
if strings.Contains(privPathOrPEM, "-----BEGIN") {
pemPrivateKey = []byte(privPathOrPEM)
privPathOrPEM = "<inline>"
} else {
pemPrivateKey, err = ioutil.ReadFile(privPathOrPEM)
if err != nil {
return nil, fmt.Errorf("unable to read pki.key file %s: %s", privPathOrPEM, err)
}
}
rawKey, _, curve, err := cert.UnmarshalPrivateKey(pemPrivateKey)
if err != nil {
return nil, fmt.Errorf("error while unmarshaling pki.key %s: %s", privPathOrPEM, err)
}
var rawCert []byte
pubPathOrPEM := c.GetString("pki.cert", "")
if pubPathOrPEM == "" {
return nil, errors.New("no pki.cert path or PEM data provided")
}
if strings.Contains(pubPathOrPEM, "-----BEGIN") {
rawCert = []byte(pubPathOrPEM)
pubPathOrPEM = "<inline>"
} else {
rawCert, err = ioutil.ReadFile(pubPathOrPEM)
if err != nil {
return nil, fmt.Errorf("unable to read pki.cert file %s: %s", pubPathOrPEM, err)
}
}
nebulaCert, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCert)
if err != nil {
return nil, fmt.Errorf("error while unmarshaling pki.cert %s: %s", pubPathOrPEM, err)
}
if nebulaCert.Expired(time.Now()) {
return nil, fmt.Errorf("nebula certificate for this host is expired")
}
if len(nebulaCert.Details.Ips) == 0 {
return nil, fmt.Errorf("no IPs encoded in certificate")
}
if err = nebulaCert.VerifyPrivateKey(curve, rawKey); err != nil {
return nil, fmt.Errorf("private key is not a pair with public key in nebula cert")
}
return NewCertState(nebulaCert, rawKey)
}
func loadCAFromConfig(l *logrus.Logger, c *config.C) (*cert.NebulaCAPool, error) {
var rawCA []byte
var err error
caPathOrPEM := c.GetString("pki.ca", "")
if caPathOrPEM == "" {
return nil, errors.New("no pki.ca path or PEM data provided")
}
if strings.Contains(caPathOrPEM, "-----BEGIN") {
rawCA = []byte(caPathOrPEM)
} else {
rawCA, err = ioutil.ReadFile(caPathOrPEM)
if err != nil {
return nil, fmt.Errorf("unable to read pki.ca file %s: %s", caPathOrPEM, err)
}
}
CAs, err := cert.NewCAPoolFromBytes(rawCA)
if errors.Is(err, cert.ErrExpired) {
var expired int
for _, cert := range CAs.CAs {
if cert.Expired(time.Now()) {
expired++
l.WithField("cert", cert).Warn("expired certificate present in CA pool")
}
}
if expired >= len(CAs.CAs) {
return nil, errors.New("no valid CA certificates present")
}
} else if err != nil {
return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err)
}
for _, fp := range c.GetStringSlice("pki.blocklist", []string{}) {
l.WithField("fingerprint", fp).Info("Blocklisting cert")
CAs.BlocklistFingerprint(fp)
}
// Support deprecated config for at least one minor release to allow for migrations
//TODO: remove in 2022 or later
for _, fp := range c.GetStringSlice("pki.blacklist", []string{}) {
l.WithField("fingerprint", fp).Info("Blocklisting cert")
l.Warn("pki.blacklist is deprecated and will not be supported in a future release. Please migrate your config to use pki.blocklist")
CAs.BlocklistFingerprint(fp)
}
return CAs, nil
}

View File

@@ -24,31 +24,39 @@ func NewCAPool() *NebulaCAPool {
// NewCAPoolFromBytes will create a new CA pool from the provided // NewCAPoolFromBytes will create a new CA pool from the provided
// input bytes, which must be a PEM-encoded set of nebula certificates. // input bytes, which must be a PEM-encoded set of nebula certificates.
// If the pool contains unsupported certificates, they will generate warnings
// in the []error return arg.
// If the pool contains any expired certificates, an ErrExpired will be // If the pool contains any expired certificates, an ErrExpired will be
// returned along with the pool. The caller must handle any such errors. // returned along with the pool. The caller must handle any such errors.
func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, error) { func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, []error, error) {
pool := NewCAPool() pool := NewCAPool()
var err error var err error
var expired bool var warnings []error
good := 0
for { for {
caPEMs, err = pool.AddCACertificate(caPEMs) caPEMs, err = pool.AddCACertificate(caPEMs)
if errors.Is(err, ErrExpired) { if errors.Is(err, ErrExpired) {
expired = true warnings = append(warnings, err)
err = nil } else if errors.Is(err, ErrInvalidPEMCertificateUnsupported) {
} warnings = append(warnings, err)
if err != nil { } else if err != nil {
return nil, err return nil, warnings, err
} else {
// Only consider a good certificate if there were no errors present
good++
} }
if len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" { if len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" {
break break
} }
} }
if expired { if good == 0 {
return pool, ErrExpired return nil, warnings, errors.New("no valid CA certificates present")
} }
return pool, nil return pool, warnings, nil
} }
// AddCACertificate verifies a Nebula CA certificate and adds it to the pool // AddCACertificate verifies a Nebula CA certificate and adds it to the pool

View File
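`NewCAPoolFromBytes` now separates soft failures from hard ones: expired or unsupported certificates come back as warnings in a `[]error`, and the call only errors out when no certificate in the bundle was usable. A hedged sketch of a caller follows; the logger and file handling are assumptions, not code from this change, but the `(pool, warnings, err)` shape matches the new signature:

```go
package pki // illustrative package name

import (
	"fmt"
	"os"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/cert"
)

// loadCAs shows the new (pool, warnings, err) contract. Warnings cover
// expired or unsupported (for example V2-format) certificates; err is only
// non-nil when nothing usable was loaded.
func loadCAs(l *logrus.Logger, path string) (*cert.NebulaCAPool, error) {
	rawCA, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("unable to read CA file %s: %w", path, err)
	}

	pool, warnings, err := cert.NewCAPoolFromBytes(rawCA)
	for _, w := range warnings {
		l.WithError(w).Warn("parsing a CA certificate failed; it will not be trusted")
	}
	if err != nil {
		return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %w", err)
	}
	return pool, nil
}
```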

@@ -28,6 +28,7 @@ const publicKeyLen = 32
const ( const (
CertBanner = "NEBULA CERTIFICATE" CertBanner = "NEBULA CERTIFICATE"
CertificateV2Banner = "NEBULA CERTIFICATE V2"
X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY" X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY"
X25519PublicKeyBanner = "NEBULA X25519 PUBLIC KEY" X25519PublicKeyBanner = "NEBULA X25519 PUBLIC KEY"
EncryptedEd25519PrivateKeyBanner = "NEBULA ED25519 ENCRYPTED PRIVATE KEY" EncryptedEd25519PrivateKeyBanner = "NEBULA ED25519 ENCRYPTED PRIVATE KEY"
@@ -163,6 +164,9 @@ func UnmarshalNebulaCertificateFromPEM(b []byte) (*NebulaCertificate, []byte, er
if p == nil { if p == nil {
return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block") return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
} }
if p.Type == CertificateV2Banner {
return nil, r, fmt.Errorf("%w: %s", ErrInvalidPEMCertificateUnsupported, p.Type)
}
if p.Type != CertBanner { if p.Type != CertBanner {
return nil, r, fmt.Errorf("bytes did not contain a proper nebula certificate banner") return nil, r, fmt.Errorf("bytes did not contain a proper nebula certificate banner")
} }
@@ -272,6 +276,9 @@ func EncryptAndMarshalSigningPrivateKey(curve Curve, b []byte, passphrase []byte
}, },
Ciphertext: ciphertext, Ciphertext: ciphertext,
}) })
if err != nil {
return nil, err
}
switch curve { switch curve {
case Curve_CURVE25519: case Curve_CURVE25519:
@@ -321,7 +328,7 @@ func UnmarshalEd25519PrivateKey(b []byte) (ed25519.PrivateKey, []byte, error) {
return k.Bytes, r, nil return k.Bytes, r, nil
} }
// UnmarshalNebulaCertificate will unmarshal a protobuf byte representation of a nebula cert into its // UnmarshalNebulaEncryptedData will unmarshal a protobuf byte representation of a nebula cert into its
// protobuf-generated struct. // protobuf-generated struct.
func UnmarshalNebulaEncryptedData(b []byte) (*NebulaEncryptedData, error) { func UnmarshalNebulaEncryptedData(b []byte) (*NebulaEncryptedData, error) {
if len(b) == 0 { if len(b) == 0 {

View File

@@ -5,6 +5,7 @@ import (
"crypto/ecdsa" "crypto/ecdsa"
"crypto/elliptic" "crypto/elliptic"
"crypto/rand" "crypto/rand"
"errors"
"fmt" "fmt"
"io" "io"
"net" "net"
@@ -572,6 +573,13 @@ CmYKEG5lYnVsYSBQMjU2IHRlc3Qo4s+7mgYw4tXrsAc6QQRkaW2jFmllYvN4+/k2
76gvQAGgBgESRzBFAiEAib0/te6eMiZOKD8gdDeloMTS0wGuX2t0C7TFdUhAQzgC 76gvQAGgBgESRzBFAiEAib0/te6eMiZOKD8gdDeloMTS0wGuX2t0C7TFdUhAQzgC
IBNWYMep3ysx9zCgknfG5dKtwGTaqF++BWKDYdyl34KX IBNWYMep3ysx9zCgknfG5dKtwGTaqF++BWKDYdyl34KX
-----END NEBULA CERTIFICATE----- -----END NEBULA CERTIFICATE-----
`
v2 := `
# valid PEM with the V2 header
-----BEGIN NEBULA CERTIFICATE V2-----
CmYKEG5lYnVsYSBQMjU2IHRlc3Qo4s+7mgYw4tXrsAc6QQRkaW2jFmllYvN4+/k2
-----END NEBULA CERTIFICATE V2-----
` `
rootCA := NebulaCertificate{ rootCA := NebulaCertificate{
@@ -592,33 +600,46 @@ IBNWYMep3ysx9zCgknfG5dKtwGTaqF++BWKDYdyl34KX
}, },
} }
p, err := NewCAPoolFromBytes([]byte(noNewLines)) p, warn, err := NewCAPoolFromBytes([]byte(noNewLines))
assert.Nil(t, err) assert.Nil(t, err)
assert.Nil(t, warn)
assert.Equal(t, p.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name) assert.Equal(t, p.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
assert.Equal(t, p.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name) assert.Equal(t, p.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
pp, err := NewCAPoolFromBytes([]byte(withNewLines)) pp, warn, err := NewCAPoolFromBytes([]byte(withNewLines))
assert.Nil(t, err) assert.Nil(t, err)
assert.Nil(t, warn)
assert.Equal(t, pp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name) assert.Equal(t, pp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
assert.Equal(t, pp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name) assert.Equal(t, pp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
// expired cert, no valid certs // expired cert, no valid certs
ppp, err := NewCAPoolFromBytes([]byte(expired)) ppp, warn, err := NewCAPoolFromBytes([]byte(expired))
assert.Equal(t, ErrExpired, err) assert.Error(t, err, "no valid CA certificates present")
assert.Equal(t, ppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired") assert.Len(t, warn, 1)
assert.Error(t, warn[0], ErrExpired)
assert.Nil(t, ppp)
// expired cert, with valid certs // expired cert, with valid certs
pppp, err := NewCAPoolFromBytes(append([]byte(expired), noNewLines...)) pppp, warn, err := NewCAPoolFromBytes(append([]byte(expired), noNewLines...))
assert.Equal(t, ErrExpired, err) assert.Len(t, warn, 1)
assert.Nil(t, err)
assert.Error(t, warn[0], ErrExpired)
assert.Equal(t, pppp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name) assert.Equal(t, pppp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
assert.Equal(t, pppp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name) assert.Equal(t, pppp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
assert.Equal(t, pppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired") assert.Equal(t, pppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
assert.Equal(t, len(pppp.CAs), 3) assert.Equal(t, len(pppp.CAs), 3)
ppppp, err := NewCAPoolFromBytes([]byte(p256)) ppppp, warn, err := NewCAPoolFromBytes([]byte(p256))
assert.Nil(t, err) assert.Nil(t, err)
assert.Nil(t, warn)
assert.Equal(t, ppppp.CAs[string("a7938893ec8c4ef769b06d7f425e5e46f7a7f5ffa49c3bcf4a86b608caba9159")].Details.Name, rootCAP256.Details.Name) assert.Equal(t, ppppp.CAs[string("a7938893ec8c4ef769b06d7f425e5e46f7a7f5ffa49c3bcf4a86b608caba9159")].Details.Name, rootCAP256.Details.Name)
assert.Equal(t, len(ppppp.CAs), 1) assert.Equal(t, len(ppppp.CAs), 1)
pppppp, warn, err := NewCAPoolFromBytes(append([]byte(p256), []byte(v2)...))
assert.Nil(t, err)
assert.True(t, errors.Is(warn[0], ErrInvalidPEMCertificateUnsupported))
assert.Equal(t, pppppp.CAs[string("a7938893ec8c4ef769b06d7f425e5e46f7a7f5ffa49c3bcf4a86b608caba9159")].Details.Name, rootCAP256.Details.Name)
assert.Equal(t, len(pppppp.CAs), 1)
} }
func appendByteSlices(b ...[]byte) []byte { func appendByteSlices(b ...[]byte) []byte {

View File

@@ -77,6 +77,9 @@ func aes256Decrypt(passphrase []byte, kdfParams *Argon2Parameters, data []byte)
} }
gcm, err := cipher.NewGCM(block) gcm, err := cipher.NewGCM(block)
if err != nil {
return nil, err
}
nonce, ciphertext, err := splitNonceCiphertext(data, gcm.NonceSize()) nonce, ciphertext, err := splitNonceCiphertext(data, gcm.NonceSize())
if err != nil { if err != nil {

View File

@@ -5,10 +5,11 @@ import (
) )
var ( var (
ErrRootExpired = errors.New("root certificate is expired") ErrRootExpired = errors.New("root certificate is expired")
ErrExpired = errors.New("certificate is expired") ErrExpired = errors.New("certificate is expired")
ErrNotCA = errors.New("certificate is not a CA") ErrNotCA = errors.New("certificate is not a CA")
ErrNotSelfSigned = errors.New("certificate is not self-signed") ErrNotSelfSigned = errors.New("certificate is not self-signed")
ErrBlockListed = errors.New("certificate is in the block list") ErrBlockListed = errors.New("certificate is in the block list")
ErrSignatureMismatch = errors.New("certificate signature did not match") ErrSignatureMismatch = errors.New("certificate signature did not match")
ErrInvalidPEMCertificateUnsupported = errors.New("bytes contain an unsupported certificate format")
) )

View File
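Since `UnmarshalNebulaCertificateFromPEM` wraps the new sentinel with `%w`, callers can use `errors.Is` to tell "this build cannot read this certificate format" apart from ordinary parse failures. A small hypothetical check:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/slackhq/nebula/cert"
)

func main() {
	// A V2-banner PEM block; the payload is dummy base64, only the banner matters here.
	v2PEM := []byte(
		"-----BEGIN NEBULA CERTIFICATE V2-----\nAAAA\n-----END NEBULA CERTIFICATE V2-----\n")

	_, _, err := cert.UnmarshalNebulaCertificateFromPEM(v2PEM)
	if errors.Is(err, cert.ErrInvalidPEMCertificateUnsupported) {
		fmt.Println("certificate is in a newer format this build cannot read")
	}
}
```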

@@ -1,10 +0,0 @@
package cidr
import "net"
// Parse is a convenience function that returns only the IPNet
// This function ignores errors since it is primarily a test helper, the result could be nil
func Parse(s string) *net.IPNet {
_, c, _ := net.ParseCIDR(s)
return c
}

View File

@@ -1,167 +0,0 @@
package cidr
import (
"net"
"github.com/slackhq/nebula/iputil"
)
type Node struct {
left *Node
right *Node
parent *Node
value interface{}
}
type entry struct {
CIDR *net.IPNet
Value *interface{}
}
type Tree4 struct {
root *Node
list []entry
}
const (
startbit = iputil.VpnIp(0x80000000)
)
func NewTree4() *Tree4 {
tree := new(Tree4)
tree.root = &Node{}
tree.list = []entry{}
return tree
}
func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
bit := startbit
node := tree.root
next := tree.root
ip := iputil.Ip2VpnIp(cidr.IP)
mask := iputil.Ip2VpnIp(cidr.Mask)
// Find our last ancestor in the tree
for bit&mask != 0 {
if ip&bit != 0 {
next = node.right
} else {
next = node.left
}
if next == nil {
break
}
bit = bit >> 1
node = next
}
// We already have this range so update the value
if next != nil {
addCIDR := cidr.String()
for i, v := range tree.list {
if addCIDR == v.CIDR.String() {
tree.list = append(tree.list[:i], tree.list[i+1:]...)
break
}
}
tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
node.value = val
return
}
// Build up the rest of the tree we don't already have
for bit&mask != 0 {
next = &Node{}
next.parent = node
if ip&bit != 0 {
node.right = next
} else {
node.left = next
}
bit >>= 1
node = next
}
// Final node marks our cidr, set the value
node.value = val
tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
}
// Contains finds the first match, which may be the least specific
func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {
bit := startbit
node := tree.root
for node != nil {
if node.value != nil {
return node.value
}
if ip&bit != 0 {
node = node.right
} else {
node = node.left
}
bit >>= 1
}
return value
}
// MostSpecificContains finds the most specific match
func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
bit := startbit
node := tree.root
for node != nil {
if node.value != nil {
value = node.value
}
if ip&bit != 0 {
node = node.right
} else {
node = node.left
}
bit >>= 1
}
return value
}
// Match finds the most specific match
func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
bit := startbit
node := tree.root
lastNode := node
for node != nil {
lastNode = node
if ip&bit != 0 {
node = node.right
} else {
node = node.left
}
bit >>= 1
}
if bit == 0 && lastNode != nil {
value = lastNode.value
}
return value
}
// List will return all CIDRs and their current values. Do not modify the contents!
func (tree *Tree4) List() []entry {
return tree.list
}

View File

@@ -1,167 +0,0 @@
package cidr
import (
"net"
"testing"
"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert"
)
func TestCIDRTree_List(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("1.0.0.0/16"), "1")
tree.AddCIDR(Parse("1.0.0.0/8"), "2")
tree.AddCIDR(Parse("1.0.0.0/16"), "3")
tree.AddCIDR(Parse("1.0.0.0/16"), "4")
list := tree.List()
assert.Len(t, list, 2)
assert.Equal(t, "1.0.0.0/8", list[0].CIDR.String())
assert.Equal(t, "2", *list[0].Value)
assert.Equal(t, "1.0.0.0/16", list[1].CIDR.String())
assert.Equal(t, "4", *list[1].Value)
}
func TestCIDRTree_Contains(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
tree.AddCIDR(Parse("4.1.1.1/32"), "4b")
tree.AddCIDR(Parse("4.1.2.1/32"), "4c")
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4a", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
}
tree = NewTree4()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}
func TestCIDRTree_MostSpecificContains(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
tree.AddCIDR(Parse("4.1.1.0/30"), "4b")
tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4b", "4.1.1.2"},
{"4c", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
}
tree = NewTree4()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}
func TestCIDRTree_Match(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("4.1.1.0/32"), "1a")
tree.AddCIDR(Parse("4.1.1.1/32"), "1b")
tests := []struct {
Result interface{}
IP string
}{
{"1a", "4.1.1.0"},
{"1b", "4.1.1.1"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Match(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
}
tree = NewTree4()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}
func BenchmarkCIDRTree_Contains(b *testing.B) {
tree := NewTree4()
tree.AddCIDR(Parse("1.1.0.0/16"), "1")
tree.AddCIDR(Parse("1.2.1.1/32"), "1")
tree.AddCIDR(Parse("192.2.1.1/32"), "1")
tree.AddCIDR(Parse("172.2.1.1/32"), "1")
ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
b.Run("found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Contains(ip)
}
})
ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
b.Run("not found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Contains(ip)
}
})
}
func BenchmarkCIDRTree_Match(b *testing.B) {
tree := NewTree4()
tree.AddCIDR(Parse("1.1.0.0/16"), "1")
tree.AddCIDR(Parse("1.2.1.1/32"), "1")
tree.AddCIDR(Parse("192.2.1.1/32"), "1")
tree.AddCIDR(Parse("172.2.1.1/32"), "1")
ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
b.Run("found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Match(ip)
}
})
ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
b.Run("not found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Match(ip)
}
})
}

View File

@@ -1,185 +0,0 @@
package cidr
import (
"net"
"github.com/slackhq/nebula/iputil"
)
const startbit6 = uint64(1 << 63)
type Tree6 struct {
root4 *Node
root6 *Node
}
func NewTree6() *Tree6 {
tree := new(Tree6)
tree.root4 = &Node{}
tree.root6 = &Node{}
return tree
}
func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {
var node, next *Node
cidrIP, ipv4 := isIPV4(cidr.IP)
if ipv4 {
node = tree.root4
next = tree.root4
} else {
node = tree.root6
next = tree.root6
}
for i := 0; i < len(cidrIP); i += 4 {
ip := iputil.Ip2VpnIp(cidrIP[i : i+4])
mask := iputil.Ip2VpnIp(cidr.Mask[i : i+4])
bit := startbit
// Find our last ancestor in the tree
for bit&mask != 0 {
if ip&bit != 0 {
next = node.right
} else {
next = node.left
}
if next == nil {
break
}
bit = bit >> 1
node = next
}
// Build up the rest of the tree we don't already have
for bit&mask != 0 {
next = &Node{}
next.parent = node
if ip&bit != 0 {
node.right = next
} else {
node.left = next
}
bit >>= 1
node = next
}
}
// Final node marks our cidr, set the value
node.value = val
}
// Finds the most specific match
func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
var node *Node
wholeIP, ipv4 := isIPV4(ip)
if ipv4 {
node = tree.root4
} else {
node = tree.root6
}
for i := 0; i < len(wholeIP); i += 4 {
ip := iputil.Ip2VpnIp(wholeIP[i : i+4])
bit := startbit
for node != nil {
if node.value != nil {
value = node.value
}
if bit == 0 {
break
}
if ip&bit != 0 {
node = node.right
} else {
node = node.left
}
bit >>= 1
}
}
return value
}
func (tree *Tree6) MostSpecificContainsIpV4(ip iputil.VpnIp) (value interface{}) {
bit := startbit
node := tree.root4
for node != nil {
if node.value != nil {
value = node.value
}
if ip&bit != 0 {
node = node.right
} else {
node = node.left
}
bit >>= 1
}
return value
}
func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
ip := hi
node := tree.root6
for i := 0; i < 2; i++ {
bit := startbit6
for node != nil {
if node.value != nil {
value = node.value
}
if bit == 0 {
break
}
if ip&bit != 0 {
node = node.right
} else {
node = node.left
}
bit >>= 1
}
ip = lo
}
return value
}
func isIPV4(ip net.IP) (net.IP, bool) {
if len(ip) == net.IPv4len {
return ip, true
}
if len(ip) == net.IPv6len && isZeros(ip[0:10]) && ip[10] == 0xff && ip[11] == 0xff {
return ip[12:16], true
}
return ip, false
}
func isZeros(p net.IP) bool {
for i := 0; i < len(p); i++ {
if p[i] != 0 {
return false
}
}
return true
}

View File

@@ -1,81 +0,0 @@
package cidr
import (
"encoding/binary"
"net"
"testing"
"github.com/stretchr/testify/assert"
)
func TestCIDR6Tree_MostSpecificContains(t *testing.T) {
tree := NewTree6()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
tree.AddCIDR(Parse("4.1.1.1/24"), "4a")
tree.AddCIDR(Parse("4.1.1.1/30"), "4b")
tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")
tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4b", "4.1.1.2"},
{"4c", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{"6a", "1:2:0:4:1:1:1:1"},
{"6b", "1:2:0:4:5:1:1:1"},
{"6c", "1:2:0:4:5:0:0:0"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.MostSpecificContains(net.ParseIP(tt.IP)))
}
tree = NewTree6()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
tree.AddCIDR(Parse("::/0"), "cool6")
assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("0.0.0.0")))
assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("255.255.255.255")))
assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("::")))
assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8")))
}
func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) {
tree := NewTree6()
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")
tests := []struct {
Result interface{}
IP string
}{
{"6a", "1:2:0:4:1:1:1:1"},
{"6b", "1:2:0:4:5:1:1:1"},
{"6c", "1:2:0:4:5:0:0:0"},
}
for _, tt := range tests {
ip := net.ParseIP(tt.IP)
hi := binary.BigEndian.Uint64(ip[:8])
lo := binary.BigEndian.Uint64(ip[8:])
assert.Equal(t, tt.Result, tree.MostSpecificContainsIpV6(hi, lo))
}
}

View File

@@ -7,7 +7,6 @@ import (
"flag" "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"math" "math"
"net" "net"
"os" "os"
@@ -181,9 +180,15 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
if err != nil { if err != nil {
return fmt.Errorf("error while generating ecdsa keys: %s", err) return fmt.Errorf("error while generating ecdsa keys: %s", err)
} }
// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L60
rawPriv = key.D.FillBytes(make([]byte, 32)) // ecdh.PrivateKey lets us get at the encoded bytes, even though
pub = elliptic.Marshal(elliptic.P256(), key.X, key.Y) // we aren't using ECDH here.
eKey, err := key.ECDH()
if err != nil {
return fmt.Errorf("error while converting ecdsa key: %s", err)
}
rawPriv = eKey.Bytes()
pub = eKey.PublicKey().Bytes()
} }
nc := cert.NebulaCertificate{ nc := cert.NebulaCertificate{
@@ -213,27 +218,27 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
return fmt.Errorf("error while signing: %s", err) return fmt.Errorf("error while signing: %s", err)
} }
var b []byte
if *cf.encryption { if *cf.encryption {
b, err := cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams) b, err = cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
if err != nil { if err != nil {
return fmt.Errorf("error while encrypting out-key: %s", err) return fmt.Errorf("error while encrypting out-key: %s", err)
} }
err = ioutil.WriteFile(*cf.outKeyPath, b, 0600)
} else { } else {
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalSigningPrivateKey(curve, rawPriv), 0600) b = cert.MarshalSigningPrivateKey(curve, rawPriv)
} }
err = os.WriteFile(*cf.outKeyPath, b, 0600)
if err != nil { if err != nil {
return fmt.Errorf("error while writing out-key: %s", err) return fmt.Errorf("error while writing out-key: %s", err)
} }
b, err := nc.MarshalToPEM() b, err = nc.MarshalToPEM()
if err != nil { if err != nil {
return fmt.Errorf("error while marshalling certificate: %s", err) return fmt.Errorf("error while marshalling certificate: %s", err)
} }
err = ioutil.WriteFile(*cf.outCertPath, b, 0600) err = os.WriteFile(*cf.outCertPath, b, 0600)
if err != nil { if err != nil {
return fmt.Errorf("error while writing out-crt: %s", err) return fmt.Errorf("error while writing out-crt: %s", err)
} }
@@ -244,7 +249,7 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
return fmt.Errorf("error while generating qr code: %s", err) return fmt.Errorf("error while generating qr code: %s", err)
} }
err = ioutil.WriteFile(*cf.outQRPath, b, 0600) err = os.WriteFile(*cf.outQRPath, b, 0600)
if err != nil { if err != nil {
return fmt.Errorf("error while writing out-qr: %s", err) return fmt.Errorf("error while writing out-qr: %s", err)
} }

View File
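The nebula-cert change near the top of this file swaps the hand-rolled P-256 encodings (`key.D.FillBytes` plus the now-deprecated `elliptic.Marshal`) for `(*ecdsa.PrivateKey).ECDH()`, available since Go 1.20. A standalone illustration of the equivalence, independent of the CLI plumbing:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// ECDH() re-expresses the key in crypto/ecdh form. Bytes() is the
	// fixed-length private scalar (32 bytes for P-256) and
	// PublicKey().Bytes() is the uncompressed point (65 bytes), the same
	// representations the old FillBytes/elliptic.Marshal code produced.
	eKey, err := key.ECDH()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(eKey.Bytes()), len(eKey.PublicKey().Bytes()))
}
```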

@@ -7,7 +7,6 @@ import (
"bytes" "bytes"
"encoding/pem" "encoding/pem"
"errors" "errors"
"io/ioutil"
"os" "os"
"strings" "strings"
"testing" "testing"
@@ -107,7 +106,7 @@ func Test_ca(t *testing.T) {
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
// create temp key file // create temp key file
keyF, err := ioutil.TempFile("", "test.key") keyF, err := os.CreateTemp("", "test.key")
assert.Nil(t, err) assert.Nil(t, err)
os.Remove(keyF.Name()) os.Remove(keyF.Name())
@@ -120,7 +119,7 @@ func Test_ca(t *testing.T) {
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
// create temp cert file // create temp cert file
crtF, err := ioutil.TempFile("", "test.crt") crtF, err := os.CreateTemp("", "test.crt")
assert.Nil(t, err) assert.Nil(t, err)
os.Remove(crtF.Name()) os.Remove(crtF.Name())
os.Remove(keyF.Name()) os.Remove(keyF.Name())
@@ -134,13 +133,13 @@ func Test_ca(t *testing.T) {
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
// read cert and key files // read cert and key files
rb, _ := ioutil.ReadFile(keyF.Name()) rb, _ := os.ReadFile(keyF.Name())
lKey, b, err := cert.UnmarshalEd25519PrivateKey(rb) lKey, b, err := cert.UnmarshalEd25519PrivateKey(rb)
assert.Len(t, b, 0) assert.Len(t, b, 0)
assert.Nil(t, err) assert.Nil(t, err)
assert.Len(t, lKey, 64) assert.Len(t, lKey, 64)
rb, _ = ioutil.ReadFile(crtF.Name()) rb, _ = os.ReadFile(crtF.Name())
lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb) lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
assert.Len(t, b, 0) assert.Len(t, b, 0)
assert.Nil(t, err) assert.Nil(t, err)
@@ -166,7 +165,7 @@ func Test_ca(t *testing.T) {
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
// read encrypted key file and verify default params // read encrypted key file and verify default params
rb, _ = ioutil.ReadFile(keyF.Name()) rb, _ = os.ReadFile(keyF.Name())
k, _ := pem.Decode(rb) k, _ := pem.Decode(rb)
ned, err := cert.UnmarshalNebulaEncryptedData(k.Bytes) ned, err := cert.UnmarshalNebulaEncryptedData(k.Bytes)
assert.Nil(t, err) assert.Nil(t, err)

View File
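The remaining `cmd/nebula-cert` edits are the mechanical retirement of `io/ioutil`, which has been deprecated since Go 1.16: `ioutil.ReadFile`/`ioutil.WriteFile` become `os.ReadFile`/`os.WriteFile`, and `ioutil.TempFile` becomes `os.CreateTemp`. They are drop-in replacements, as a quick sketch shows:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// os.CreateTemp replaces ioutil.TempFile with identical semantics.
	f, err := os.CreateTemp("", "example-*.txt")
	if err != nil {
		panic(err)
	}
	defer os.Remove(f.Name())
	f.Close()

	// os.WriteFile / os.ReadFile replace the ioutil equivalents unchanged.
	if err := os.WriteFile(f.Name(), []byte("hello"), 0600); err != nil {
		panic(err)
	}
	b, err := os.ReadFile(f.Name())
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```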

@@ -4,7 +4,6 @@ import (
"flag" "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
@@ -54,12 +53,12 @@ func keygen(args []string, out io.Writer, errOut io.Writer) error {
return fmt.Errorf("invalid curve: %s", *cf.curve) return fmt.Errorf("invalid curve: %s", *cf.curve)
} }
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600) err = os.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
if err != nil { if err != nil {
return fmt.Errorf("error while writing out-key: %s", err) return fmt.Errorf("error while writing out-key: %s", err)
} }
err = ioutil.WriteFile(*cf.outPubPath, cert.MarshalPublicKey(curve, pub), 0600) err = os.WriteFile(*cf.outPubPath, cert.MarshalPublicKey(curve, pub), 0600)
if err != nil { if err != nil {
return fmt.Errorf("error while writing out-pub: %s", err) return fmt.Errorf("error while writing out-pub: %s", err)
} }

View File

@@ -2,7 +2,6 @@ package main
import ( import (
"bytes" "bytes"
"io/ioutil"
"os" "os"
"testing" "testing"
@@ -54,7 +53,7 @@ func Test_keygen(t *testing.T) {
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
// create temp key file // create temp key file
keyF, err := ioutil.TempFile("", "test.key") keyF, err := os.CreateTemp("", "test.key")
assert.Nil(t, err) assert.Nil(t, err)
defer os.Remove(keyF.Name()) defer os.Remove(keyF.Name())
@@ -67,7 +66,7 @@ func Test_keygen(t *testing.T) {
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
// create temp pub file // create temp pub file
pubF, err := ioutil.TempFile("", "test.pub") pubF, err := os.CreateTemp("", "test.pub")
assert.Nil(t, err) assert.Nil(t, err)
defer os.Remove(pubF.Name()) defer os.Remove(pubF.Name())
@@ -80,13 +79,13 @@ func Test_keygen(t *testing.T) {
assert.Equal(t, "", eb.String()) assert.Equal(t, "", eb.String())
// read cert and key files // read cert and key files
rb, _ := ioutil.ReadFile(keyF.Name()) rb, _ := os.ReadFile(keyF.Name())
lKey, b, err := cert.UnmarshalX25519PrivateKey(rb) lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
assert.Len(t, b, 0) assert.Len(t, b, 0)
assert.Nil(t, err) assert.Nil(t, err)
assert.Len(t, lKey, 32) assert.Len(t, lKey, 32)
rb, _ = ioutil.ReadFile(pubF.Name()) rb, _ = os.ReadFile(pubF.Name())
lPub, b, err := cert.UnmarshalX25519PublicKey(rb) lPub, b, err := cert.UnmarshalX25519PublicKey(rb)
assert.Len(t, b, 0) assert.Len(t, b, 0)
assert.Nil(t, err) assert.Nil(t, err)

View File

@@ -5,7 +5,6 @@ import (
"flag" "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"strings" "strings"
@@ -41,7 +40,7 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
return err return err
} }
rawCert, err := ioutil.ReadFile(*pf.path) rawCert, err := os.ReadFile(*pf.path)
if err != nil { if err != nil {
return fmt.Errorf("unable to read cert; %s", err) return fmt.Errorf("unable to read cert; %s", err)
} }
@@ -87,7 +86,7 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
return fmt.Errorf("error while generating qr code: %s", err) return fmt.Errorf("error while generating qr code: %s", err)
} }
err = ioutil.WriteFile(*pf.outQRPath, b, 0600) err = os.WriteFile(*pf.outQRPath, b, 0600)
if err != nil { if err != nil {
return fmt.Errorf("error while writing out-qr: %s", err) return fmt.Errorf("error while writing out-qr: %s", err)
} }

View File

@@ -2,7 +2,6 @@ package main
import ( import (
"bytes" "bytes"
"io/ioutil"
"os" "os"
"testing" "testing"
"time" "time"
@@ -54,7 +53,7 @@ func Test_printCert(t *testing.T) {
// invalid cert at path // invalid cert at path
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
tf, err := ioutil.TempFile("", "print-cert") tf, err := os.CreateTemp("", "print-cert")
assert.Nil(t, err) assert.Nil(t, err)
defer os.Remove(tf.Name()) defer os.Remove(tf.Name())

View File

@@ -6,7 +6,6 @@ import (
"flag" "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"net" "net"
"os" "os"
"strings" "strings"
@@ -73,7 +72,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return newHelpErrorf("cannot set both -in-pub and -out-key") return newHelpErrorf("cannot set both -in-pub and -out-key")
} }
rawCAKey, err := ioutil.ReadFile(*sf.caKeyPath) rawCAKey, err := os.ReadFile(*sf.caKeyPath)
if err != nil { if err != nil {
return fmt.Errorf("error while reading ca-key: %s", err) return fmt.Errorf("error while reading ca-key: %s", err)
} }
@@ -112,7 +111,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("error while parsing ca-key: %s", err) return fmt.Errorf("error while parsing ca-key: %s", err)
} }
rawCACert, err := ioutil.ReadFile(*sf.caCertPath) rawCACert, err := os.ReadFile(*sf.caCertPath)
if err != nil { if err != nil {
return fmt.Errorf("error while reading ca-crt: %s", err) return fmt.Errorf("error while reading ca-crt: %s", err)
} }
@@ -178,7 +177,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
var pub, rawPriv []byte var pub, rawPriv []byte
if *sf.inPubPath != "" { if *sf.inPubPath != "" {
rawPub, err := ioutil.ReadFile(*sf.inPubPath) rawPub, err := os.ReadFile(*sf.inPubPath)
if err != nil { if err != nil {
return fmt.Errorf("error while reading in-pub: %s", err) return fmt.Errorf("error while reading in-pub: %s", err)
} }
@@ -235,7 +234,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("refusing to overwrite existing key: %s", *sf.outKeyPath) return fmt.Errorf("refusing to overwrite existing key: %s", *sf.outKeyPath)
} }
err = ioutil.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600) err = os.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
if err != nil { if err != nil {
return fmt.Errorf("error while writing out-key: %s", err) return fmt.Errorf("error while writing out-key: %s", err)
} }
@@ -246,7 +245,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("error while marshalling certificate: %s", err) return fmt.Errorf("error while marshalling certificate: %s", err)
} }
err = ioutil.WriteFile(*sf.outCertPath, b, 0600) err = os.WriteFile(*sf.outCertPath, b, 0600)
if err != nil { if err != nil {
return fmt.Errorf("error while writing out-crt: %s", err) return fmt.Errorf("error while writing out-crt: %s", err)
} }
@@ -257,7 +256,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("error while generating qr code: %s", err) return fmt.Errorf("error while generating qr code: %s", err)
} }
err = ioutil.WriteFile(*sf.outQRPath, b, 0600) err = os.WriteFile(*sf.outQRPath, b, 0600)
if err != nil { if err != nil {
return fmt.Errorf("error while writing out-qr: %s", err) return fmt.Errorf("error while writing out-qr: %s", err)
} }

View File

@@ -7,7 +7,6 @@ import (
"bytes" "bytes"
"crypto/rand" "crypto/rand"
"errors" "errors"
"io/ioutil"
"os" "os"
"testing" "testing"
"time" "time"
@@ -104,7 +103,7 @@ func Test_signCert(t *testing.T) {
// failed to unmarshal key // failed to unmarshal key
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
caKeyF, err := ioutil.TempFile("", "sign-cert.key") caKeyF, err := os.CreateTemp("", "sign-cert.key")
assert.Nil(t, err) assert.Nil(t, err)
defer os.Remove(caKeyF.Name()) defer os.Remove(caKeyF.Name())
@@ -128,7 +127,7 @@ func Test_signCert(t *testing.T) {
// failed to unmarshal cert // failed to unmarshal cert
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
caCrtF, err := ioutil.TempFile("", "sign-cert.crt") caCrtF, err := os.CreateTemp("", "sign-cert.crt")
assert.Nil(t, err) assert.Nil(t, err)
defer os.Remove(caCrtF.Name()) defer os.Remove(caCrtF.Name())
@@ -159,7 +158,7 @@ func Test_signCert(t *testing.T) {
// failed to unmarshal pub // failed to unmarshal pub
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
inPubF, err := ioutil.TempFile("", "in.pub") inPubF, err := os.CreateTemp("", "in.pub")
assert.Nil(t, err) assert.Nil(t, err)
defer os.Remove(inPubF.Name()) defer os.Remove(inPubF.Name())
@@ -206,7 +205,7 @@ func Test_signCert(t *testing.T) {
// mismatched ca key // mismatched ca key
_, caPriv2, _ := ed25519.GenerateKey(rand.Reader) _, caPriv2, _ := ed25519.GenerateKey(rand.Reader)
caKeyF2, err := ioutil.TempFile("", "sign-cert-2.key") caKeyF2, err := os.CreateTemp("", "sign-cert-2.key")
assert.Nil(t, err) assert.Nil(t, err)
defer os.Remove(caKeyF2.Name()) defer os.Remove(caKeyF2.Name())
caKeyF2.Write(cert.MarshalEd25519PrivateKey(caPriv2)) caKeyF2.Write(cert.MarshalEd25519PrivateKey(caPriv2))
@@ -227,7 +226,7 @@ func Test_signCert(t *testing.T) {
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
// create temp key file // create temp key file
keyF, err := ioutil.TempFile("", "test.key") keyF, err := os.CreateTemp("", "test.key")
assert.Nil(t, err) assert.Nil(t, err)
os.Remove(keyF.Name()) os.Remove(keyF.Name())
@@ -241,7 +240,7 @@ func Test_signCert(t *testing.T) {
os.Remove(keyF.Name()) os.Remove(keyF.Name())
// create temp cert file // create temp cert file
crtF, err := ioutil.TempFile("", "test.crt") crtF, err := os.CreateTemp("", "test.crt")
assert.Nil(t, err) assert.Nil(t, err)
os.Remove(crtF.Name()) os.Remove(crtF.Name())
@@ -254,13 +253,13 @@ func Test_signCert(t *testing.T) {
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
// read cert and key files // read cert and key files
rb, _ := ioutil.ReadFile(keyF.Name()) rb, _ := os.ReadFile(keyF.Name())
lKey, b, err := cert.UnmarshalX25519PrivateKey(rb) lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
assert.Len(t, b, 0) assert.Len(t, b, 0)
assert.Nil(t, err) assert.Nil(t, err)
assert.Len(t, lKey, 32) assert.Len(t, lKey, 32)
rb, _ = ioutil.ReadFile(crtF.Name()) rb, _ = os.ReadFile(crtF.Name())
lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb) lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
assert.Len(t, b, 0) assert.Len(t, b, 0)
assert.Nil(t, err) assert.Nil(t, err)
@@ -296,7 +295,7 @@ func Test_signCert(t *testing.T) {
assert.Empty(t, eb.String()) assert.Empty(t, eb.String())
// read cert file and check pub key matches in-pub // read cert file and check pub key matches in-pub
rb, _ = ioutil.ReadFile(crtF.Name()) rb, _ = os.ReadFile(crtF.Name())
lCrt, b, err = cert.UnmarshalNebulaCertificateFromPEM(rb) lCrt, b, err = cert.UnmarshalNebulaCertificateFromPEM(rb)
assert.Len(t, b, 0) assert.Len(t, b, 0)
assert.Nil(t, err) assert.Nil(t, err)
@@ -348,11 +347,11 @@ func Test_signCert(t *testing.T) {
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
caKeyF, err = ioutil.TempFile("", "sign-cert.key") caKeyF, err = os.CreateTemp("", "sign-cert.key")
assert.Nil(t, err) assert.Nil(t, err)
defer os.Remove(caKeyF.Name()) defer os.Remove(caKeyF.Name())
caCrtF, err = ioutil.TempFile("", "sign-cert.crt") caCrtF, err = os.CreateTemp("", "sign-cert.crt")
assert.Nil(t, err) assert.Nil(t, err)
defer os.Remove(caCrtF.Name()) defer os.Remove(caCrtF.Name())


@@ -4,7 +4,6 @@ import (
"flag" "flag"
"fmt" "fmt"
"io" "io"
"io/ioutil"
"os" "os"
"strings" "strings"
"time" "time"
@@ -40,7 +39,7 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {
return err return err
} }
rawCACert, err := ioutil.ReadFile(*vf.caPath) rawCACert, err := os.ReadFile(*vf.caPath)
if err != nil { if err != nil {
return fmt.Errorf("error while reading ca: %s", err) return fmt.Errorf("error while reading ca: %s", err)
} }
@@ -57,7 +56,7 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {
} }
} }
rawCert, err := ioutil.ReadFile(*vf.certPath) rawCert, err := os.ReadFile(*vf.certPath)
if err != nil { if err != nil {
return fmt.Errorf("unable to read crt; %s", err) return fmt.Errorf("unable to read crt; %s", err)
} }


@@ -3,7 +3,6 @@ package main
import ( import (
"bytes" "bytes"
"crypto/rand" "crypto/rand"
"io/ioutil"
"os" "os"
"testing" "testing"
"time" "time"
@@ -56,7 +55,7 @@ func Test_verify(t *testing.T) {
// invalid ca at path // invalid ca at path
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
caFile, err := ioutil.TempFile("", "verify-ca") caFile, err := os.CreateTemp("", "verify-ca")
assert.Nil(t, err) assert.Nil(t, err)
defer os.Remove(caFile.Name()) defer os.Remove(caFile.Name())
@@ -92,7 +91,7 @@ func Test_verify(t *testing.T) {
// invalid crt at path // invalid crt at path
ob.Reset() ob.Reset()
eb.Reset() eb.Reset()
certFile, err := ioutil.TempFile("", "verify-cert") certFile, err := os.CreateTemp("", "verify-cert")
assert.Nil(t, err) assert.Nil(t, err)
defer os.Remove(certFile.Name()) defer os.Remove(certFile.Name())


@@ -59,13 +59,8 @@ func main() {
} }
ctrl, err := nebula.Main(c, *configTest, Build, l, nil) ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
if err != nil {
switch v := err.(type) { util.LogWithContextIfNeeded("Failed to start", err, l)
case util.ContextualError:
v.Log(l)
os.Exit(1)
case error:
l.WithError(err).Error("Failed to start")
os.Exit(1) os.Exit(1)
} }


@@ -53,18 +53,14 @@ func main() {
} }
ctrl, err := nebula.Main(c, *configTest, Build, l, nil) ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
if err != nil {
switch v := err.(type) { util.LogWithContextIfNeeded("Failed to start", err, l)
case util.ContextualError:
v.Log(l)
os.Exit(1)
case error:
l.WithError(err).Error("Failed to start")
os.Exit(1) os.Exit(1)
} }
if !*configTest { if !*configTest {
ctrl.Start() ctrl.Start()
notifyReady(l)
ctrl.ShutdownBlock() ctrl.ShutdownBlock()
} }


@@ -0,0 +1,42 @@
package main
import (
"net"
"os"
"time"
"github.com/sirupsen/logrus"
)
// SdNotifyReady tells systemd the service is ready and dependent services can now be started
// https://www.freedesktop.org/software/systemd/man/sd_notify.html
// https://www.freedesktop.org/software/systemd/man/systemd.service.html
const SdNotifyReady = "READY=1"
func notifyReady(l *logrus.Logger) {
sockName := os.Getenv("NOTIFY_SOCKET")
if sockName == "" {
l.Debugln("NOTIFY_SOCKET systemd env var not set, not sending ready signal")
return
}
conn, err := net.DialTimeout("unixgram", sockName, time.Second)
if err != nil {
l.WithError(err).Error("failed to connect to systemd notification socket")
return
}
defer conn.Close()
err = conn.SetWriteDeadline(time.Now().Add(time.Second))
if err != nil {
l.WithError(err).Error("failed to set the write deadline for the systemd notification socket")
return
}
if _, err = conn.Write([]byte(SdNotifyReady)); err != nil {
l.WithError(err).Error("failed to signal the systemd notification socket")
return
}
l.Debugln("notified systemd the service is ready")
}
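A rough way to exercise the new systemd notification path without systemd is to stand in for it with a local unixgram listener. The test below is only a sketch and is not part of this change; the test name and socket path are invented, and it assumes it compiles alongside the Linux notifyReady implementation:

package main

import (
	"net"
	"path/filepath"
	"testing"
	"time"

	"github.com/sirupsen/logrus"
)

func TestNotifyReadySketch(t *testing.T) {
	// Pretend to be systemd: listen on a throwaway unixgram socket.
	sock := filepath.Join(t.TempDir(), "notify.sock")
	ln, err := net.ListenUnixgram("unixgram", &net.UnixAddr{Name: sock, Net: "unixgram"})
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()

	// Point notifyReady at the fake socket and fire it.
	t.Setenv("NOTIFY_SOCKET", sock)
	notifyReady(logrus.New())

	// Expect READY=1 to arrive on the socket.
	_ = ln.SetReadDeadline(time.Now().Add(time.Second))
	buf := make([]byte, 64)
	n, _, err := ln.ReadFrom(buf)
	if err != nil {
		t.Fatal(err)
	}
	if got := string(buf[:n]); got != SdNotifyReady {
		t.Fatalf("expected %q, got %q", SdNotifyReady, got)
	}
}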


@@ -0,0 +1,10 @@
//go:build !linux
// +build !linux
package main
import "github.com/sirupsen/logrus"
func notifyReady(_ *logrus.Logger) {
// No init service to notify
}


@@ -4,7 +4,7 @@ import (
"context" "context"
"errors" "errors"
"fmt" "fmt"
"io/ioutil" "math"
"os" "os"
"os/signal" "os/signal"
"path/filepath" "path/filepath"
@@ -15,7 +15,7 @@ import (
"syscall" "syscall"
"time" "time"
"github.com/imdario/mergo" "dario.cat/mergo"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
) )
@@ -121,6 +121,10 @@ func (c *C) HasChanged(k string) bool {
// CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the // CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the
// original path provided to Load. The old settings are shallow copied for change detection after the reload. // original path provided to Load. The old settings are shallow copied for change detection after the reload.
func (c *C) CatchHUP(ctx context.Context) { func (c *C) CatchHUP(ctx context.Context) {
if c.path == "" {
return
}
ch := make(chan os.Signal, 1) ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGHUP) signal.Notify(ch, syscall.SIGHUP)
@@ -236,6 +240,15 @@ func (c *C) GetInt(k string, d int) int {
return v return v
} }
// GetUint32 will get the uint32 for k or return the default d if not found or invalid
func (c *C) GetUint32(k string, d uint32) uint32 {
r := c.GetInt(k, int(d))
if uint64(r) > uint64(math.MaxUint32) {
return d
}
return uint32(r)
}
// GetBool will get the bool for k or return the default d if not found or invalid // GetBool will get the bool for k or return the default d if not found or invalid
func (c *C) GetBool(k string, d bool) bool { func (c *C) GetBool(k string, d bool) bool {
r := strings.ToLower(c.GetString(k, fmt.Sprintf("%v", d))) r := strings.ToLower(c.GetString(k, fmt.Sprintf("%v", d)))
@@ -348,7 +361,7 @@ func (c *C) parse() error {
var m map[interface{}]interface{} var m map[interface{}]interface{}
for _, path := range c.files { for _, path := range c.files {
b, err := ioutil.ReadFile(path) b, err := os.ReadFile(path)
if err != nil { if err != nil {
return err return err
} }
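The GetUint32 helper added above falls back to the default whenever the parsed value does not fit in a uint32; negative values also fall back, because the int-to-uint64 conversion wraps above MaxUint32. A small sketch using an invented key, populating Settings directly the way the tests in this changeset do:

package main

import (
	"fmt"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/config"
)

func main() {
	c := config.NewC(logrus.New())

	// "example.max_entries" is a made-up key, set directly for illustration.
	c.Settings["example"] = map[interface{}]interface{}{"max_entries": -1}
	fmt.Println(c.GetUint32("example.max_entries", 64)) // out of range, prints the default: 64

	c.Settings["example"] = map[interface{}]interface{}{"max_entries": 1000}
	fmt.Println(c.GetUint32("example.max_entries", 64)) // prints 1000
}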


@@ -1,13 +1,12 @@
package config package config
import ( import (
"io/ioutil"
"os" "os"
"path/filepath" "path/filepath"
"testing" "testing"
"time" "time"
"github.com/imdario/mergo" "dario.cat/mergo"
"github.com/slackhq/nebula/test" "github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
@@ -16,10 +15,10 @@ import (
func TestConfig_Load(t *testing.T) { func TestConfig_Load(t *testing.T) {
l := test.NewLogger() l := test.NewLogger()
dir, err := ioutil.TempDir("", "config-test") dir, err := os.MkdirTemp("", "config-test")
// invalid yaml // invalid yaml
c := NewC(l) c := NewC(l)
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644) os.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}") assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}")
// simple multi config merge // simple multi config merge
@@ -29,8 +28,8 @@ func TestConfig_Load(t *testing.T) {
assert.Nil(t, err) assert.Nil(t, err)
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644) os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
ioutil.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644) os.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644)
assert.Nil(t, c.Load(dir)) assert.Nil(t, c.Load(dir))
expected := map[interface{}]interface{}{ expected := map[interface{}]interface{}{
"outer": map[interface{}]interface{}{ "outer": map[interface{}]interface{}{
@@ -120,9 +119,9 @@ func TestConfig_HasChanged(t *testing.T) {
func TestConfig_ReloadConfig(t *testing.T) { func TestConfig_ReloadConfig(t *testing.T) {
l := test.NewLogger() l := test.NewLogger()
done := make(chan bool, 1) done := make(chan bool, 1)
dir, err := ioutil.TempDir("", "config-test") dir, err := os.MkdirTemp("", "config-test")
assert.Nil(t, err) assert.Nil(t, err)
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644) os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
c := NewC(l) c := NewC(l)
assert.Nil(t, c.Load(dir)) assert.Nil(t, c.Load(dir))
@@ -131,7 +130,7 @@ func TestConfig_ReloadConfig(t *testing.T) {
assert.False(t, c.HasChanged("outer")) assert.False(t, c.HasChanged("outer"))
assert.False(t, c.HasChanged("")) assert.False(t, c.HasChanged(""))
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644) os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
c.RegisterReloadCallback(func(c *C) { c.RegisterReloadCallback(func(c *C) {
done <- true done <- true


@@ -3,15 +3,18 @@ package nebula
import ( import (
"bytes" "bytes"
"context" "context"
"encoding/binary"
"fmt"
"net/netip"
"sync" "sync"
"sync/atomic"
"time" "time"
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/header" "github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
) )
type trafficDecision int type trafficDecision int
@@ -23,133 +26,128 @@ const (
swapPrimary trafficDecision = 3 swapPrimary trafficDecision = 3
migrateRelays trafficDecision = 4 migrateRelays trafficDecision = 4
tryRehandshake trafficDecision = 5 tryRehandshake trafficDecision = 5
sendTestPacket trafficDecision = 6
) )
type connectionManager struct { type connectionManager struct {
in map[uint32]struct{}
inLock *sync.RWMutex
out map[uint32]struct{}
outLock *sync.RWMutex
// relayUsed holds which relay localIndexs are in use // relayUsed holds which relay localIndexs are in use
relayUsed map[uint32]struct{} relayUsed map[uint32]struct{}
relayUsedLock *sync.RWMutex relayUsedLock *sync.RWMutex
hostMap *HostMap hostMap *HostMap
trafficTimer *LockingTimerWheel[uint32] trafficTimer *LockingTimerWheel[uint32]
intf *Interface intf *Interface
pendingDeletion map[uint32]struct{} punchy *Punchy
punchy *Punchy
// Configuration settings
checkInterval time.Duration checkInterval time.Duration
pendingDeletionInterval time.Duration pendingDeletionInterval time.Duration
metricsTxPunchy metrics.Counter inactivityTimeout atomic.Int64
dropInactive atomic.Bool
metricsTxPunchy metrics.Counter
l *logrus.Logger l *logrus.Logger
} }
func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager { func newConnectionManagerFromConfig(l *logrus.Logger, c *config.C, hm *HostMap, p *Punchy) *connectionManager {
var max time.Duration cm := &connectionManager{
if checkInterval < pendingDeletionInterval { hostMap: hm,
max = pendingDeletionInterval l: l,
} else { punchy: p,
max = checkInterval relayUsed: make(map[uint32]struct{}),
relayUsedLock: &sync.RWMutex{},
metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
} }
nc := &connectionManager{ cm.reload(c, true)
hostMap: intf.hostMap, c.RegisterReloadCallback(func(c *config.C) {
in: make(map[uint32]struct{}), cm.reload(c, false)
inLock: &sync.RWMutex{}, })
out: make(map[uint32]struct{}),
outLock: &sync.RWMutex{},
relayUsed: make(map[uint32]struct{}),
relayUsedLock: &sync.RWMutex{},
trafficTimer: NewLockingTimerWheel[uint32](time.Millisecond*500, max),
intf: intf,
pendingDeletion: make(map[uint32]struct{}),
checkInterval: checkInterval,
pendingDeletionInterval: pendingDeletionInterval,
punchy: punchy,
metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
l: l,
}
nc.Start(ctx) return cm
return nc
} }
func (n *connectionManager) In(localIndex uint32) { func (cm *connectionManager) reload(c *config.C, initial bool) {
n.inLock.RLock() if initial {
// If this already exists, return cm.checkInterval = time.Duration(c.GetInt("timers.connection_alive_interval", 5)) * time.Second
if _, ok := n.in[localIndex]; ok { cm.pendingDeletionInterval = time.Duration(c.GetInt("timers.pending_deletion_interval", 10)) * time.Second
n.inLock.RUnlock()
return // We want at least a minimum resolution of 500ms per tick so that we can hit these intervals
// pretty close to their configured duration.
// The inactivity duration is checked each time a hostinfo ticks through so we don't need the wheel to contain it.
minDuration := min(time.Millisecond*500, cm.checkInterval, cm.pendingDeletionInterval)
maxDuration := max(cm.checkInterval, cm.pendingDeletionInterval)
cm.trafficTimer = NewLockingTimerWheel[uint32](minDuration, maxDuration)
}
if initial || c.HasChanged("tunnels.inactivity_timeout") {
old := cm.getInactivityTimeout()
cm.inactivityTimeout.Store((int64)(c.GetDuration("tunnels.inactivity_timeout", 10*time.Minute)))
if !initial {
cm.l.WithField("oldDuration", old).
WithField("newDuration", cm.getInactivityTimeout()).
Info("Inactivity timeout has changed")
}
}
if initial || c.HasChanged("tunnels.drop_inactive") {
old := cm.dropInactive.Load()
cm.dropInactive.Store(c.GetBool("tunnels.drop_inactive", false))
if !initial {
cm.l.WithField("oldBool", old).
WithField("newBool", cm.dropInactive.Load()).
Info("Drop inactive setting has changed")
}
} }
n.inLock.RUnlock()
n.inLock.Lock()
n.in[localIndex] = struct{}{}
n.inLock.Unlock()
} }
func (n *connectionManager) Out(localIndex uint32) { func (cm *connectionManager) getInactivityTimeout() time.Duration {
n.outLock.RLock() return (time.Duration)(cm.inactivityTimeout.Load())
// If this already exists, return
if _, ok := n.out[localIndex]; ok {
n.outLock.RUnlock()
return
}
n.outLock.RUnlock()
n.outLock.Lock()
n.out[localIndex] = struct{}{}
n.outLock.Unlock()
} }
func (n *connectionManager) RelayUsed(localIndex uint32) { func (cm *connectionManager) In(h *HostInfo) {
n.relayUsedLock.RLock() h.in.Store(true)
}
func (cm *connectionManager) Out(h *HostInfo) {
h.out.Store(true)
}
func (cm *connectionManager) RelayUsed(localIndex uint32) {
cm.relayUsedLock.RLock()
// If this already exists, return // If this already exists, return
if _, ok := n.relayUsed[localIndex]; ok { if _, ok := cm.relayUsed[localIndex]; ok {
n.relayUsedLock.RUnlock() cm.relayUsedLock.RUnlock()
return return
} }
n.relayUsedLock.RUnlock() cm.relayUsedLock.RUnlock()
n.relayUsedLock.Lock() cm.relayUsedLock.Lock()
n.relayUsed[localIndex] = struct{}{} cm.relayUsed[localIndex] = struct{}{}
n.relayUsedLock.Unlock() cm.relayUsedLock.Unlock()
} }
// getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and // getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and
// resets the state for this local index // resets the state for this local index
func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) { func (cm *connectionManager) getAndResetTrafficCheck(h *HostInfo, now time.Time) (bool, bool) {
n.inLock.Lock() in := h.in.Swap(false)
n.outLock.Lock() out := h.out.Swap(false)
_, in := n.in[localIndex] if in || out {
_, out := n.out[localIndex] h.lastUsed = now
delete(n.in, localIndex) }
delete(n.out, localIndex)
n.inLock.Unlock()
n.outLock.Unlock()
return in, out return in, out
} }
func (n *connectionManager) AddTrafficWatch(localIndex uint32) { // AddTrafficWatch must be called for every new HostInfo.
// Use a write lock directly because it should be incredibly rare that we are ever already tracking this index // We will continue to monitor the HostInfo until the tunnel is dropped.
n.outLock.Lock() func (cm *connectionManager) AddTrafficWatch(h *HostInfo) {
if _, ok := n.out[localIndex]; ok { if h.out.Swap(true) == false {
n.outLock.Unlock() cm.trafficTimer.Add(h.localIndexId, cm.checkInterval)
return
} }
n.out[localIndex] = struct{}{}
n.trafficTimer.Add(localIndex, n.checkInterval)
n.outLock.Unlock()
} }
func (n *connectionManager) Start(ctx context.Context) { func (cm *connectionManager) Start(ctx context.Context) {
go n.Run(ctx) clockSource := time.NewTicker(cm.trafficTimer.t.tickDuration)
}
func (n *connectionManager) Run(ctx context.Context) {
//TODO: this tick should be based on the min wheel tick? Check firewall
clockSource := time.NewTicker(500 * time.Millisecond)
defer clockSource.Stop() defer clockSource.Stop()
p := []byte("") p := []byte("")
@@ -162,125 +160,137 @@ func (n *connectionManager) Run(ctx context.Context) {
return return
case now := <-clockSource.C: case now := <-clockSource.C:
n.trafficTimer.Advance(now) cm.trafficTimer.Advance(now)
for { for {
localIndex, has := n.trafficTimer.Purge() localIndex, has := cm.trafficTimer.Purge()
if !has { if !has {
break break
} }
n.doTrafficCheck(localIndex, p, nb, out, now) cm.doTrafficCheck(localIndex, p, nb, out, now)
} }
} }
} }
} }
func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) { func (cm *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
decision, hostinfo, primary := n.makeTrafficDecision(localIndex, p, nb, out, now) decision, hostinfo, primary := cm.makeTrafficDecision(localIndex, now)
switch decision { switch decision {
case deleteTunnel: case deleteTunnel:
if n.hostMap.DeleteHostInfo(hostinfo) { if cm.hostMap.DeleteHostInfo(hostinfo) {
// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap // Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
n.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp) cm.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
} }
case closeTunnel: case closeTunnel:
n.intf.sendCloseTunnel(hostinfo) cm.intf.sendCloseTunnel(hostinfo)
n.intf.closeTunnel(hostinfo) cm.intf.closeTunnel(hostinfo)
case swapPrimary: case swapPrimary:
n.swapPrimary(hostinfo, primary) cm.swapPrimary(hostinfo, primary)
case migrateRelays: case migrateRelays:
n.migrateRelayUsed(hostinfo, primary) cm.migrateRelayUsed(hostinfo, primary)
case tryRehandshake: case tryRehandshake:
n.tryRehandshake(hostinfo) cm.tryRehandshake(hostinfo)
case sendTestPacket:
cm.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
} }
n.resetRelayTrafficCheck(hostinfo) cm.resetRelayTrafficCheck(hostinfo)
} }
func (n *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) { func (cm *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
if hostinfo != nil { if hostinfo != nil {
n.relayUsedLock.Lock() cm.relayUsedLock.Lock()
defer n.relayUsedLock.Unlock() defer cm.relayUsedLock.Unlock()
// No need to migrate any relays, delete usage info now. // No need to migrate any relays, delete usage info now.
for _, idx := range hostinfo.relayState.CopyRelayForIdxs() { for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
delete(n.relayUsed, idx) delete(cm.relayUsed, idx)
} }
} }
} }
func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) { func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
relayFor := oldhostinfo.relayState.CopyAllRelayFor() relayFor := oldhostinfo.relayState.CopyAllRelayFor()
for _, r := range relayFor { for _, r := range relayFor {
existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerIp) existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerIp)
var index uint32 var index uint32
var relayFrom iputil.VpnIp var relayFrom netip.Addr
var relayTo iputil.VpnIp var relayTo netip.Addr
switch { switch {
case ok && existing.State == Established: case ok:
// This relay already exists in newhostinfo, then do nothing. switch existing.State {
continue case Established, PeerRequested, Disestablished:
case ok && existing.State == Requested: // This relay already exists in newhostinfo, then do nothing.
// The relay exists in a Requested state; re-send the request continue
index = existing.LocalIndex case Requested:
switch r.Type { // The relayed connection exists in a Requested state; re-send the request
case TerminalType: index = existing.LocalIndex
relayFrom = newhostinfo.vpnIp switch r.Type {
relayTo = existing.PeerIp case TerminalType:
case ForwardingType: relayFrom = cm.intf.myVpnNet.Addr()
relayFrom = existing.PeerIp relayTo = existing.PeerIp
relayTo = newhostinfo.vpnIp case ForwardingType:
default: relayFrom = existing.PeerIp
// should never happen relayTo = newhostinfo.vpnIp
default:
// should never happen
panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
}
} }
case !ok: case !ok:
n.relayUsedLock.RLock() cm.relayUsedLock.RLock()
if _, relayUsed := n.relayUsed[r.LocalIndex]; !relayUsed { if _, relayUsed := cm.relayUsed[r.LocalIndex]; !relayUsed {
// The relay hasn't been used; don't migrate it. // The relay hasn't been used; don't migrate it.
n.relayUsedLock.RUnlock() cm.relayUsedLock.RUnlock()
continue continue
} }
n.relayUsedLock.RUnlock() cm.relayUsedLock.RUnlock()
// The relay doesn't exist at all; create some relay state and send the request. // The relay doesn't exist at all; create some relay state and send the request.
var err error var err error
index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerIp, nil, r.Type, Requested) index, err = AddRelay(cm.l, newhostinfo, cm.hostMap, r.PeerIp, nil, r.Type, Requested)
if err != nil { if err != nil {
n.l.WithError(err).Error("failed to migrate relay to new hostinfo") cm.l.WithError(err).Error("failed to migrate relay to new hostinfo")
continue continue
} }
switch r.Type { switch r.Type {
case TerminalType: case TerminalType:
relayFrom = newhostinfo.vpnIp relayFrom = cm.intf.myVpnNet.Addr()
relayTo = r.PeerIp relayTo = r.PeerIp
case ForwardingType: case ForwardingType:
relayFrom = r.PeerIp relayFrom = r.PeerIp
relayTo = newhostinfo.vpnIp relayTo = newhostinfo.vpnIp
default: default:
// should never happen // should never happen
panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
} }
} }
//TODO: IPV6-WORK
relayFromB := relayFrom.As4()
relayToB := relayTo.As4()
// Send a CreateRelayRequest to the peer. // Send a CreateRelayRequest to the peer.
req := NebulaControl{ req := NebulaControl{
Type: NebulaControl_CreateRelayRequest, Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: index, InitiatorRelayIndex: index,
RelayFromIp: uint32(relayFrom), RelayFromIp: binary.BigEndian.Uint32(relayFromB[:]),
RelayToIp: uint32(relayTo), RelayToIp: binary.BigEndian.Uint32(relayToB[:]),
} }
msg, err := req.Marshal() msg, err := req.Marshal()
if err != nil { if err != nil {
n.l.WithError(err).Error("failed to marshal Control message to migrate relay") cm.l.WithError(err).Error("failed to marshal Control message to migrate relay")
} else { } else {
n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu)) cm.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
n.l.WithFields(logrus.Fields{ cm.l.WithFields(logrus.Fields{
"relayFrom": iputil.VpnIp(req.RelayFromIp), "relayFrom": req.RelayFromIp,
"relayTo": iputil.VpnIp(req.RelayToIp), "relayTo": req.RelayToIp,
"initiatorRelayIndex": req.InitiatorRelayIndex, "initiatorRelayIndex": req.InitiatorRelayIndex,
"responderRelayIndex": req.ResponderRelayIndex, "responderRelayIndex": req.ResponderRelayIndex,
"vpnIp": newhostinfo.vpnIp}). "vpnIp": newhostinfo.vpnIp}).
@@ -289,46 +299,45 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
} }
} }
func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []byte, now time.Time) (trafficDecision, *HostInfo, *HostInfo) { func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
n.hostMap.RLock() // Read lock the main hostmap to order decisions based on tunnels being the primary tunnel
defer n.hostMap.RUnlock() cm.hostMap.RLock()
defer cm.hostMap.RUnlock()
hostinfo := n.hostMap.Indexes[localIndex] hostinfo := cm.hostMap.Indexes[localIndex]
if hostinfo == nil { if hostinfo == nil {
n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap") cm.l.WithField("localIndex", localIndex).Debugln("Not found in hostmap")
delete(n.pendingDeletion, localIndex)
return doNothing, nil, nil return doNothing, nil, nil
} }
if n.isInvalidCertificate(now, hostinfo) { if cm.isInvalidCertificate(now, hostinfo) {
delete(n.pendingDeletion, hostinfo.localIndexId)
return closeTunnel, hostinfo, nil return closeTunnel, hostinfo, nil
} }
primary := n.hostMap.Hosts[hostinfo.vpnIp] primary := cm.hostMap.Hosts[hostinfo.vpnIp]
mainHostInfo := true mainHostInfo := true
if primary != nil && primary != hostinfo { if primary != nil && primary != hostinfo {
mainHostInfo = false mainHostInfo = false
} }
// Check for traffic on this hostinfo // Check for traffic on this hostinfo
inTraffic, outTraffic := n.getAndResetTrafficCheck(localIndex) inTraffic, outTraffic := cm.getAndResetTrafficCheck(hostinfo, now)
// A hostinfo is determined alive if there is incoming traffic // A hostinfo is determined alive if there is incoming traffic
if inTraffic { if inTraffic {
decision := doNothing decision := doNothing
if n.l.Level >= logrus.DebugLevel { if cm.l.Level >= logrus.DebugLevel {
hostinfo.logger(n.l). hostinfo.logger(cm.l).
WithField("tunnelCheck", m{"state": "alive", "method": "passive"}). WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
Debug("Tunnel status") Debug("Tunnel status")
} }
delete(n.pendingDeletion, hostinfo.localIndexId) hostinfo.pendingDeletion.Store(false)
if mainHostInfo { if mainHostInfo {
decision = tryRehandshake decision = tryRehandshake
} else { } else {
if n.shouldSwapPrimary(hostinfo, primary) { if cm.shouldSwapPrimary(hostinfo, primary) {
decision = swapPrimary decision = swapPrimary
} else { } else {
// migrate the relays to the primary, if in use. // migrate the relays to the primary, if in use.
@@ -336,155 +345,175 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []
} }
} }
n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval) cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)
if !outTraffic { if !outTraffic {
// Send a punch packet to keep the NAT state alive // Send a punch packet to keep the NAT state alive
n.sendPunch(hostinfo) cm.sendPunch(hostinfo)
} }
return decision, hostinfo, primary return decision, hostinfo, primary
} }
if _, ok := n.pendingDeletion[hostinfo.localIndexId]; ok { if hostinfo.pendingDeletion.Load() {
// We have already sent a test packet and nothing was returned, this hostinfo is dead // We have already sent a test packet and nothing was returned, this hostinfo is dead
hostinfo.logger(n.l). hostinfo.logger(cm.l).
WithField("tunnelCheck", m{"state": "dead", "method": "active"}). WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
Info("Tunnel status") Info("Tunnel status")
delete(n.pendingDeletion, hostinfo.localIndexId)
return deleteTunnel, hostinfo, nil return deleteTunnel, hostinfo, nil
} }
decision := doNothing
if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo { if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
if !outTraffic { if !outTraffic {
inactiveFor, isInactive := cm.isInactive(hostinfo, now)
if isInactive {
// Tunnel is inactive, tear it down
hostinfo.logger(cm.l).
WithField("inactiveDuration", inactiveFor).
WithField("primary", mainHostInfo).
Info("Dropping tunnel due to inactivity")
return closeTunnel, hostinfo, primary
}
// If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel. // If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel.
// Just maintain NAT state if configured to do so. // Just maintain NAT state if configured to do so.
n.sendPunch(hostinfo) cm.sendPunch(hostinfo)
n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval) cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)
return doNothing, nil, nil return doNothing, nil, nil
} }
if n.punchy.GetTargetEverything() { if cm.punchy.GetTargetEverything() {
// This is similar to the old punchy behavior with a slight optimization. // This is similar to the old punchy behavior with a slight optimization.
// We aren't receiving traffic but we are sending it, punch on all known // We aren't receiving traffic but we are sending it, punch on all known
// ips in case we need to re-prime NAT state // ips in case we need to re-prime NAT state
n.sendPunch(hostinfo) cm.sendPunch(hostinfo)
} }
if n.l.Level >= logrus.DebugLevel { if cm.l.Level >= logrus.DebugLevel {
hostinfo.logger(n.l). hostinfo.logger(cm.l).
WithField("tunnelCheck", m{"state": "testing", "method": "active"}). WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
Debug("Tunnel status") Debug("Tunnel status")
} }
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues // Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out) decision = sendTestPacket
} else { } else {
if n.l.Level >= logrus.DebugLevel { if cm.l.Level >= logrus.DebugLevel {
hostinfo.logger(n.l).Debugf("Hostinfo sadness") hostinfo.logger(cm.l).Debugf("Hostinfo sadness")
} }
} }
n.pendingDeletion[hostinfo.localIndexId] = struct{}{} hostinfo.pendingDeletion.Store(true)
n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval) cm.trafficTimer.Add(hostinfo.localIndexId, cm.pendingDeletionInterval)
return doNothing, nil, nil return decision, hostinfo, nil
} }
func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool { func (cm *connectionManager) isInactive(hostinfo *HostInfo, now time.Time) (time.Duration, bool) {
if cm.dropInactive.Load() == false {
// We aren't configured to drop inactive tunnels
return 0, false
}
inactiveDuration := now.Sub(hostinfo.lastUsed)
if inactiveDuration < cm.getInactivityTimeout() {
// It's not considered inactive
return inactiveDuration, false
}
// The tunnel is inactive
return inactiveDuration, true
}
func (cm *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
// The primary tunnel is the most recent handshake to complete locally and should work entirely fine. // The primary tunnel is the most recent handshake to complete locally and should work entirely fine.
// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary. // If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
// Let's sort this out. // Let's sort this out.
if current.vpnIp < n.intf.myVpnIp { if current.vpnIp.Compare(cm.intf.myVpnNet.Addr()) < 0 {
// Only one side should flip primary because if both flip then we may never resolve to a single tunnel. // Only one side should flip primary because if both flip then we may never resolve to a single tunnel.
// vpn ip is static across all tunnels for this host pair so lets use that to determine who is flipping. // vpn ip is static across all tunnels for this host pair so lets use that to determine who is flipping.
// The remotes vpn ip is lower than mine. I will not flip. // The remotes vpn ip is lower than mine. I will not flip.
return false return false
} }
certState := n.intf.certState.Load() certState := cm.intf.pki.GetCertState()
return bytes.Equal(current.ConnectionState.certState.certificate.Signature, certState.certificate.Signature) return bytes.Equal(current.ConnectionState.myCert.Signature, certState.Certificate.Signature)
} }
func (n *connectionManager) swapPrimary(current, primary *HostInfo) { func (cm *connectionManager) swapPrimary(current, primary *HostInfo) {
n.hostMap.Lock() cm.hostMap.Lock()
// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake. // Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
if n.hostMap.Hosts[current.vpnIp] == primary { if cm.hostMap.Hosts[current.vpnIp] == primary {
n.hostMap.unlockedMakePrimary(current) cm.hostMap.unlockedMakePrimary(current)
} }
n.hostMap.Unlock() cm.hostMap.Unlock()
} }
// isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and // isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and
// the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid // the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid
// check and return true. // check and return true.
func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool { func (cm *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
remoteCert := hostinfo.GetCert() remoteCert := hostinfo.GetCert()
if remoteCert == nil { if remoteCert == nil {
return false return false
} }
valid, err := remoteCert.VerifyWithCache(now, n.intf.caPool) valid, err := remoteCert.VerifyWithCache(now, cm.intf.pki.GetCAPool())
if valid { if valid {
return false return false
} }
if !n.intf.disconnectInvalid && err != cert.ErrBlockListed { if !cm.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
// Block listed certificates should always be disconnected // Block listed certificates should always be disconnected
return false return false
} }
fingerprint, _ := remoteCert.Sha256Sum() fingerprint, _ := remoteCert.Sha256Sum()
hostinfo.logger(n.l).WithError(err). hostinfo.logger(cm.l).WithError(err).
WithField("fingerprint", fingerprint). WithField("fingerprint", fingerprint).
Info("Remote certificate is no longer valid, tearing down the tunnel") Info("Remote certificate is no longer valid, tearing down the tunnel")
return true return true
} }
func (n *connectionManager) sendPunch(hostinfo *HostInfo) { func (cm *connectionManager) sendPunch(hostinfo *HostInfo) {
if !n.punchy.GetPunch() { if !cm.punchy.GetPunch() {
// Punching is disabled // Punching is disabled
return return
} }
if n.punchy.GetTargetEverything() { if cm.intf.lightHouse.IsLighthouseIP(hostinfo.vpnIp) {
hostinfo.remotes.ForEach(n.hostMap.preferredRanges, func(addr *udp.Addr, preferred bool) { // Do not punch to lighthouses, we assume our lighthouse update interval is good enough.
n.metricsTxPunchy.Inc(1) // In the event the update interval is not sufficient to maintain NAT state then a publicly available lighthouse
n.intf.outside.WriteTo([]byte{1}, addr) // would lose the ability to notify us and punchy.respond would become unreliable.
})
} else if hostinfo.remote != nil {
n.metricsTxPunchy.Inc(1)
n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
}
}
func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
certState := n.intf.certState.Load()
if bytes.Equal(hostinfo.ConnectionState.certState.certificate.Signature, certState.certificate.Signature) {
return return
} }
n.l.WithField("vpnIp", hostinfo.vpnIp). if cm.punchy.GetTargetEverything() {
hostinfo.remotes.ForEach(cm.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
cm.metricsTxPunchy.Inc(1)
cm.intf.outside.WriteTo([]byte{1}, addr)
})
} else if hostinfo.remote.IsValid() {
cm.metricsTxPunchy.Inc(1)
cm.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
}
}
func (cm *connectionManager) tryRehandshake(hostinfo *HostInfo) {
certState := cm.intf.pki.GetCertState()
if bytes.Equal(hostinfo.ConnectionState.myCert.Signature, certState.Certificate.Signature) {
return
}
cm.l.WithField("vpnIp", hostinfo.vpnIp).
WithField("reason", "local certificate is not current"). WithField("reason", "local certificate is not current").
Info("Re-handshaking with remote") Info("Re-handshaking with remote")
//TODO: this is copied from getOrHandshake to keep the extra checks out of the hot path, figure it out cm.intf.handshakeManager.StartHandshake(hostinfo.vpnIp, nil)
newHostinfo := n.intf.handshakeManager.AddVpnIp(hostinfo.vpnIp, n.intf.initHostInfo)
if !newHostinfo.HandshakeReady {
ixHandshakeStage0(n.intf, newHostinfo.vpnIp, newHostinfo)
}
//If this is a static host, we don't need to wait for the HostQueryReply
//We can trigger the handshake right now
if _, ok := n.intf.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
select {
case n.intf.handshakeManager.trigger <- hostinfo.vpnIp:
default:
}
}
} }
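The inactivity teardown introduced here is controlled by two new keys, tunnels.drop_inactive and tunnels.inactivity_timeout, and both can be changed on config reload. A hedged sketch of wiring them through newConnectionManagerFromConfig, mirroring the DisconnectInactive test further down (the key values and test name are arbitrary):

package nebula

import (
	"net/netip"
	"testing"
	"time"

	"github.com/slackhq/nebula/config"
	"github.com/slackhq/nebula/test"
)

func Test_InactivityConfigSketch(t *testing.T) {
	l := test.NewLogger()

	// Enable inactivity drops with a 15 minute timeout.
	conf := config.NewC(l)
	conf.Settings["tunnels"] = map[interface{}]interface{}{
		"drop_inactive":      true,
		"inactivity_timeout": "15m",
	}

	punchy := NewPunchyFromConfig(l, conf)
	hostMap := newHostMap(l, netip.MustParsePrefix("172.1.1.1/24"))
	cm := newConnectionManagerFromConfig(l, conf, hostMap, punchy)

	if !cm.dropInactive.Load() {
		t.Fatal("expected tunnels.drop_inactive to enable inactivity drops")
	}
	if cm.getInactivityTimeout() != 15*time.Minute {
		t.Fatalf("unexpected inactivity timeout: %v", cm.getInactivityTimeout())
	}
}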


@@ -1,31 +1,29 @@
package nebula package nebula
import ( import (
"context"
"crypto/ed25519" "crypto/ed25519"
"crypto/rand" "crypto/rand"
"net" "net"
"net/netip"
"testing" "testing"
"time" "time"
"github.com/flynn/noise" "github.com/flynn/noise"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config" "github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test" "github.com/slackhq/nebula/test"
"github.com/slackhq/nebula/udp" "github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
var vpnIp iputil.VpnIp
func newTestLighthouse() *LightHouse { func newTestLighthouse() *LightHouse {
lh := &LightHouse{ lh := &LightHouse{
l: test.NewLogger(), l: test.NewLogger(),
addrMap: map[iputil.VpnIp]*RemoteList{}, addrMap: map[netip.Addr]*RemoteList{},
queryChan: make(chan netip.Addr, 10),
} }
lighthouses := map[iputil.VpnIp]struct{}{} lighthouses := map[netip.Addr]struct{}{}
staticList := map[iputil.VpnIp]struct{}{} staticList := map[netip.Addr]struct{}{}
lh.lighthouses.Store(&lighthouses) lh.lighthouses.Store(&lighthouses)
lh.staticList.Store(&staticList) lh.staticList.Store(&staticList)
@@ -36,37 +34,40 @@ func newTestLighthouse() *LightHouse {
func Test_NewConnectionManagerTest(t *testing.T) { func Test_NewConnectionManagerTest(t *testing.T) {
l := test.NewLogger() l := test.NewLogger()
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24") //_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") vpncidr := netip.MustParsePrefix("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24") localrange := netip.MustParsePrefix("10.1.1.1/24")
vpnIp = iputil.Ip2VpnIp(net.ParseIP("172.1.1.2")) vpnIp := netip.MustParseAddr("172.1.1.2")
preferredRanges := []*net.IPNet{localrange} preferredRanges := []netip.Prefix{localrange}
// Very incomplete mock objects // Very incomplete mock objects
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges) hostMap := newHostMap(l, vpncidr)
hostMap.preferredRanges.Store(&preferredRanges)
cs := &CertState{ cs := &CertState{
rawCertificate: []byte{}, RawCertificate: []byte{},
privateKey: []byte{}, PrivateKey: []byte{},
certificate: &cert.NebulaCertificate{}, Certificate: &cert.NebulaCertificate{},
rawCertificateNoKey: []byte{}, RawCertificateNoKey: []byte{},
} }
lh := newTestLighthouse() lh := newTestLighthouse()
ifce := &Interface{ ifce := &Interface{
hostMap: hostMap, hostMap: hostMap,
inside: &test.NoopTun{}, inside: &test.NoopTun{},
outside: &udp.Conn{}, outside: &udp.NoopConn{},
firewall: &Firewall{}, firewall: &Firewall{},
lightHouse: lh, lightHouse: lh,
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig), pki: &PKI{},
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
l: l, l: l,
} }
ifce.certState.Store(cs) ifce.pki.cs.Store(cs)
// Create manager // Create manager
ctx, cancel := context.WithCancel(context.Background()) conf := config.NewC(l)
defer cancel() punchy := NewPunchyFromConfig(l, conf)
punchy := NewPunchyFromConfig(l, config.NewC(l)) nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy) nc.intf = ifce
p := []byte("") p := []byte("")
nb := make([]byte, 12, 12) nb := make([]byte, 12, 12)
out := make([]byte, mtu) out := make([]byte, mtu)
@@ -78,37 +79,38 @@ func Test_NewConnectionManagerTest(t *testing.T) {
remoteIndexId: 9901, remoteIndexId: 9901,
} }
hostinfo.ConnectionState = &ConnectionState{ hostinfo.ConnectionState = &ConnectionState{
certState: cs, myCert: &cert.NebulaCertificate{},
H: &noise.HandshakeState{}, H: &noise.HandshakeState{},
} }
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce) nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
// We saw traffic out to vpnIp // We saw traffic out to vpnIp
nc.Out(hostinfo.localIndexId) nc.Out(hostinfo)
nc.In(hostinfo.localIndexId) nc.In(hostinfo)
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId) assert.False(t, hostinfo.pendingDeletion.Load())
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp) assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.out, hostinfo.localIndexId) assert.True(t, hostinfo.out.Load())
assert.True(t, hostinfo.in.Load())
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded // Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now()) nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId) assert.False(t, hostinfo.pendingDeletion.Load())
assert.NotContains(t, nc.out, hostinfo.localIndexId) assert.False(t, hostinfo.out.Load())
assert.NotContains(t, nc.in, hostinfo.localIndexId) assert.False(t, hostinfo.in.Load())
// Do another traffic check tick, this host should be pending deletion now // Do another traffic check tick, this host should be pending deletion now
nc.Out(hostinfo.localIndexId) nc.Out(hostinfo)
assert.True(t, hostinfo.out.Load())
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now()) nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId) assert.True(t, hostinfo.pendingDeletion.Load())
assert.NotContains(t, nc.out, hostinfo.localIndexId) assert.False(t, hostinfo.out.Load())
assert.NotContains(t, nc.in, hostinfo.localIndexId) assert.False(t, hostinfo.in.Load())
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp) assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
// Do a final traffic check tick, the host should now be removed // Do a final traffic check tick, the host should now be removed
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now()) nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnIp) assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId) assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
} }
@@ -116,36 +118,40 @@ func Test_NewConnectionManagerTest(t *testing.T) {
func Test_NewConnectionManagerTest2(t *testing.T) { func Test_NewConnectionManagerTest2(t *testing.T) {
l := test.NewLogger() l := test.NewLogger()
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24") //_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") vpncidr := netip.MustParsePrefix("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24") localrange := netip.MustParsePrefix("10.1.1.1/24")
preferredRanges := []*net.IPNet{localrange} vpnIp := netip.MustParseAddr("172.1.1.2")
preferredRanges := []netip.Prefix{localrange}
// Very incomplete mock objects // Very incomplete mock objects
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges) hostMap := newHostMap(l, vpncidr)
hostMap.preferredRanges.Store(&preferredRanges)
cs := &CertState{ cs := &CertState{
rawCertificate: []byte{}, RawCertificate: []byte{},
privateKey: []byte{}, PrivateKey: []byte{},
certificate: &cert.NebulaCertificate{}, Certificate: &cert.NebulaCertificate{},
rawCertificateNoKey: []byte{}, RawCertificateNoKey: []byte{},
} }
lh := newTestLighthouse() lh := newTestLighthouse()
ifce := &Interface{ ifce := &Interface{
hostMap: hostMap, hostMap: hostMap,
inside: &test.NoopTun{}, inside: &test.NoopTun{},
outside: &udp.Conn{}, outside: &udp.NoopConn{},
firewall: &Firewall{}, firewall: &Firewall{},
lightHouse: lh, lightHouse: lh,
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig), pki: &PKI{},
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
l: l, l: l,
} }
ifce.certState.Store(cs) ifce.pki.cs.Store(cs)
// Create manager // Create manager
ctx, cancel := context.WithCancel(context.Background()) conf := config.NewC(l)
defer cancel() punchy := NewPunchyFromConfig(l, conf)
punchy := NewPunchyFromConfig(l, config.NewC(l)) nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy) nc.intf = ifce
p := []byte("") p := []byte("")
nb := make([]byte, 12, 12) nb := make([]byte, 12, 12)
out := make([]byte, mtu) out := make([]byte, mtu)
@@ -157,39 +163,136 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
remoteIndexId: 9901, remoteIndexId: 9901,
} }
hostinfo.ConnectionState = &ConnectionState{ hostinfo.ConnectionState = &ConnectionState{
certState: cs, myCert: &cert.NebulaCertificate{},
H: &noise.HandshakeState{}, H: &noise.HandshakeState{},
} }
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce) nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
// We saw traffic out to vpnIp // We saw traffic out to vpnIp
nc.Out(hostinfo.localIndexId) nc.Out(hostinfo)
nc.In(hostinfo.localIndexId) nc.In(hostinfo)
assert.NotContains(t, nc.pendingDeletion, hostinfo.vpnIp) assert.True(t, hostinfo.in.Load())
assert.True(t, hostinfo.out.Load())
assert.False(t, hostinfo.pendingDeletion.Load())
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp) assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded // Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now()) nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId) assert.False(t, hostinfo.pendingDeletion.Load())
assert.NotContains(t, nc.out, hostinfo.localIndexId) assert.False(t, hostinfo.out.Load())
assert.NotContains(t, nc.in, hostinfo.localIndexId) assert.False(t, hostinfo.in.Load())
// Do another traffic check tick, this host should be pending deletion now // Do another traffic check tick, this host should be pending deletion now
nc.Out(hostinfo.localIndexId) nc.Out(hostinfo)
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now()) nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId) assert.True(t, hostinfo.pendingDeletion.Load())
assert.NotContains(t, nc.out, hostinfo.localIndexId) assert.False(t, hostinfo.out.Load())
assert.NotContains(t, nc.in, hostinfo.localIndexId) assert.False(t, hostinfo.in.Load())
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp) assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
// We saw traffic, should no longer be pending deletion // We saw traffic, should no longer be pending deletion
nc.In(hostinfo.localIndexId) nc.In(hostinfo)
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now()) nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId) assert.False(t, hostinfo.pendingDeletion.Load())
assert.NotContains(t, nc.out, hostinfo.localIndexId) assert.False(t, hostinfo.out.Load())
assert.NotContains(t, nc.in, hostinfo.localIndexId) assert.False(t, hostinfo.in.Load())
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
}
func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {
l := test.NewLogger()
vpncidr := netip.MustParsePrefix("172.1.1.1/24")
localrange := netip.MustParsePrefix("10.1.1.1/24")
vpnIp := netip.MustParseAddr("172.1.1.2")
preferredRanges := []netip.Prefix{localrange}
// Very incomplete mock objects
hostMap := newHostMap(l, vpncidr)
hostMap.preferredRanges.Store(&preferredRanges)
cs := &CertState{
RawCertificate: []byte{},
PrivateKey: []byte{},
Certificate: &cert.NebulaCertificate{},
RawCertificateNoKey: []byte{},
}
lh := newTestLighthouse()
ifce := &Interface{
hostMap: hostMap,
inside: &test.NoopTun{},
outside: &udp.NoopConn{},
firewall: &Firewall{},
lightHouse: lh,
pki: &PKI{},
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
l: l,
}
ifce.pki.cs.Store(cs)
// Create manager
conf := config.NewC(l)
conf.Settings["tunnels"] = map[interface{}]interface{}{
"drop_inactive": true,
}
punchy := NewPunchyFromConfig(l, conf)
nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
assert.True(t, nc.dropInactive.Load())
nc.intf = ifce
// Add an ip we have established a connection w/ to hostmap
hostinfo := &HostInfo{
vpnIp: vpnIp,
localIndexId: 1099,
remoteIndexId: 9901,
}
hostinfo.ConnectionState = &ConnectionState{
myCert: &cert.NebulaCertificate{},
H: &noise.HandshakeState{},
}
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
// Do a traffic check tick, in and out should be cleared but should not be pending deletion
nc.Out(hostinfo)
nc.In(hostinfo)
assert.True(t, hostinfo.out.Load())
assert.True(t, hostinfo.in.Load())
now := time.Now()
decision, _, _ := nc.makeTrafficDecision(hostinfo.localIndexId, now)
assert.Equal(t, tryRehandshake, decision)
assert.Equal(t, now, hostinfo.lastUsed)
assert.False(t, hostinfo.pendingDeletion.Load())
assert.False(t, hostinfo.out.Load())
assert.False(t, hostinfo.in.Load())
decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*5))
assert.Equal(t, doNothing, decision)
assert.Equal(t, now, hostinfo.lastUsed)
assert.False(t, hostinfo.pendingDeletion.Load())
assert.False(t, hostinfo.out.Load())
assert.False(t, hostinfo.in.Load())
// Do another traffic check tick, should still not be pending deletion
decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*10))
assert.Equal(t, doNothing, decision)
assert.Equal(t, now, hostinfo.lastUsed)
assert.False(t, hostinfo.pendingDeletion.Load())
assert.False(t, hostinfo.out.Load())
assert.False(t, hostinfo.in.Load())
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
// Finally advance beyond the inactivity timeout
decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Minute*10))
assert.Equal(t, closeTunnel, decision)
assert.Equal(t, now, hostinfo.lastUsed)
assert.False(t, hostinfo.pendingDeletion.Load())
assert.False(t, hostinfo.out.Load())
assert.False(t, hostinfo.in.Load())
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId) assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp) assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
} }
@@ -204,10 +307,12 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
IP: net.IPv4(172, 1, 1, 2), IP: net.IPv4(172, 1, 1, 2),
Mask: net.IPMask{255, 255, 255, 0}, Mask: net.IPMask{255, 255, 255, 0},
} }
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") vpncidr := netip.MustParsePrefix("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24") localrange := netip.MustParsePrefix("10.1.1.1/24")
preferredRanges := []*net.IPNet{localrange} vpnIp := netip.MustParseAddr("172.1.1.2")
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges) preferredRanges := []netip.Prefix{localrange}
hostMap := newHostMap(l, vpncidr)
hostMap.preferredRanges.Store(&preferredRanges)
// Generate keys for CA and peer's cert. // Generate keys for CA and peer's cert.
pubCA, privCA, _ := ed25519.GenerateKey(rand.Reader) pubCA, privCA, _ := ed25519.GenerateKey(rand.Reader)
@@ -220,7 +325,8 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
PublicKey: pubCA, PublicKey: pubCA,
}, },
} }
caCert.Sign(cert.Curve_CURVE25519, privCA)
assert.NoError(t, caCert.Sign(cert.Curve_CURVE25519, privCA))
ncp := &cert.NebulaCAPool{ ncp := &cert.NebulaCAPool{
CAs: cert.NewCAPool().CAs, CAs: cert.NewCAPool().CAs,
} }
@@ -239,41 +345,46 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
Issuer: "ca", Issuer: "ca",
}, },
} }
peerCert.Sign(cert.Curve_CURVE25519, privCA) assert.NoError(t, peerCert.Sign(cert.Curve_CURVE25519, privCA))
cs := &CertState{ cs := &CertState{
rawCertificate: []byte{}, RawCertificate: []byte{},
privateKey: []byte{}, PrivateKey: []byte{},
certificate: &cert.NebulaCertificate{}, Certificate: &cert.NebulaCertificate{},
rawCertificateNoKey: []byte{}, RawCertificateNoKey: []byte{},
} }
lh := newTestLighthouse() lh := newTestLighthouse()
ifce := &Interface{ ifce := &Interface{
hostMap: hostMap, hostMap: hostMap,
inside: &test.NoopTun{}, inside: &test.NoopTun{},
outside: &udp.Conn{}, outside: &udp.NoopConn{},
firewall: &Firewall{}, firewall: &Firewall{},
lightHouse: lh, lightHouse: lh,
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig), handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
l: l, l: l,
disconnectInvalid: true, pki: &PKI{},
caPool: ncp,
} }
ifce.certState.Store(cs) ifce.pki.cs.Store(cs)
ifce.pki.caPool.Store(ncp)
ifce.disconnectInvalid.Store(true)
// Create manager // Create manager
ctx, cancel := context.WithCancel(context.Background()) conf := config.NewC(l)
defer cancel() punchy := NewPunchyFromConfig(l, conf)
punchy := NewPunchyFromConfig(l, config.NewC(l)) nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy) nc.intf = ifce
ifce.connectionManager = nc ifce.connectionManager = nc
hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
hostinfo.ConnectionState = &ConnectionState{ hostinfo := &HostInfo{
certState: cs, vpnIp: vpnIp,
peerCert: &peerCert, ConnectionState: &ConnectionState{
H: &noise.HandshakeState{}, myCert: &cert.NebulaCertificate{},
peerCert: &peerCert,
H: &noise.HandshakeState{},
},
} }
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
// Move ahead 45s. // Move ahead 45s.
// Check if to disconnect with invalid certificate. // Check if to disconnect with invalid certificate.


@@ -18,35 +18,34 @@ type ConnectionState struct {
eKey *NebulaCipherState eKey *NebulaCipherState
dKey *NebulaCipherState dKey *NebulaCipherState
H *noise.HandshakeState H *noise.HandshakeState
certState *CertState myCert *cert.NebulaCertificate
peerCert *cert.NebulaCertificate peerCert *cert.NebulaCertificate
initiator bool initiator bool
messageCounter atomic.Uint64 messageCounter atomic.Uint64
window *Bits window *Bits
queueLock sync.Mutex
writeLock sync.Mutex writeLock sync.Mutex
ready bool
} }
func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState { func NewConnectionState(l *logrus.Logger, cipher string, certState *CertState, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
var dhFunc noise.DHFunc var dhFunc noise.DHFunc
curCertState := f.certState.Load() switch certState.Certificate.Details.Curve {
switch curCertState.certificate.Details.Curve {
case cert.Curve_CURVE25519: case cert.Curve_CURVE25519:
dhFunc = noise.DH25519 dhFunc = noise.DH25519
case cert.Curve_P256: case cert.Curve_P256:
dhFunc = noiseutil.DHP256 dhFunc = noiseutil.DHP256
default: default:
l.Errorf("invalid curve: %s", curCertState.certificate.Details.Curve) l.Errorf("invalid curve: %s", certState.Certificate.Details.Curve)
return nil return nil
} }
cs := noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
if f.cipher == "chachapoly" { var cs noise.CipherSuite
if cipher == "chachapoly" {
cs = noise.NewCipherSuite(dhFunc, noise.CipherChaChaPoly, noise.HashSHA256) cs = noise.NewCipherSuite(dhFunc, noise.CipherChaChaPoly, noise.HashSHA256)
} else {
cs = noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
} }
static := noise.DHKey{Private: curCertState.privateKey, Public: curCertState.publicKey} static := noise.DHKey{Private: certState.PrivateKey, Public: certState.PublicKey}
b := NewBits(ReplayWindow) b := NewBits(ReplayWindow)
// Clear out bit 0, we never transmit it and we don't want it showing as packet loss // Clear out bit 0, we never transmit it and we don't want it showing as packet loss
@@ -71,9 +70,10 @@ func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern
H: hs, H: hs,
initiator: initiator, initiator: initiator,
window: b, window: b,
ready: false, myCert: certState.Certificate,
certState: curCertState,
} }
// always start the counter from 2, as packet 1 and packet 2 are handshake packets.
ci.messageCounter.Add(2)
return ci return ci
} }
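For reference, a hedged sketch of calling the constructor with its new signature. This is not part of the diff: the `*CertState` value `cs`, the logger `l`, and the choice of `noise.HandshakeIX` are illustrative assumptions only.

```go
// A usage sketch inside package nebula; cs and l are assumed to be valid.
func exampleConnectionState(l *logrus.Logger, cs *CertState) *ConnectionState {
	ci := NewConnectionState(l, "chachapoly", cs, true, noise.HandshakeIX, nil, 0)
	if ci == nil {
		// NewConnectionState returns nil when the certificate's curve is neither
		// CURVE25519 nor P256.
		l.Error("could not build connection state")
	}
	return ci
}
```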
@@ -83,6 +83,5 @@ func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
"certificate": cs.peerCert, "certificate": cs.peerCert,
"initiator": cs.initiator, "initiator": cs.initiator,
"message_counter": cs.messageCounter.Load(), "message_counter": cs.messageCounter.Load(),
"ready": cs.ready,
}) })
} }


@@ -2,7 +2,7 @@ package nebula
import ( import (
"context" "context"
"net" "net/netip"
"os" "os"
"os/signal" "os/signal"
"syscall" "syscall"
@@ -10,33 +10,43 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/header" "github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil" "github.com/slackhq/nebula/overlay"
"github.com/slackhq/nebula/udp"
) )
// Every interaction here needs to take extra care to copy memory and not return or use arguments "as is" when touching // Every interaction here needs to take extra care to copy memory and not return or use arguments "as is" when touching
// core. This means copying IP objects, slices, de-referencing pointers and taking the actual value, etc // core. This means copying IP objects, slices, de-referencing pointers and taking the actual value, etc
type controlEach func(h *HostInfo)
type controlHostLister interface {
QueryVpnIp(vpnIp netip.Addr) *HostInfo
ForEachIndex(each controlEach)
ForEachVpnIp(each controlEach)
GetPreferredRanges() []netip.Prefix
}
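To illustrate the copy-before-return rule stated in the comment above, here is a minimal self-contained sketch. It is not part of the diff and the helper name is hypothetical; it only shows the pattern of handing callers a copy rather than an internal slice.

```go
package main

import (
	"fmt"
	"net/netip"
)

// copyRemoteAddrs is a hypothetical helper: netip.AddrPort is a value type,
// so copying the slice is enough to avoid exposing core memory to callers.
func copyRemoteAddrs(src []netip.AddrPort) []netip.AddrPort {
	out := make([]netip.AddrPort, len(src))
	copy(out, src)
	return out
}

func main() {
	internal := []netip.AddrPort{netip.MustParseAddrPort("192.0.2.1:4242")}
	external := copyRemoteAddrs(internal)
	external[0] = netip.MustParseAddrPort("198.51.100.1:4242") // does not touch internal
	fmt.Println(internal[0], external[0])
}
```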
type Control struct { type Control struct {
f *Interface f *Interface
l *logrus.Logger l *logrus.Logger
cancel context.CancelFunc ctx context.Context
sshStart func() cancel context.CancelFunc
statsStart func() sshStart func()
dnsStart func() statsStart func()
dnsStart func()
lighthouseStart func()
connectionManagerStart func(context.Context)
} }
type ControlHostInfo struct { type ControlHostInfo struct {
VpnIp net.IP `json:"vpnIp"` VpnIp netip.Addr `json:"vpnIp"`
LocalIndex uint32 `json:"localIndex"` LocalIndex uint32 `json:"localIndex"`
RemoteIndex uint32 `json:"remoteIndex"` RemoteIndex uint32 `json:"remoteIndex"`
RemoteAddrs []*udp.Addr `json:"remoteAddrs"` RemoteAddrs []netip.AddrPort `json:"remoteAddrs"`
CachedPackets int `json:"cachedPackets"`
Cert *cert.NebulaCertificate `json:"cert"` Cert *cert.NebulaCertificate `json:"cert"`
MessageCounter uint64 `json:"messageCounter"` MessageCounter uint64 `json:"messageCounter"`
CurrentRemote *udp.Addr `json:"currentRemote"` CurrentRemote netip.AddrPort `json:"currentRemote"`
CurrentRelaysToMe []iputil.VpnIp `json:"currentRelaysToMe"` CurrentRelaysToMe []netip.Addr `json:"currentRelaysToMe"`
CurrentRelaysThroughMe []iputil.VpnIp `json:"currentRelaysThroughMe"` CurrentRelaysThroughMe []netip.Addr `json:"currentRelaysThroughMe"`
} }
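Because `ControlHostInfo` now uses `netip.Addr` and `netip.AddrPort`, the JSON output keeps a string shape (both types implement `encoding.TextMarshaler`). A small self-contained sketch, not taken from the diff, showing how these types serialize:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/netip"
)

func main() {
	out, _ := json.Marshal(map[string]any{
		"vpnIp":         netip.MustParseAddr("10.128.0.2"),
		"currentRemote": netip.MustParseAddrPort("192.0.2.1:4242"),
	})
	fmt.Println(string(out)) // {"currentRemote":"192.0.2.1:4242","vpnIp":"10.128.0.2"}
}
```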
// Start actually runs nebula, this is a nonblocking call. To block use Control.ShutdownBlock() // Start actually runs nebula, this is a nonblocking call. To block use Control.ShutdownBlock()
@@ -54,12 +64,22 @@ func (c *Control) Start() {
if c.dnsStart != nil { if c.dnsStart != nil {
go c.dnsStart() go c.dnsStart()
} }
if c.connectionManagerStart != nil {
go c.connectionManagerStart(c.ctx)
}
if c.lighthouseStart != nil {
c.lighthouseStart()
}
// Start reading packets. // Start reading packets.
c.f.run() c.f.run()
} }
// Stop signals nebula to shutdown, returns after the shutdown is complete func (c *Control) Context() context.Context {
return c.ctx
}
// Stop signals nebula to shutdown and close all tunnels, returns after the shutdown is complete
func (c *Control) Stop() { func (c *Control) Stop() {
// Stop the handshakeManager (and other services), to prevent new tunnels from // Stop the handshakeManager (and other services), to prevent new tunnels from
// being created while we're shutting them all down. // being created while we're shutting them all down.
@@ -89,7 +109,7 @@ func (c *Control) RebindUDPServer() {
_ = c.f.outside.Rebind() _ = c.f.outside.Rebind()
// Trigger a lighthouse update, useful for mobile clients that should have an update interval of 0 // Trigger a lighthouse update, useful for mobile clients that should have an update interval of 0
c.f.lightHouse.SendUpdate(c.f) c.f.lightHouse.SendUpdate()
// Let the main interface know that we rebound so that underlying tunnels know to trigger punches from their remotes // Let the main interface know that we rebound so that underlying tunnels know to trigger punches from their remotes
c.f.rebindCount++ c.f.rebindCount++
@@ -98,7 +118,7 @@ func (c *Control) RebindUDPServer() {
// ListHostmapHosts returns details about the actual or pending (handshaking) hostmap by vpn ip // ListHostmapHosts returns details about the actual or pending (handshaking) hostmap by vpn ip
func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo { func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
if pendingMap { if pendingMap {
return listHostMapHosts(c.f.handshakeManager.pendingHostMap) return listHostMapHosts(c.f.handshakeManager)
} else { } else {
return listHostMapHosts(c.f.hostMap) return listHostMapHosts(c.f.hostMap)
} }
@@ -107,46 +127,85 @@ func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
// ListHostmapIndexes returns details about the actual or pending (handshaking) hostmap by local index id // ListHostmapIndexes returns details about the actual or pending (handshaking) hostmap by local index id
func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo { func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
if pendingMap { if pendingMap {
return listHostMapIndexes(c.f.handshakeManager.pendingHostMap) return listHostMapIndexes(c.f.handshakeManager)
} else { } else {
return listHostMapIndexes(c.f.hostMap) return listHostMapIndexes(c.f.hostMap)
} }
} }
// GetCertByVpnIp returns the authenticated certificate of the given vpn IP, or nil if not found
func (c *Control) GetCertByVpnIp(vpnIp netip.Addr) *cert.NebulaCertificate {
if c.f.myVpnNet.Addr() == vpnIp {
return c.f.pki.GetCertState().Certificate
}
hi := c.f.hostMap.QueryVpnIp(vpnIp)
if hi == nil {
return nil
}
return hi.GetCert()
}
// CreateTunnel creates a new tunnel to the given vpn ip.
func (c *Control) CreateTunnel(vpnIp netip.Addr) {
c.f.handshakeManager.StartHandshake(vpnIp, nil)
}
// PrintTunnel creates a new tunnel to the given vpn ip.
func (c *Control) PrintTunnel(vpnIp netip.Addr) *ControlHostInfo {
hi := c.f.hostMap.QueryVpnIp(vpnIp)
if hi == nil {
return nil
}
chi := copyHostInfo(hi, c.f.hostMap.GetPreferredRanges())
return &chi
}
// QueryLighthouse queries the lighthouse.
func (c *Control) QueryLighthouse(vpnIp netip.Addr) *CacheMap {
hi := c.f.lightHouse.Query(vpnIp)
if hi == nil {
return nil
}
return hi.CopyCache()
}
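A hedged sketch of how an embedding application might call the new Control helpers. The control handle `ctrl` and the peer address are assumptions made for illustration, not taken from the diff.

```go
// Assumes ctrl is a *nebula.Control returned by nebula.Main and already started.
peer := netip.MustParseAddr("10.128.0.2")

if crt := ctrl.GetCertByVpnIp(peer); crt != nil {
	fmt.Println("peer certificate name:", crt.Details.Name)
}

ctrl.CreateTunnel(peer) // start a handshake if no tunnel exists yet

if info := ctrl.PrintTunnel(peer); info != nil {
	fmt.Println("current remote:", info.CurrentRemote)
}
```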
// GetHostInfoByVpnIp returns a single tunnels hostInfo, or nil if not found // GetHostInfoByVpnIp returns a single tunnels hostInfo, or nil if not found
func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlHostInfo { // Caller should take care to Unmap() any 4in6 addresses prior to calling.
var hm *HostMap func (c *Control) GetHostInfoByVpnIp(vpnIp netip.Addr, pending bool) *ControlHostInfo {
var hl controlHostLister
if pending { if pending {
hm = c.f.handshakeManager.pendingHostMap hl = c.f.handshakeManager
} else { } else {
hm = c.f.hostMap hl = c.f.hostMap
} }
h, err := hm.QueryVpnIp(vpnIp) h := hl.QueryVpnIp(vpnIp)
if err != nil { if h == nil {
return nil return nil
} }
ch := copyHostInfo(h, c.f.hostMap.preferredRanges) ch := copyHostInfo(h, c.f.hostMap.GetPreferredRanges())
return &ch return &ch
} }
// SetRemoteForTunnel forces a tunnel to use a specific remote // SetRemoteForTunnel forces a tunnel to use a specific remote
func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr udp.Addr) *ControlHostInfo { // Caller should take care to Unmap() any 4in6 addresses prior to calling.
hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp) func (c *Control) SetRemoteForTunnel(vpnIp netip.Addr, addr netip.AddrPort) *ControlHostInfo {
if err != nil { hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
if hostInfo == nil {
return nil return nil
} }
hostInfo.SetRemote(addr.Copy()) hostInfo.SetRemote(addr)
ch := copyHostInfo(hostInfo, c.f.hostMap.preferredRanges) ch := copyHostInfo(hostInfo, c.f.hostMap.GetPreferredRanges())
return &ch return &ch
} }
// CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well. // CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well.
func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool { // Caller should take care to Unmap() any 4in6 addresses prior to calling.
hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp) func (c *Control) CloseTunnel(vpnIp netip.Addr, localOnly bool) bool {
if err != nil { hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
if hostInfo == nil {
return false return false
} }
@@ -187,7 +246,7 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
} }
// Learn which hosts are being used as relays, so we can shut them down last. // Learn which hosts are being used as relays, so we can shut them down last.
relayingHosts := map[iputil.VpnIp]*HostInfo{} relayingHosts := map[netip.Addr]*HostInfo{}
// Grab the hostMap lock to access the Relays map // Grab the hostMap lock to access the Relays map
c.f.hostMap.Lock() c.f.hostMap.Lock()
for _, relayingHost := range c.f.hostMap.Relays { for _, relayingHost := range c.f.hostMap.Relays {
@@ -214,16 +273,20 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
return return
} }
func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo { func (c *Control) Device() overlay.Device {
return c.f.inside
}
func copyHostInfo(h *HostInfo, preferredRanges []netip.Prefix) ControlHostInfo {
chi := ControlHostInfo{ chi := ControlHostInfo{
VpnIp: h.vpnIp.ToIP(), VpnIp: h.vpnIp,
LocalIndex: h.localIndexId, LocalIndex: h.localIndexId,
RemoteIndex: h.remoteIndexId, RemoteIndex: h.remoteIndexId,
RemoteAddrs: h.remotes.CopyAddrs(preferredRanges), RemoteAddrs: h.remotes.CopyAddrs(preferredRanges),
CachedPackets: len(h.packetStore),
CurrentRelaysToMe: h.relayState.CopyRelayIps(), CurrentRelaysToMe: h.relayState.CopyRelayIps(),
CurrentRelaysThroughMe: h.relayState.CopyRelayForIps(), CurrentRelaysThroughMe: h.relayState.CopyRelayForIps(),
CurrentRemote: h.remote,
} }
if h.ConnectionState != nil { if h.ConnectionState != nil {
@@ -234,35 +297,23 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
chi.Cert = c.Copy() chi.Cert = c.Copy()
} }
if h.remote != nil {
chi.CurrentRemote = h.remote.Copy()
}
return chi return chi
} }
func listHostMapHosts(hm *HostMap) []ControlHostInfo { func listHostMapHosts(hl controlHostLister) []ControlHostInfo {
hm.RLock() hosts := make([]ControlHostInfo, 0)
hosts := make([]ControlHostInfo, len(hm.Hosts)) pr := hl.GetPreferredRanges()
i := 0 hl.ForEachVpnIp(func(hostinfo *HostInfo) {
for _, v := range hm.Hosts { hosts = append(hosts, copyHostInfo(hostinfo, pr))
hosts[i] = copyHostInfo(v, hm.preferredRanges) })
i++
}
hm.RUnlock()
return hosts return hosts
} }
func listHostMapIndexes(hm *HostMap) []ControlHostInfo { func listHostMapIndexes(hl controlHostLister) []ControlHostInfo {
hm.RLock() hosts := make([]ControlHostInfo, 0)
hosts := make([]ControlHostInfo, len(hm.Indexes)) pr := hl.GetPreferredRanges()
i := 0 hl.ForEachIndex(func(hostinfo *HostInfo) {
for _, v := range hm.Indexes { hosts = append(hosts, copyHostInfo(hostinfo, pr))
hosts[i] = copyHostInfo(v, hm.preferredRanges) })
i++
}
hm.RUnlock()
return hosts return hosts
} }


@@ -2,15 +2,14 @@ package nebula
import ( import (
"net" "net"
"net/netip"
"reflect" "reflect"
"testing" "testing"
"time" "time"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test" "github.com/slackhq/nebula/test"
"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -18,16 +17,19 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
l := test.NewLogger() l := test.NewLogger()
// Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object // Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object
// To properly ensure we are not exposing core memory to the caller // To properly ensure we are not exposing core memory to the caller
hm := NewHostMap(l, "test", &net.IPNet{}, make([]*net.IPNet, 0)) hm := newHostMap(l, netip.Prefix{})
remote1 := udp.NewAddr(net.ParseIP("0.0.0.100"), 4444) hm.preferredRanges.Store(&[]netip.Prefix{})
remote2 := udp.NewAddr(net.ParseIP("1:2:3:4:5:6:7:8"), 4444)
remote1 := netip.MustParseAddrPort("0.0.0.100:4444")
remote2 := netip.MustParseAddrPort("[1:2:3:4:5:6:7:8]:4444")
ipNet := net.IPNet{ ipNet := net.IPNet{
IP: net.IPv4(1, 2, 3, 4), IP: remote1.Addr().AsSlice(),
Mask: net.IPMask{255, 255, 255, 0}, Mask: net.IPMask{255, 255, 255, 0},
} }
ipNet2 := net.IPNet{ ipNet2 := net.IPNet{
IP: net.ParseIP("1:2:3:4:5:6:7:8"), IP: remote2.Addr().AsSlice(),
Mask: net.IPMask{255, 255, 255, 0}, Mask: net.IPMask{255, 255, 255, 0},
} }
@@ -48,9 +50,13 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
} }
remotes := NewRemoteList(nil) remotes := NewRemoteList(nil)
remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port))) remotes.unlockedPrependV4(netip.IPv4Unspecified(), NewIp4AndPortFromNetIP(remote1.Addr(), remote1.Port()))
remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port))) remotes.unlockedPrependV6(netip.IPv4Unspecified(), NewIp6AndPortFromNetIP(remote2.Addr(), remote2.Port()))
hm.Add(iputil.Ip2VpnIp(ipNet.IP), &HostInfo{
vpnIp, ok := netip.AddrFromSlice(ipNet.IP)
assert.True(t, ok)
hm.unlockedAddHostInfo(&HostInfo{
remote: remote1, remote: remote1,
remotes: remotes, remotes: remotes,
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
@@ -58,15 +64,18 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
}, },
remoteIndexId: 200, remoteIndexId: 200,
localIndexId: 201, localIndexId: 201,
vpnIp: iputil.Ip2VpnIp(ipNet.IP), vpnIp: vpnIp,
relayState: RelayState{ relayState: RelayState{
relays: map[iputil.VpnIp]struct{}{}, relays: nil,
relayForByIp: map[iputil.VpnIp]*Relay{}, relayForByIp: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{}, relayForByIdx: map[uint32]*Relay{},
}, },
}) }, &Interface{})
hm.Add(iputil.Ip2VpnIp(ipNet2.IP), &HostInfo{ vpnIp2, ok := netip.AddrFromSlice(ipNet2.IP)
assert.True(t, ok)
hm.unlockedAddHostInfo(&HostInfo{
remote: remote1, remote: remote1,
remotes: remotes, remotes: remotes,
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
@@ -74,13 +83,13 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
}, },
remoteIndexId: 200, remoteIndexId: 200,
localIndexId: 201, localIndexId: 201,
vpnIp: iputil.Ip2VpnIp(ipNet2.IP), vpnIp: vpnIp2,
relayState: RelayState{ relayState: RelayState{
relays: map[iputil.VpnIp]struct{}{}, relays: nil,
relayForByIp: map[iputil.VpnIp]*Relay{}, relayForByIp: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{}, relayForByIdx: map[uint32]*Relay{},
}, },
}) }, &Interface{})
c := Control{ c := Control{
f: &Interface{ f: &Interface{
@@ -89,28 +98,29 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
l: logrus.New(), l: logrus.New(),
} }
thi := c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet.IP), false) thi := c.GetHostInfoByVpnIp(vpnIp, false)
expectedInfo := ControlHostInfo{ expectedInfo := ControlHostInfo{
VpnIp: net.IPv4(1, 2, 3, 4).To4(), VpnIp: vpnIp,
LocalIndex: 201, LocalIndex: 201,
RemoteIndex: 200, RemoteIndex: 200,
RemoteAddrs: []*udp.Addr{remote2, remote1}, RemoteAddrs: []netip.AddrPort{remote2, remote1},
CachedPackets: 0,
Cert: crt.Copy(), Cert: crt.Copy(),
MessageCounter: 0, MessageCounter: 0,
CurrentRemote: udp.NewAddr(net.ParseIP("0.0.0.100"), 4444), CurrentRemote: remote1,
CurrentRelaysToMe: []iputil.VpnIp{}, CurrentRelaysToMe: []netip.Addr{},
CurrentRelaysThroughMe: []iputil.VpnIp{}, CurrentRelaysThroughMe: []netip.Addr{},
} }
// Make sure we don't have any unexpected fields // Make sure we don't have any unexpected fields
assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "CachedPackets", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi) assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
test.AssertDeepCopyEqual(t, &expectedInfo, thi) assert.EqualValues(t, &expectedInfo, thi)
//TODO: netip.Addr reuses global memory for zone identifiers which breaks our "no reused memory check" here
//test.AssertDeepCopyEqual(t, &expectedInfo, thi)
// Make sure we don't panic if the host info doesn't have a cert yet // Make sure we don't panic if the host info doesn't have a cert yet
assert.NotPanics(t, func() { assert.NotPanics(t, func() {
thi = c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet2.IP), false) thi = c.GetHostInfoByVpnIp(vpnIp2, false)
}) })
} }


@@ -4,14 +4,13 @@
package nebula package nebula
import ( import (
"net" "net/netip"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/google/gopacket" "github.com/google/gopacket"
"github.com/google/gopacket/layers" "github.com/google/gopacket/layers"
"github.com/slackhq/nebula/header" "github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/overlay" "github.com/slackhq/nebula/overlay"
"github.com/slackhq/nebula/udp" "github.com/slackhq/nebula/udp"
) )
@@ -21,7 +20,7 @@ import (
func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) { func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
h := &header.H{} h := &header.H{}
for { for {
p := c.f.outside.Get(true) p := c.f.outside.(*udp.TesterConn).Get(true)
if err := h.Parse(p.Data); err != nil { if err := h.Parse(p.Data); err != nil {
panic(err) panic(err)
} }
@@ -37,7 +36,7 @@ func (c *Control) WaitForType(msgType header.MessageType, subType header.Message
func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) { func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
h := &header.H{} h := &header.H{}
for { for {
p := c.f.outside.Get(true) p := c.f.outside.(*udp.TesterConn).Get(true)
if err := h.Parse(p.Data); err != nil { if err := h.Parse(p.Data); err != nil {
panic(err) panic(err)
} }
@@ -50,37 +49,30 @@ func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType,
// InjectLightHouseAddr will push toAddr into the local lighthouse cache for the vpnIp // InjectLightHouseAddr will push toAddr into the local lighthouse cache for the vpnIp
// This is necessary if you did not configure static hosts or are not running a lighthouse // This is necessary if you did not configure static hosts or are not running a lighthouse
func (c *Control) InjectLightHouseAddr(vpnIp net.IP, toAddr *net.UDPAddr) { func (c *Control) InjectLightHouseAddr(vpnIp netip.Addr, toAddr netip.AddrPort) {
c.f.lightHouse.Lock() c.f.lightHouse.Lock()
remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp)) remoteList := c.f.lightHouse.unlockedGetRemoteList(vpnIp)
remoteList.Lock() remoteList.Lock()
defer remoteList.Unlock() defer remoteList.Unlock()
c.f.lightHouse.Unlock() c.f.lightHouse.Unlock()
iVpnIp := iputil.Ip2VpnIp(vpnIp) if toAddr.Addr().Is4() {
if v4 := toAddr.IP.To4(); v4 != nil { remoteList.unlockedPrependV4(vpnIp, NewIp4AndPortFromNetIP(toAddr.Addr(), toAddr.Port()))
remoteList.unlockedPrependV4(iVpnIp, NewIp4AndPort(v4, uint32(toAddr.Port)))
} else { } else {
remoteList.unlockedPrependV6(iVpnIp, NewIp6AndPort(toAddr.IP, uint32(toAddr.Port))) remoteList.unlockedPrependV6(vpnIp, NewIp6AndPortFromNetIP(toAddr.Addr(), toAddr.Port()))
} }
} }
// InjectRelays will push relayVpnIps into the local lighthouse cache for the vpnIp // InjectRelays will push relayVpnIps into the local lighthouse cache for the vpnIp
// This is necessary to inform an initiator of possible relays for communicating with a responder // This is necessary to inform an initiator of possible relays for communicating with a responder
func (c *Control) InjectRelays(vpnIp net.IP, relayVpnIps []net.IP) { func (c *Control) InjectRelays(vpnIp netip.Addr, relayVpnIps []netip.Addr) {
c.f.lightHouse.Lock() c.f.lightHouse.Lock()
remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp)) remoteList := c.f.lightHouse.unlockedGetRemoteList(vpnIp)
remoteList.Lock() remoteList.Lock()
defer remoteList.Unlock() defer remoteList.Unlock()
c.f.lightHouse.Unlock() c.f.lightHouse.Unlock()
iVpnIp := iputil.Ip2VpnIp(vpnIp) remoteList.unlockedSetRelay(vpnIp, vpnIp, relayVpnIps)
uVpnIp := []uint32{}
for _, rVPnIp := range relayVpnIps {
uVpnIp = append(uVpnIp, uint32(iputil.Ip2VpnIp(rVPnIp)))
}
remoteList.unlockedSetRelay(iVpnIp, iVpnIp, uVpnIp)
} }
// GetFromTun will pull a packet off the tun side of nebula // GetFromTun will pull a packet off the tun side of nebula
@@ -90,11 +82,11 @@ func (c *Control) GetFromTun(block bool) []byte {
// GetFromUDP will pull a udp packet off the udp side of nebula // GetFromUDP will pull a udp packet off the udp side of nebula
func (c *Control) GetFromUDP(block bool) *udp.Packet { func (c *Control) GetFromUDP(block bool) *udp.Packet {
return c.f.outside.Get(block) return c.f.outside.(*udp.TesterConn).Get(block)
} }
func (c *Control) GetUDPTxChan() <-chan *udp.Packet { func (c *Control) GetUDPTxChan() <-chan *udp.Packet {
return c.f.outside.TxPackets return c.f.outside.(*udp.TesterConn).TxPackets
} }
func (c *Control) GetTunTxChan() <-chan []byte { func (c *Control) GetTunTxChan() <-chan []byte {
@@ -103,17 +95,18 @@ func (c *Control) GetTunTxChan() <-chan []byte {
// InjectUDPPacket will inject a packet into the udp side of nebula // InjectUDPPacket will inject a packet into the udp side of nebula
func (c *Control) InjectUDPPacket(p *udp.Packet) { func (c *Control) InjectUDPPacket(p *udp.Packet) {
c.f.outside.Send(p) c.f.outside.(*udp.TesterConn).Send(p)
} }
// InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol // InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol
func (c *Control) InjectTunUDPPacket(toIp net.IP, toPort uint16, fromPort uint16, data []byte) { func (c *Control) InjectTunUDPPacket(toIp netip.Addr, toPort uint16, fromPort uint16, data []byte) {
//TODO: IPV6-WORK
ip := layers.IPv4{ ip := layers.IPv4{
Version: 4, Version: 4,
TTL: 64, TTL: 64,
Protocol: layers.IPProtocolUDP, Protocol: layers.IPProtocolUDP,
SrcIP: c.f.inside.Cidr().IP, SrcIP: c.f.inside.Cidr().Addr().Unmap().AsSlice(),
DstIP: toIp, DstIP: toIp.Unmap().AsSlice(),
} }
udp := layers.UDP{ udp := layers.UDP{
@@ -138,21 +131,21 @@ func (c *Control) InjectTunUDPPacket(toIp net.IP, toPort uint16, fromPort uint16
c.f.inside.(*overlay.TestTun).Send(buffer.Bytes()) c.f.inside.(*overlay.TestTun).Send(buffer.Bytes())
} }
func (c *Control) GetVpnIp() iputil.VpnIp { func (c *Control) GetVpnIp() netip.Addr {
return c.f.myVpnIp return c.f.myVpnNet.Addr()
} }
func (c *Control) GetUDPAddr() string { func (c *Control) GetUDPAddr() netip.AddrPort {
return c.f.outside.Addr.String() return c.f.outside.(*udp.TesterConn).Addr
} }
func (c *Control) KillPendingTunnel(vpnIp net.IP) bool { func (c *Control) KillPendingTunnel(vpnIp netip.Addr) bool {
hostinfo, ok := c.f.handshakeManager.pendingHostMap.Hosts[iputil.Ip2VpnIp(vpnIp)] hostinfo := c.f.handshakeManager.QueryVpnIp(vpnIp)
if !ok { if hostinfo == nil {
return false return false
} }
c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo) c.f.handshakeManager.DeleteHostInfo(hostinfo)
return true return true
} }
@@ -161,19 +154,9 @@ func (c *Control) GetHostmap() *HostMap {
} }
func (c *Control) GetCert() *cert.NebulaCertificate { func (c *Control) GetCert() *cert.NebulaCertificate {
return c.f.certState.Load().certificate return c.f.pki.GetCertState().Certificate
} }
func (c *Control) ReHandshake(vpnIp iputil.VpnIp) { func (c *Control) ReHandshake(vpnIp netip.Addr) {
hostinfo := c.f.handshakeManager.AddVpnIp(vpnIp, c.f.initHostInfo) c.f.handshakeManager.StartHandshake(vpnIp, nil)
ixHandshakeStage0(c.f, vpnIp, hostinfo)
// If this is a static host, we don't need to wait for the HostQueryReply
// We can trigger the handshake right now
if _, ok := c.f.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
select {
case c.f.handshakeManager.trigger <- hostinfo.vpnIp:
default:
}
}
} }


@@ -1,13 +0,0 @@
[Unit]
Description=Nebula overlay networking tool
Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target
[Service]
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
Restart=always
[Install]
WantedBy=multi-user.target


@@ -1,14 +0,0 @@
[Unit]
Description=Nebula overlay networking tool
Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target
Before=sshd.service
[Service]
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
Restart=always
[Install]
WantedBy=multi-user.target


@@ -3,6 +3,7 @@ package nebula
import ( import (
"fmt" "fmt"
"net" "net"
"net/netip"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@@ -10,7 +11,6 @@ import (
"github.com/miekg/dns" "github.com/miekg/dns"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/config" "github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/iputil"
) )
// This whole thing should be rewritten to use context // This whole thing should be rewritten to use context
@@ -42,21 +42,23 @@ func (d *dnsRecords) Query(data string) string {
} }
func (d *dnsRecords) QueryCert(data string) string { func (d *dnsRecords) QueryCert(data string) string {
ip := net.ParseIP(data[:len(data)-1]) ip, err := netip.ParseAddr(data[:len(data)-1])
if ip == nil {
return ""
}
iip := iputil.Ip2VpnIp(ip)
hostinfo, err := d.hostMap.QueryVpnIp(iip)
if err != nil { if err != nil {
return "" return ""
} }
hostinfo := d.hostMap.QueryVpnIp(ip)
if hostinfo == nil {
return ""
}
q := hostinfo.GetCert() q := hostinfo.GetCert()
if q == nil { if q == nil {
return "" return ""
} }
cert := q.Details cert := q.Details
c := fmt.Sprintf("\"Name: %s\" \"Ips: %s\" \"Subnets %s\" \"Groups %s\" \"NotBefore %s\" \"NotAFter %s\" \"PublicKey %x\" \"IsCA %t\" \"Issuer %s\"", cert.Name, cert.Ips, cert.Subnets, cert.Groups, cert.NotBefore, cert.NotAfter, cert.PublicKey, cert.IsCA, cert.Issuer) c := fmt.Sprintf("\"Name: %s\" \"Ips: %s\" \"Subnets %s\" \"Groups %s\" \"NotBefore %s\" \"NotAfter %s\" \"PublicKey %x\" \"IsCA %t\" \"Issuer %s\"", cert.Name, cert.Ips, cert.Subnets, cert.Groups, cert.NotBefore, cert.NotAfter, cert.PublicKey, cert.IsCA, cert.Issuer)
return c return c
} }
@@ -80,7 +82,11 @@ func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
} }
case dns.TypeTXT: case dns.TypeTXT:
a, _, _ := net.SplitHostPort(w.RemoteAddr().String()) a, _, _ := net.SplitHostPort(w.RemoteAddr().String())
b := net.ParseIP(a) b, err := netip.ParseAddr(a)
if err != nil {
return
}
// We don't answer these queries from non nebula nodes or localhost // We don't answer these queries from non nebula nodes or localhost
//l.Debugf("Does %s contain %s", b, dnsR.hostMap.vpnCIDR) //l.Debugf("Does %s contain %s", b, dnsR.hostMap.vpnCIDR)
if !dnsR.hostMap.vpnCIDR.Contains(b) && a != "127.0.0.1" { if !dnsR.hostMap.vpnCIDR.Contains(b) && a != "127.0.0.1" {
@@ -96,6 +102,10 @@ func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
} }
} }
} }
if len(m.Answer) == 0 {
m.Rcode = dns.RcodeNameError
}
} }
func handleDnsRequest(l *logrus.Logger, w dns.ResponseWriter, r *dns.Msg) { func handleDnsRequest(l *logrus.Logger, w dns.ResponseWriter, r *dns.Msg) {
@@ -129,7 +139,12 @@ func dnsMain(l *logrus.Logger, hostMap *HostMap, c *config.C) func() {
} }
func getDnsServerAddr(c *config.C) string { func getDnsServerAddr(c *config.C) string {
return c.GetString("lighthouse.dns.host", "") + ":" + strconv.Itoa(c.GetInt("lighthouse.dns.port", 53)) dnsHost := strings.TrimSpace(c.GetString("lighthouse.dns.host", ""))
// Old guidance was to provide the literal `[::]` in `lighthouse.dns.host` but that won't resolve.
if dnsHost == "[::]" {
dnsHost = "::"
}
return net.JoinHostPort(dnsHost, strconv.Itoa(c.GetInt("lighthouse.dns.port", 53)))
} }
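For context, a small self-contained sketch (not from the diff) of why the host value is trimmed and a literal `[::]` rewritten to `::` before calling `net.JoinHostPort`:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	fmt.Println(net.JoinHostPort("::", "53"))   // "[::]:53" - a valid listen address
	fmt.Println(net.JoinHostPort("[::]", "53")) // "[[::]]:53" - double-bracketed, won't resolve
	fmt.Println("[::]" + ":" + "53")            // "[::]:53" - the old concatenation only worked when brackets were supplied
}
```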
func startDns(l *logrus.Logger, c *config.C) { func startDns(l *logrus.Logger, c *config.C) {


@@ -4,6 +4,8 @@ import (
"testing" "testing"
"github.com/miekg/dns" "github.com/miekg/dns"
"github.com/slackhq/nebula/config"
"github.com/stretchr/testify/assert"
) )
func TestParsequery(t *testing.T) { func TestParsequery(t *testing.T) {
@@ -17,3 +19,40 @@ func TestParsequery(t *testing.T) {
//parseQuery(m) //parseQuery(m)
} }
func Test_getDnsServerAddr(t *testing.T) {
c := config.NewC(nil)
c.Settings["lighthouse"] = map[interface{}]interface{}{
"dns": map[interface{}]interface{}{
"host": "0.0.0.0",
"port": "1",
},
}
assert.Equal(t, "0.0.0.0:1", getDnsServerAddr(c))
c.Settings["lighthouse"] = map[interface{}]interface{}{
"dns": map[interface{}]interface{}{
"host": "::",
"port": "1",
},
}
assert.Equal(t, "[::]:1", getDnsServerAddr(c))
c.Settings["lighthouse"] = map[interface{}]interface{}{
"dns": map[interface{}]interface{}{
"host": "[::]",
"port": "1",
},
}
assert.Equal(t, "[::]:1", getDnsServerAddr(c))
// Make sure whitespace doesn't mess us up
c.Settings["lighthouse"] = map[interface{}]interface{}{
"dns": map[interface{}]interface{}{
"host": "[::] ",
"port": "1",
},
}
assert.Equal(t, "[::]:1", getDnsServerAddr(c))
}

docker/Dockerfile (new file)

@@ -0,0 +1,11 @@
FROM gcr.io/distroless/static:latest
ARG TARGETOS TARGETARCH
COPY build/$TARGETOS-$TARGETARCH/nebula /nebula
COPY build/$TARGETOS-$TARGETARCH/nebula-cert /nebula-cert
VOLUME ["/config"]
ENTRYPOINT ["/nebula"]
# Allow users to override the args passed to nebula
CMD ["-config", "/config/config.yml"]

docker/README.md (new file)

@@ -0,0 +1,24 @@
# NebulaOSS/nebula Docker Image
## Building
From the root of the repository, run `make docker`.
## Running
To run the built image, use the following command:
```
docker run \
--name nebula \
--network host \
--cap-add NET_ADMIN \
--volume ./config:/config \
--rm \
nebulaoss/nebula
```
A few notes:
- The `NET_ADMIN` capability is necessary to create the tun adapter on the host (this is unnecessary if the tun device is disabled.)
- `--volume ./config:/config` should point to a directory that contains your `config.yml` and any other necessary files.


@@ -4,28 +4,29 @@
package e2e package e2e
import ( import (
"fmt" "net/netip"
"net" "slices"
"testing" "testing"
"time" "time"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula" "github.com/slackhq/nebula"
"github.com/slackhq/nebula/e2e/router" "github.com/slackhq/nebula/e2e/router"
"github.com/slackhq/nebula/header" "github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp" "github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
) )
func BenchmarkHotPath(b *testing.B) { func BenchmarkHotPath(b *testing.B) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, _, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil) myControl, _, _, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)
// Put their info in our lighthouse // Put their info in our lighthouse
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
// Start the servers // Start the servers
myControl.Start() myControl.Start()
@@ -35,7 +36,7 @@ func BenchmarkHotPath(b *testing.B) {
r.CancelFlowLogs() r.CancelFlowLogs()
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
_ = r.RouteForAllUntilTxTun(theirControl) _ = r.RouteForAllUntilTxTun(theirControl)
} }
@@ -44,19 +45,19 @@ func BenchmarkHotPath(b *testing.B) {
} }
func TestGoodHandshake(t *testing.T) { func TestGoodHandshake(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil) myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)
// Put their info in our lighthouse // Put their info in our lighthouse
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
// Start the servers // Start the servers
myControl.Start() myControl.Start()
theirControl.Start() theirControl.Start()
t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side") t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
t.Log("Have them consume my stage 0 packet. They have a tunnel now") t.Log("Have them consume my stage 0 packet. They have a tunnel now")
theirControl.InjectUDPPacket(myControl.GetFromUDP(true)) theirControl.InjectUDPPacket(myControl.GetFromUDP(true))
@@ -77,16 +78,16 @@ func TestGoodHandshake(t *testing.T) {
myControl.WaitForType(1, 0, theirControl) myControl.WaitForType(1, 0, theirControl)
t.Log("Make sure our host infos are correct") t.Log("Make sure our host infos are correct")
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl) assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl)
t.Log("Get that cached packet and make sure it looks right") t.Log("Get that cached packet and make sure it looks right")
myCachedPacket := theirControl.GetFromTun(true) myCachedPacket := theirControl.GetFromTun(true)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
t.Log("Do a bidirectional tunnel test") t.Log("Do a bidirectional tunnel test")
r := router.NewR(t, myControl, theirControl) r := router.NewR(t, myControl, theirControl)
defer r.RenderFlow() defer r.RenderFlow()
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
r.RenderHostmaps("Final hostmaps", myControl, theirControl) r.RenderHostmaps("Final hostmaps", myControl, theirControl)
myControl.Stop() myControl.Stop()
@@ -95,20 +96,20 @@ func TestGoodHandshake(t *testing.T) {
} }
func TestWrongResponderHandshake(t *testing.T) { func TestWrongResponderHandshake(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
// The IPs here are chosen on purpose: // The IPs here are chosen on purpose:
// The current remote handling will sort by preference, public, and then lexically. // The current remote handling will sort by preference, public, and then lexically.
// So we need them to have a higher address than evil (we could apply a preference though) // So we need them to have a higher address than evil (we could apply a preference though)
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil) myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.100/24", nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil) theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.99/24", nil)
evilControl, evilVpnIp, evilUdpAddr, _ := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil) evilControl, evilVpnIp, evilUdpAddr, _ := newSimpleServer(ca, caKey, "evil", "10.128.0.2/24", nil)
// Add their real udp addr, which should be tried after evil. // Add their real udp addr, which should be tried after evil.
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
// Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse. // Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse.
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, evilUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), evilUdpAddr)
// Build a router so we don't have to reason who gets which packet // Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl, evilControl) r := router.NewR(t, myControl, theirControl, evilControl)
@@ -120,7 +121,7 @@ func TestWrongResponderHandshake(t *testing.T) {
evilControl.Start() evilControl.Start()
t.Log("Start the handshake process, we will route until we see our cached packet get sent to them") t.Log("Start the handshake process, we will route until we see our cached packet get sent to them")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType { r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
h := &header.H{} h := &header.H{}
err := h.Parse(p.Data) err := h.Parse(p.Data)
@@ -128,7 +129,7 @@ func TestWrongResponderHandshake(t *testing.T) {
panic(err) panic(err)
} }
if p.ToIp.Equal(theirUdpAddr.IP) && p.ToPort == uint16(theirUdpAddr.Port) && h.Type == 1 { if p.To == theirUdpAddr && h.Type == 1 {
return router.RouteAndExit return router.RouteAndExit
} }
@@ -139,18 +140,18 @@ func TestWrongResponderHandshake(t *testing.T) {
t.Log("My cached packet should be received by them") t.Log("My cached packet should be received by them")
myCachedPacket := theirControl.GetFromTun(true) myCachedPacket := theirControl.GetFromTun(true)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
t.Log("Test the tunnel with them") t.Log("Test the tunnel with them")
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl) assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl)
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
t.Log("Flush all packets from all controllers") t.Log("Flush all packets from all controllers")
r.FlushAll() r.FlushAll()
t.Log("Ensure ensure I don't have any hostinfo artifacts from evil") t.Log("Ensure ensure I don't have any hostinfo artifacts from evil")
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), true), "My pending hostmap should not contain evil") assert.Nil(t, myControl.GetHostInfoByVpnIp(evilVpnIp.Addr(), true), "My pending hostmap should not contain evil")
assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), false), "My main hostmap should not contain evil") assert.Nil(t, myControl.GetHostInfoByVpnIp(evilVpnIp.Addr(), false), "My main hostmap should not contain evil")
//NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete //NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete
//TODO: assert hostmaps for everyone //TODO: assert hostmaps for everyone
@@ -164,13 +165,13 @@ func TestStage1Race(t *testing.T) {
// This tests ensures that two hosts handshaking with each other at the same time will allow traffic to flow // This tests ensures that two hosts handshaking with each other at the same time will allow traffic to flow
// But will eventually collapse down to a single tunnel // But will eventually collapse down to a single tunnel
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil) myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)
// Put their info in our lighthouse and vice versa // Put their info in our lighthouse and vice versa
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr) theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
// Build a router so we don't have to reason who gets which packet // Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl) r := router.NewR(t, myControl, theirControl)
@@ -181,8 +182,8 @@ func TestStage1Race(t *testing.T) {
theirControl.Start() theirControl.Start()
t.Log("Trigger a handshake to start on both me and them") t.Log("Trigger a handshake to start on both me and them")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them")) theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
t.Log("Get both stage 1 handshake packets") t.Log("Get both stage 1 handshake packets")
myHsForThem := myControl.GetFromUDP(true) myHsForThem := myControl.GetFromUDP(true)
@@ -194,14 +195,14 @@ func TestStage1Race(t *testing.T) {
r.Log("Route until they receive a message packet") r.Log("Route until they receive a message packet")
myCachedPacket := r.RouteForAllUntilTxTun(theirControl) myCachedPacket := r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
r.Log("Their cached packet should be received by me") r.Log("Their cached packet should be received by me")
theirCachedPacket := r.RouteForAllUntilTxTun(myControl) theirCachedPacket := r.RouteForAllUntilTxTun(myControl)
assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80) assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), 80, 80)
r.Log("Do a bidirectional tunnel test") r.Log("Do a bidirectional tunnel test")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
myHostmapHosts := myControl.ListHostmapHosts(false) myHostmapHosts := myControl.ListHostmapHosts(false)
myHostmapIndexes := myControl.ListHostmapIndexes(false) myHostmapIndexes := myControl.ListHostmapIndexes(false)
@@ -219,7 +220,7 @@ func TestStage1Race(t *testing.T) {
r.Log("Spin until connection manager tears down a tunnel") r.Log("Spin until connection manager tears down a tunnel")
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 { for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet") t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second) time.Sleep(time.Second)
} }
@@ -241,13 +242,13 @@ func TestStage1Race(t *testing.T) {
} }
func TestUncleanShutdownRaceLoser(t *testing.T) { func TestUncleanShutdownRaceLoser(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil) myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)
// Teach my how to get to the relay and that their can be reached via the relay // Teach my how to get to the relay and that their can be reached via the relay
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr) theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
// Build a router so we don't have to reason who gets which packet // Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl) r := router.NewR(t, myControl, theirControl)
@@ -258,28 +259,28 @@ func TestUncleanShutdownRaceLoser(t *testing.T) {
theirControl.Start() theirControl.Start()
r.Log("Trigger a handshake from me to them") r.Log("Trigger a handshake from me to them")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl) p := r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
r.Log("Nuke my hostmap") r.Log("Nuke my hostmap")
myHostmap := myControl.GetHostmap() myHostmap := myControl.GetHostmap()
myHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{} myHostmap.Hosts = map[netip.Addr]*nebula.HostInfo{}
myHostmap.Indexes = map[uint32]*nebula.HostInfo{} myHostmap.Indexes = map[uint32]*nebula.HostInfo{}
myHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{} myHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me again")) myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me again"))
p = r.RouteForAllUntilTxTun(theirControl) p = r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me again"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) assertUdpPacket(t, []byte("Hi from me again"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
r.Log("Assert the tunnel works") r.Log("Assert the tunnel works")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
r.Log("Wait for the dead index to go away") r.Log("Wait for the dead index to go away")
start := len(theirControl.GetHostmap().Indexes) start := len(theirControl.GetHostmap().Indexes)
for { for {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
if len(theirControl.GetHostmap().Indexes) < start { if len(theirControl.GetHostmap().Indexes) < start {
break break
} }
@@ -290,13 +291,13 @@ func TestUncleanShutdownRaceLoser(t *testing.T) {
} }
func TestUncleanShutdownRaceWinner(t *testing.T) { func TestUncleanShutdownRaceWinner(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil) myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)
// Teach my how to get to the relay and that their can be reached via the relay // Teach my how to get to the relay and that their can be reached via the relay
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr) theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
// Build a router so we don't have to reason who gets which packet // Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl) r := router.NewR(t, myControl, theirControl)
@@ -307,30 +308,30 @@ func TestUncleanShutdownRaceWinner(t *testing.T) {
theirControl.Start() theirControl.Start()
r.Log("Trigger a handshake from me to them") r.Log("Trigger a handshake from me to them")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl) p := r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
r.RenderHostmaps("Final hostmaps", myControl, theirControl) r.RenderHostmaps("Final hostmaps", myControl, theirControl)
r.Log("Nuke my hostmap") r.Log("Nuke my hostmap")
theirHostmap := theirControl.GetHostmap() theirHostmap := theirControl.GetHostmap()
theirHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{} theirHostmap.Hosts = map[netip.Addr]*nebula.HostInfo{}
theirHostmap.Indexes = map[uint32]*nebula.HostInfo{} theirHostmap.Indexes = map[uint32]*nebula.HostInfo{}
theirHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{} theirHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them again")) theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them again"))
p = r.RouteForAllUntilTxTun(myControl) p = r.RouteForAllUntilTxTun(myControl)
assertUdpPacket(t, []byte("Hi from them again"), p, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80) assertUdpPacket(t, []byte("Hi from them again"), p, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), 80, 80)
r.RenderHostmaps("Derp hostmaps", myControl, theirControl) r.RenderHostmaps("Derp hostmaps", myControl, theirControl)
r.Log("Assert the tunnel works") r.Log("Assert the tunnel works")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
r.Log("Wait for the dead index to go away") r.Log("Wait for the dead index to go away")
start := len(myControl.GetHostmap().Indexes) start := len(myControl.GetHostmap().Indexes)
for { for {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
if len(myControl.GetHostmap().Indexes) < start { if len(myControl.GetHostmap().Indexes) < start {
break break
} }
@@ -341,15 +342,15 @@ func TestUncleanShutdownRaceWinner(t *testing.T) {
} }
func TestRelays(t *testing.T) { func TestRelays(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}}) myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}}) relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}}) theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
// Teach my how to get to the relay and that their can be reached via the relay // Teach my how to get to the relay and that their can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr) myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP}) myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
// Build a router so we don't have to reason who gets which packet // Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl) r := router.NewR(t, myControl, relayControl, theirControl)
@@ -361,31 +362,162 @@ func TestRelays(t *testing.T) {
theirControl.Start() theirControl.Start()
t.Log("Trigger a handshake from me to them via the relay") t.Log("Trigger a handshake from me to them via the relay")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl) p := r.RouteForAllUntilTxTun(theirControl)
r.Log("Assert the tunnel works") r.Log("Assert the tunnel works")
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl) r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)
//TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it //TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it
} }
func TestStage1RaceRelays(t *testing.T) { func TestReestablishRelays(t *testing.T) {
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}}) relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}}) theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
// Teach my how to get to the relay and that their can be reached via the relay // Teach my how to get to the relay and that their can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr) myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr) myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP}) // Build a router so we don't have to reason who gets which packet
theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP}) r := router.NewR(t, myControl, relayControl, theirControl)
defer r.RenderFlow()
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) // Start the servers
relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr) myControl.Start()
relayControl.Start()
theirControl.Start()
t.Log("Trigger a handshake from me to them via the relay")
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl)
r.Log("Assert the tunnel works")
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
t.Log("Ensure packet traversal from them to me via the relay")
theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
p = r.RouteForAllUntilTxTun(myControl)
r.Log("Assert the tunnel works")
assertUdpPacket(t, []byte("Hi from them"), p, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), 80, 80)
// If we break the relay's connection to 'them', 'me' needs to detect and recover the connection
r.Log("Close the tunnel")
relayControl.CloseTunnel(theirVpnIpNet.Addr(), true)
start := len(myControl.GetHostmap().Indexes)
curIndexes := len(myControl.GetHostmap().Indexes)
for curIndexes >= start {
curIndexes = len(myControl.GetHostmap().Indexes)
r.Logf("Wait for the dead index to go away:start=%v indexes, current=%v indexes", start, curIndexes)
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me should fail"))
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
return router.RouteAndExit
})
time.Sleep(2 * time.Second)
}
r.Log("Dead index went away. Woot!")
r.RenderHostmaps("Me removed hostinfo", myControl, relayControl, theirControl)
// Next packet should re-establish a relayed connection and work just great.
t.Logf("Assert the tunnel...")
for {
t.Log("RouteForAllUntilTxTun")
myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
p = r.RouteForAllUntilTxTun(theirControl)
r.Log("Assert the tunnel works")
packet := gopacket.NewPacket(p, layers.LayerTypeIPv4, gopacket.Lazy)
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
if slices.Compare(v4.SrcIP, myVpnIpNet.Addr().AsSlice()) != 0 {
t.Logf("SrcIP is unexpected...this is not the packet I'm looking for. Keep looking")
continue
}
if slices.Compare(v4.DstIP, theirVpnIpNet.Addr().AsSlice()) != 0 {
t.Logf("DstIP is unexpected...this is not the packet I'm looking for. Keep looking")
continue
}
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
if udp == nil {
t.Log("Not a UDP packet. This is not the packet I'm looking for. Keep looking")
continue
}
data := packet.ApplicationLayer()
if data == nil {
t.Log("No data found in packet. This is not the packet I'm looking for. Keep looking.")
continue
}
if string(data.Payload()) != "Hi from me" {
t.Logf("Unexpected payload: '%v', keep looking", string(data.Payload()))
continue
}
t.Log("I found my lost packet. I am so happy.")
break
}
t.Log("Assert the tunnel works the other way, too")
for {
t.Log("RouteForAllUntilTxTun")
theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
p = r.RouteForAllUntilTxTun(myControl)
r.Log("Assert the tunnel works")
packet := gopacket.NewPacket(p, layers.LayerTypeIPv4, gopacket.Lazy)
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
if slices.Compare(v4.DstIP, myVpnIpNet.Addr().AsSlice()) != 0 {
t.Logf("Dst is unexpected...this is not the packet I'm looking for. Keep looking")
continue
}
if slices.Compare(v4.SrcIP, theirVpnIpNet.Addr().AsSlice()) != 0 {
t.Logf("SrcIP is unexpected...this is not the packet I'm looking for. Keep looking")
continue
}
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
if udp == nil {
t.Log("Not a UDP packet. This is not the packet I'm looking for. Keep looking")
continue
}
data := packet.ApplicationLayer()
if data == nil {
t.Log("No data found in packet. This is not the packet I'm looking for. Keep looking.")
continue
}
if string(data.Payload()) != "Hi from them" {
t.Logf("Unexpected payload: '%v', keep looking", string(data.Payload()))
continue
}
t.Log("I found my lost packet. I am so happy.")
break
}
r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)
}
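The matching loops above decode the routed packet with gopacket and compare the IPv4 fields against netip addresses via AsSlice. A hedged sketch of that decode-and-compare step, assuming the same imports the test file already uses (gopacket, layers, net/netip, slices); the helper name is made up here, and comma-ok assertions are used so a missing layer simply fails the match instead of panicking on a nil interface.

func matchesUdpPayload(b []byte, wantSrc, wantDst netip.Addr, wantPayload string) bool {
	packet := gopacket.NewPacket(b, layers.LayerTypeIPv4, gopacket.Lazy)

	// gopacket exposes addresses as net.IP ([]byte), so compare against AsSlice().
	v4, ok := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
	if !ok {
		return false
	}
	if slices.Compare([]byte(v4.SrcIP), wantSrc.AsSlice()) != 0 ||
		slices.Compare([]byte(v4.DstIP), wantDst.AsSlice()) != 0 {
		return false
	}
	if _, ok := packet.Layer(layers.LayerTypeUDP).(*layers.UDP); !ok {
		return false
	}
	app := packet.ApplicationLayer()
	return app != nil && string(app.Payload()) == wantPayload
}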
func TestStage1RaceRelays(t *testing.T) {
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
// Teach my how to get to the relay and that their can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
theirControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
theirControl.InjectRelays(myVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
relayControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
// Build a router so we don't have to reason who gets which packet // Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl) r := router.NewR(t, myControl, relayControl, theirControl)
@@ -397,19 +529,21 @@ func TestStage1RaceRelays(t *testing.T) {
theirControl.Start() theirControl.Start()
r.Log("Get a tunnel between me and relay") r.Log("Get a tunnel between me and relay")
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r) assertTunnel(t, myVpnIpNet.Addr(), relayVpnIpNet.Addr(), myControl, relayControl, r)
r.Log("Get a tunnel between them and relay") r.Log("Get a tunnel between them and relay")
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r) assertTunnel(t, theirVpnIpNet.Addr(), relayVpnIpNet.Addr(), theirControl, relayControl, r)
r.Log("Trigger a handshake from both them and me via relay to them and me") r.Log("Trigger a handshake from both them and me via relay to them and me")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them")) theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
r.Log("Wait for a packet from them to me") r.Log("Wait for a packet from them to me")
p := r.RouteForAllUntilTxTun(myControl) p := r.RouteForAllUntilTxTun(myControl)
_ = p _ = p
r.FlushAll()
myControl.Stop() myControl.Stop()
theirControl.Stop() theirControl.Stop()
relayControl.Stop() relayControl.Stop()
@@ -419,21 +553,21 @@ func TestStage1RaceRelays(t *testing.T) {
func TestStage1RaceRelays2(t *testing.T) { func TestStage1RaceRelays2(t *testing.T) {
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay //NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}}) myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}}) relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}}) theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
l := NewTestLogger() l := NewTestLogger()
// Teach my how to get to the relay and that their can be reached via the relay // Teach my how to get to the relay and that their can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr) myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr) theirControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP}) myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP}) theirControl.InjectRelays(myVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr) relayControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
// Build a router so we don't have to reason who gets which packet // Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl) r := router.NewR(t, myControl, relayControl, theirControl)
@@ -446,16 +580,16 @@ func TestStage1RaceRelays2(t *testing.T) {
r.Log("Get a tunnel between me and relay") r.Log("Get a tunnel between me and relay")
l.Info("Get a tunnel between me and relay") l.Info("Get a tunnel between me and relay")
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r) assertTunnel(t, myVpnIpNet.Addr(), relayVpnIpNet.Addr(), myControl, relayControl, r)
r.Log("Get a tunnel between them and relay") r.Log("Get a tunnel between them and relay")
l.Info("Get a tunnel between them and relay") l.Info("Get a tunnel between them and relay")
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r) assertTunnel(t, theirVpnIpNet.Addr(), relayVpnIpNet.Addr(), theirControl, relayControl, r)
r.Log("Trigger a handshake from both them and me via relay to them and me") r.Log("Trigger a handshake from both them and me via relay to them and me")
l.Info("Trigger a handshake from both them and me via relay to them and me") l.Info("Trigger a handshake from both them and me via relay to them and me")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them")) theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
//r.RouteUntilAfterMsgType(myControl, header.Control, header.MessageNone) //r.RouteUntilAfterMsgType(myControl, header.Control, header.MessageNone)
//r.RouteUntilAfterMsgType(theirControl, header.Control, header.MessageNone) //r.RouteUntilAfterMsgType(theirControl, header.Control, header.MessageNone)
@@ -468,7 +602,7 @@ func TestStage1RaceRelays2(t *testing.T) {
r.Log("Assert the tunnel works") r.Log("Assert the tunnel works")
l.Info("Assert the tunnel works") l.Info("Assert the tunnel works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r) assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
t.Log("Wait until we remove extra tunnels") t.Log("Wait until we remove extra tunnels")
l.Info("Wait until we remove extra tunnels") l.Info("Wait until we remove extra tunnels")
@@ -488,7 +622,7 @@ func TestStage1RaceRelays2(t *testing.T) {
"theirControl": len(theirControl.GetHostmap().Indexes), "theirControl": len(theirControl.GetHostmap().Indexes),
"relayControl": len(relayControl.GetHostmap().Indexes), "relayControl": len(relayControl.GetHostmap().Indexes),
}).Info("Waiting for hostinfos to be removed...") }).Info("Waiting for hostinfos to be removed...")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet") t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second) time.Sleep(time.Second)
retries-- retries--
@@ -496,7 +630,7 @@ func TestStage1RaceRelays2(t *testing.T) {
r.Log("Assert the tunnel works") r.Log("Assert the tunnel works")
l.Info("Assert the tunnel works") l.Info("Assert the tunnel works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r) assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
myControl.Stop() myControl.Stop()
theirControl.Stop() theirControl.Stop()
@@ -505,16 +639,17 @@ func TestStage1RaceRelays2(t *testing.T) {
// //
////TODO: assert hostmaps ////TODO: assert hostmaps
} }
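Both race tests above wait out the connection manager with the same pattern: keep proving the tunnel works, sleep, and bound the wait with a retry counter. A hedged sketch of that loop, using only names that appear in the tests above:

retries := 60
for ; retries > 0 && len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2; retries-- {
	assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
	t.Log("Connection manager hasn't ticked yet")
	time.Sleep(time.Second)
}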
func TestRehandshakingRelays(t *testing.T) { func TestRehandshakingRelays(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}}) myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}}) relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}}) theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
// Teach my how to get to the relay and that their can be reached via the relay // Teach my how to get to the relay and that their can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr) myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP}) myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
// Build a router so we don't have to reason who gets which packet // Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl) r := router.NewR(t, myControl, relayControl, theirControl)
@@ -526,17 +661,17 @@ func TestRehandshakingRelays(t *testing.T) {
theirControl.Start() theirControl.Start()
t.Log("Trigger a handshake from me to them via the relay") t.Log("Trigger a handshake from me to them via the relay")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl) p := r.RouteForAllUntilTxTun(theirControl)
r.Log("Assert the tunnel works") r.Log("Assert the tunnel works")
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80) assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl) r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
// When I update the certificate for the relay, both me and them will have 2 host infos for the relay, // When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
// and the main host infos will not have any relay state to handle the me<->relay<->them tunnel. // and the main host infos will not have any relay state to handle the me<->relay<->them tunnel.
r.Log("Renew relay certificate and spin until me and them sees it") r.Log("Renew relay certificate and spin until me and them sees it")
_, _, myNextPrivKey, myNextPEM := newTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"}) _, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})
caB, err := ca.MarshalToPEM() caB, err := ca.MarshalToPEM()
if err != nil { if err != nil {
@@ -554,8 +689,8 @@ func TestRehandshakingRelays(t *testing.T) {
for { for {
r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet") r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet")
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r) assertTunnel(t, myVpnIpNet.Addr(), relayVpnIpNet.Addr(), myControl, relayControl, r)
c := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false) c := myControl.GetHostInfoByVpnIp(relayVpnIpNet.Addr(), false)
if len(c.Cert.Details.Groups) != 0 { if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now // We have a new certificate now
r.Log("Certificate between my and relay is updated!") r.Log("Certificate between my and relay is updated!")
@@ -567,8 +702,8 @@ func TestRehandshakingRelays(t *testing.T) {
for { for {
r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet") r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet")
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r) assertTunnel(t, theirVpnIpNet.Addr(), relayVpnIpNet.Addr(), theirControl, relayControl, r)
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false) c := theirControl.GetHostInfoByVpnIp(relayVpnIpNet.Addr(), false)
if len(c.Cert.Details.Groups) != 0 { if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now // We have a new certificate now
r.Log("Certificate between their and relay is updated!") r.Log("Certificate between their and relay is updated!")
@@ -579,13 +714,13 @@ func TestRehandshakingRelays(t *testing.T) {
} }
r.Log("Assert the relay tunnel still works") r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r) assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl) r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
// We should have two hostinfos on all sides // We should have two hostinfos on all sides
for len(myControl.GetHostmap().Indexes) != 2 { for len(myControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes)) t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works") r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r) assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
r.Log("yupitdoes") r.Log("yupitdoes")
time.Sleep(time.Second) time.Sleep(time.Second)
} }
@@ -593,7 +728,7 @@ func TestRehandshakingRelays(t *testing.T) {
for len(theirControl.GetHostmap().Indexes) != 2 { for len(theirControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes)) t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works") r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r) assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
r.Log("yupitdoes") r.Log("yupitdoes")
time.Sleep(time.Second) time.Sleep(time.Second)
} }
@@ -601,7 +736,111 @@ func TestRehandshakingRelays(t *testing.T) {
for len(relayControl.GetHostmap().Indexes) != 2 { for len(relayControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes)) t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works") r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r) assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
t.Logf("relayControl hostinfos got cleaned up!")
}
func TestRehandshakingRelaysPrimary(t *testing.T) {
// This test is the same as TestRehandshakingRelays but one of the terminal types is a primary swap winner
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.128/24", m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", "10.128.0.1/24", m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
// Teach my how to get to the relay and that their can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
// Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
relayControl.Start()
theirControl.Start()
t.Log("Trigger a handshake from me to them via the relay")
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl)
r.Log("Assert the tunnel works")
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
// When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
// and the main host infos will not have any relay state to handle the me<->relay<->them tunnel.
r.Log("Renew relay certificate and spin until me and them sees it")
_, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})
caB, err := ca.MarshalToPEM()
if err != nil {
panic(err)
}
relayConfig.Settings["pki"] = m{
"ca": string(caB),
"cert": string(myNextPEM),
"key": string(myNextPrivKey),
}
rc, err := yaml.Marshal(relayConfig.Settings)
assert.NoError(t, err)
relayConfig.ReloadConfigString(string(rc))
for {
r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet")
assertTunnel(t, myVpnIpNet.Addr(), relayVpnIpNet.Addr(), myControl, relayControl, r)
c := myControl.GetHostInfoByVpnIp(relayVpnIpNet.Addr(), false)
if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now
r.Log("Certificate between my and relay is updated!")
break
}
time.Sleep(time.Second)
}
for {
r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet")
assertTunnel(t, theirVpnIpNet.Addr(), relayVpnIpNet.Addr(), theirControl, relayControl, r)
c := theirControl.GetHostInfoByVpnIp(relayVpnIpNet.Addr(), false)
if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now
r.Log("Certificate between their and relay is updated!")
break
}
time.Sleep(time.Second)
}
r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
// We should have two hostinfos on all sides
for len(myControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
t.Logf("myControl hostinfos got cleaned up!")
for len(theirControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
t.Logf("theirControl hostinfos got cleaned up!")
for len(relayControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
r.Log("yupitdoes") r.Log("yupitdoes")
time.Sleep(time.Second) time.Sleep(time.Second)
} }
@@ -609,13 +848,13 @@ func TestRehandshakingRelays(t *testing.T) {
} }
func TestRehandshaking(t *testing.T) { func TestRehandshaking(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil) myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", "10.128.0.2/24", nil)
theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil) theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", "10.128.0.1/24", nil)
// Put their info in our lighthouse and vice versa // Put their info in our lighthouse and vice versa
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr) theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
// Build a router so we don't have to reason who gets which packet // Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl) r := router.NewR(t, myControl, theirControl)
@@ -626,12 +865,12 @@ func TestRehandshaking(t *testing.T) {
theirControl.Start() theirControl.Start()
t.Log("Stand up a tunnel between me and them") t.Log("Stand up a tunnel between me and them")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
r.RenderHostmaps("Starting hostmaps", myControl, theirControl) r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
r.Log("Renew my certificate and spin until their sees it") r.Log("Renew my certificate and spin until their sees it")
_, _, myNextPrivKey, myNextPEM := newTestCert(ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), myVpnIpNet, nil, []string{"new group"}) _, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), myVpnIpNet, nil, []string{"new group"})
caB, err := ca.MarshalToPEM() caB, err := ca.MarshalToPEM()
if err != nil { if err != nil {
@@ -648,8 +887,8 @@ func TestRehandshaking(t *testing.T) {
myConfig.ReloadConfigString(string(rc)) myConfig.ReloadConfigString(string(rc))
for { for {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false) c := theirControl.GetHostInfoByVpnIp(myVpnIpNet.Addr(), false)
if len(c.Cert.Details.Groups) != 0 { if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now // We have a new certificate now
break break
@@ -675,19 +914,19 @@ func TestRehandshaking(t *testing.T) {
r.Log("Spin until there is only 1 tunnel") r.Log("Spin until there is only 1 tunnel")
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 { for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet") t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second) time.Sleep(time.Second)
} }
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
myFinalHostmapHosts := myControl.ListHostmapHosts(false) myFinalHostmapHosts := myControl.ListHostmapHosts(false)
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false) myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false) theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false) theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)
// Make sure the correct tunnel won // Make sure the correct tunnel won
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false) c := theirControl.GetHostInfoByVpnIp(myVpnIpNet.Addr(), false)
assert.Contains(t, c.Cert.Details.Groups, "new group") assert.Contains(t, c.Cert.Details.Groups, "new group")
// We should only have a single tunnel now on both sides // We should only have a single tunnel now on both sides
@@ -705,13 +944,13 @@ func TestRehandshaking(t *testing.T) {
func TestRehandshakingLoser(t *testing.T) { func TestRehandshakingLoser(t *testing.T) {
// The purpose of this test is that the race loser renews their certificate and rehandshakes. The final tunnel // The purpose of this test is that the race loser renews their certificate and rehandshakes. The final tunnel
// Should be the one with the new certificate // Should be the one with the new certificate
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil) myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", "10.128.0.2/24", nil)
theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil) theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", "10.128.0.1/24", nil)
// Put their info in our lighthouse and vice versa // Put their info in our lighthouse and vice versa
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr) theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
// Build a router so we don't have to reason who gets which packet // Build a router so we don't have to reason who gets which packet
r := router.NewR(t, myControl, theirControl) r := router.NewR(t, myControl, theirControl)
@@ -722,16 +961,15 @@ func TestRehandshakingLoser(t *testing.T) {
theirControl.Start() theirControl.Start()
t.Log("Stand up a tunnel between me and them") t.Log("Stand up a tunnel between me and them")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
tt1 := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false) myControl.GetHostInfoByVpnIp(theirVpnIpNet.Addr(), false)
tt2 := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false) theirControl.GetHostInfoByVpnIp(myVpnIpNet.Addr(), false)
fmt.Println(tt1.LocalIndex, tt2.LocalIndex)
r.RenderHostmaps("Starting hostmaps", myControl, theirControl) r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
r.Log("Renew their certificate and spin until mine sees it") r.Log("Renew their certificate and spin until mine sees it")
_, _, theirNextPrivKey, theirNextPEM := newTestCert(ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), theirVpnIpNet, nil, []string{"their new group"}) _, _, theirNextPrivKey, theirNextPEM := NewTestCert(ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), theirVpnIpNet, nil, []string{"their new group"})
caB, err := ca.MarshalToPEM() caB, err := ca.MarshalToPEM()
if err != nil { if err != nil {
@@ -748,8 +986,8 @@ func TestRehandshakingLoser(t *testing.T) {
theirConfig.ReloadConfigString(string(rc)) theirConfig.ReloadConfigString(string(rc))
for { for {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
theirCertInMe := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false) theirCertInMe := myControl.GetHostInfoByVpnIp(theirVpnIpNet.Addr(), false)
_, theirNewGroup := theirCertInMe.Cert.Details.InvertedGroups["their new group"] _, theirNewGroup := theirCertInMe.Cert.Details.InvertedGroups["their new group"]
if theirNewGroup { if theirNewGroup {
@@ -776,19 +1014,19 @@ func TestRehandshakingLoser(t *testing.T) {
r.Log("Spin until there is only 1 tunnel") r.Log("Spin until there is only 1 tunnel")
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 { for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet") t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second) time.Sleep(time.Second)
} }
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
myFinalHostmapHosts := myControl.ListHostmapHosts(false) myFinalHostmapHosts := myControl.ListHostmapHosts(false)
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false) myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false) theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false) theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)
// Make sure the correct tunnel won // Make sure the correct tunnel won
theirCertInMe := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false) theirCertInMe := myControl.GetHostInfoByVpnIp(theirVpnIpNet.Addr(), false)
assert.Contains(t, theirCertInMe.Cert.Details.Groups, "their new group") assert.Contains(t, theirCertInMe.Cert.Details.Groups, "their new group")
// We should only have a single tunnel now on both sides // We should only have a single tunnel now on both sides
@@ -806,13 +1044,13 @@ func TestRaceRegression(t *testing.T) {
// This test forces stage 1, stage 2, stage 1 to be received by me from them // This test forces stage 1, stage 2, stage 1 to be received by me from them
// We had a bug where we were not finding the duplicate handshake and responding to the final stage 1 which // We had a bug where we were not finding the duplicate handshake and responding to the final stage 1 which
// caused a cross-linked hostinfo // caused a cross-linked hostinfo
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{}) ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil) myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil) theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)
// Put their info in our lighthouse // Put their info in our lighthouse
myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr) myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr) theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
// Start the servers // Start the servers
myControl.Start() myControl.Start()
@@ -826,8 +1064,8 @@ func TestRaceRegression(t *testing.T) {
//them rx stage:2 initiatorIndex=120607833 responderIndex=4209862089 //them rx stage:2 initiatorIndex=120607833 responderIndex=4209862089
t.Log("Start both handshakes") t.Log("Start both handshakes")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me")) myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them")) theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
t.Log("Get both stage 1") t.Log("Get both stage 1")
myStage1ForThem := myControl.GetFromUDP(true) myStage1ForThem := myControl.GetFromUDP(true)
@@ -857,7 +1095,7 @@ func TestRaceRegression(t *testing.T) {
r.RenderHostmaps("Starting hostmaps", myControl, theirControl) r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
t.Log("Make sure the tunnel still works") t.Log("Make sure the tunnel still works")
assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r) assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
myControl.Stop() myControl.Stop()
theirControl.Stop() theirControl.Stop()

e2e/helpers.go Normal file

@@ -0,0 +1,125 @@
package e2e
import (
"crypto/rand"
"io"
"net"
"net/netip"
"time"
"github.com/slackhq/nebula/cert"
"golang.org/x/crypto/curve25519"
"golang.org/x/crypto/ed25519"
)
// NewTestCaCert will generate a CA cert
func NewTestCaCert(before, after time.Time, ips, subnets []netip.Prefix, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
pub, priv, err := ed25519.GenerateKey(rand.Reader)
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
}
if after.IsZero() {
after = time.Now().Add(time.Second * 60).Round(time.Second)
}
nc := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "test ca",
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub,
IsCA: true,
InvertedGroups: make(map[string]struct{}),
},
}
if len(ips) > 0 {
nc.Details.Ips = make([]*net.IPNet, len(ips))
for i, ip := range ips {
nc.Details.Ips[i] = &net.IPNet{IP: ip.Addr().AsSlice(), Mask: net.CIDRMask(ip.Bits(), ip.Addr().BitLen())}
}
}
if len(subnets) > 0 {
nc.Details.Subnets = make([]*net.IPNet, len(subnets))
for i, ip := range subnets {
nc.Details.Subnets[i] = &net.IPNet{IP: ip.Addr().AsSlice(), Mask: net.CIDRMask(ip.Bits(), ip.Addr().BitLen())}
}
}
if len(groups) > 0 {
nc.Details.Groups = groups
}
err = nc.Sign(cert.Curve_CURVE25519, priv)
if err != nil {
panic(err)
}
pem, err := nc.MarshalToPEM()
if err != nil {
panic(err)
}
return nc, pub, priv, pem
}
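NewTestCaCert accepts netip.Prefix values, but the certificate details still use *net.IPNet, so the conversion happens at this boundary. A minimal standalone sketch of that conversion:

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	p := netip.MustParsePrefix("10.128.0.1/24")
	// Rebuild a *net.IPNet from the prefix: address bytes plus a CIDR mask
	// sized to the address family (BitLen is 32 for IPv4, 128 for IPv6).
	ipNet := &net.IPNet{
		IP:   p.Addr().AsSlice(),
		Mask: net.CIDRMask(p.Bits(), p.Addr().BitLen()),
	}
	fmt.Println(ipNet) // 10.128.0.1/24
}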
// NewTestCert will generate a signed certificate with the provided details.
// Expiry times are defaulted if you do not pass them in
func NewTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip netip.Prefix, subnets []netip.Prefix, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
issuer, err := ca.Sha256Sum()
if err != nil {
panic(err)
}
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
}
if after.IsZero() {
after = time.Now().Add(time.Second * 60).Round(time.Second)
}
pub, rawPriv := x25519Keypair()
ipb := ip.Addr().AsSlice()
nc := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: name,
Ips: []*net.IPNet{{IP: ipb[:], Mask: net.CIDRMask(ip.Bits(), ip.Addr().BitLen())}},
//Subnets: subnets,
Groups: groups,
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub,
IsCA: false,
Issuer: issuer,
InvertedGroups: make(map[string]struct{}),
},
}
err = nc.Sign(ca.Details.Curve, key)
if err != nil {
panic(err)
}
pem, err := nc.MarshalToPEM()
if err != nil {
panic(err)
}
return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
}
func x25519Keypair() ([]byte, []byte) {
privkey := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
panic(err)
}
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
if err != nil {
panic(err)
}
return pubkey, privkey
}
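A hypothetical usage fragment (meant to sit inside a test function in this package) relying only on the signatures defined above; the host name, prefix, and group are example values, and zero times fall back to the documented defaults.

ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
crt, pub, key, pem := NewTestCert(ca, caKey, "host-a", time.Time{}, time.Time{},
	netip.MustParsePrefix("10.128.0.1/24"), nil, []string{"servers"})
_, _, _, _ = crt, pub, key, pem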


@@ -4,43 +4,47 @@
package e2e package e2e
import ( import (
"crypto/rand"
"fmt" "fmt"
"io" "io"
"net" "net/netip"
"os" "os"
"testing" "testing"
"time" "time"
"dario.cat/mergo"
"github.com/google/gopacket" "github.com/google/gopacket"
"github.com/google/gopacket/layers" "github.com/google/gopacket/layers"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula" "github.com/slackhq/nebula"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config" "github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/e2e/router" "github.com/slackhq/nebula/e2e/router"
"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"golang.org/x/crypto/curve25519"
"golang.org/x/crypto/ed25519"
"gopkg.in/yaml.v2" "gopkg.in/yaml.v2"
) )
type m map[string]interface{} type m map[string]interface{}
// newSimpleServer creates a nebula instance with many assumptions // newSimpleServer creates a nebula instance with many assumptions
func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, *net.IPNet, *net.UDPAddr, *config.C) { func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, sVpnIpNet string, overrides m) (*nebula.Control, netip.Prefix, netip.AddrPort, *config.C) {
l := NewTestLogger() l := NewTestLogger()
vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}} vpnIpNet, err := netip.ParsePrefix(sVpnIpNet)
copy(vpnIpNet.IP, udpIp) if err != nil {
vpnIpNet.IP[1] += 128 panic(err)
udpAddr := net.UDPAddr{
IP: udpIp,
Port: 4242,
} }
_, _, myPrivKey, myPEM := newTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})
var udpAddr netip.AddrPort
if vpnIpNet.Addr().Is4() {
budpIp := vpnIpNet.Addr().As4()
budpIp[1] -= 128
udpAddr = netip.AddrPortFrom(netip.AddrFrom4(budpIp), 4242)
} else {
budpIp := vpnIpNet.Addr().As16()
budpIp[13] -= 128
udpAddr = netip.AddrPortFrom(netip.AddrFrom16(budpIp), 4242)
}
_, _, myPrivKey, myPEM := NewTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})
caB, err := caCrt.MarshalToPEM() caB, err := caCrt.MarshalToPEM()
if err != nil { if err != nil {
@@ -70,8 +74,8 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
// "try_interval": "1s", // "try_interval": "1s",
//}, //},
"listen": m{ "listen": m{
"host": udpAddr.IP.String(), "host": udpAddr.Addr().String(),
"port": udpAddr.Port, "port": udpAddr.Port(),
}, },
"logging": m{ "logging": m{
"timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name), "timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name),
@@ -105,113 +109,7 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
panic(err) panic(err)
} }
return control, vpnIpNet, &udpAddr, c return control, vpnIpNet, udpAddr, c
}
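newSimpleServer now takes the VPN network as a CIDR string, parses it once, and derives the underlay UDP listen address by flipping a byte of the overlay address. A minimal standalone sketch (hypothetical addresses) of the netip calls it leans on:

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	vpnIpNet, err := netip.ParsePrefix("10.128.0.1/24")
	if err != nil {
		panic(err)
	}

	// As4 returns a [4]byte copy, so tweaking it does not mutate the original
	// address; subtracting 128 maps the 10.128.0.0/24 overlay onto a
	// 10.0.0.0/24 underlay, mirroring the helper above.
	b := vpnIpNet.Addr().As4()
	b[1] -= 128
	udpAddr := netip.AddrPortFrom(netip.AddrFrom4(b), 4242)

	fmt.Println(vpnIpNet.Addr(), "listens on", udpAddr) // 10.128.0.1 listens on 10.0.0.1:4242
}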
// newTestCaCert will generate a CA cert
func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
pub, priv, err := ed25519.GenerateKey(rand.Reader)
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
}
if after.IsZero() {
after = time.Now().Add(time.Second * 60).Round(time.Second)
}
nc := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "test ca",
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub,
IsCA: true,
InvertedGroups: make(map[string]struct{}),
},
}
if len(ips) > 0 {
nc.Details.Ips = ips
}
if len(subnets) > 0 {
nc.Details.Subnets = subnets
}
if len(groups) > 0 {
nc.Details.Groups = groups
}
err = nc.Sign(cert.Curve_CURVE25519, priv)
if err != nil {
panic(err)
}
pem, err := nc.MarshalToPEM()
if err != nil {
panic(err)
}
return nc, pub, priv, pem
}
// newTestCert will generate a signed certificate with the provided details.
// Expiry times are defaulted if you do not pass them in
func newTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
issuer, err := ca.Sha256Sum()
if err != nil {
panic(err)
}
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
}
if after.IsZero() {
after = time.Now().Add(time.Second * 60).Round(time.Second)
}
pub, rawPriv := x25519Keypair()
nc := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: name,
Ips: []*net.IPNet{ip},
Subnets: subnets,
Groups: groups,
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub,
IsCA: false,
Issuer: issuer,
InvertedGroups: make(map[string]struct{}),
},
}
err = nc.Sign(ca.Details.Curve, key)
if err != nil {
panic(err)
}
pem, err := nc.MarshalToPEM()
if err != nil {
panic(err)
}
return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
}
func x25519Keypair() ([]byte, []byte) {
privkey := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
panic(err)
}
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
if err != nil {
panic(err)
}
return pubkey, privkey
} }
type doneCb func() type doneCb func()
@@ -232,7 +130,7 @@ func deadline(t *testing.T, seconds time.Duration) doneCb {
} }
} }
func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) { func assertTunnel(t *testing.T, vpnIpA, vpnIpB netip.Addr, controlA, controlB *nebula.Control, r *router.R) {
// Send a packet from them to me // Send a packet from them to me
controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B")) controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
bPacket := r.RouteForAllUntilTxTun(controlA) bPacket := r.RouteForAllUntilTxTun(controlA)
@@ -244,23 +142,20 @@ func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebul
assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80) assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
} }
func assertHostInfoPair(t *testing.T, addrA, addrB *net.UDPAddr, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control) { func assertHostInfoPair(t *testing.T, addrA, addrB netip.AddrPort, vpnIpA, vpnIpB netip.Addr, controlA, controlB *nebula.Control) {
// Get both host infos // Get both host infos
hBinA := controlA.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpB), false) hBinA := controlA.GetHostInfoByVpnIp(vpnIpB, false)
assert.NotNil(t, hBinA, "Host B was not found by vpnIp in controlA") assert.NotNil(t, hBinA, "Host B was not found by vpnIp in controlA")
hAinB := controlB.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpA), false) hAinB := controlB.GetHostInfoByVpnIp(vpnIpA, false)
assert.NotNil(t, hAinB, "Host A was not found by vpnIp in controlB") assert.NotNil(t, hAinB, "Host A was not found by vpnIp in controlB")
// Check that both vpn and real addr are correct // Check that both vpn and real addr are correct
assert.Equal(t, vpnIpB, hBinA.VpnIp, "Host B VpnIp is wrong in control A") assert.Equal(t, vpnIpB, hBinA.VpnIp, "Host B VpnIp is wrong in control A")
assert.Equal(t, vpnIpA, hAinB.VpnIp, "Host A VpnIp is wrong in control B") assert.Equal(t, vpnIpA, hAinB.VpnIp, "Host A VpnIp is wrong in control B")
assert.Equal(t, addrB.IP.To16(), hBinA.CurrentRemote.IP.To16(), "Host B remote ip is wrong in control A") assert.Equal(t, addrB, hBinA.CurrentRemote, "Host B remote is wrong in control A")
assert.Equal(t, addrA.IP.To16(), hAinB.CurrentRemote.IP.To16(), "Host A remote ip is wrong in control B") assert.Equal(t, addrA, hAinB.CurrentRemote, "Host A remote is wrong in control B")
assert.Equal(t, addrB.Port, int(hBinA.CurrentRemote.Port), "Host B remote port is wrong in control A")
assert.Equal(t, addrA.Port, int(hAinB.CurrentRemote.Port), "Host A remote port is wrong in control B")
// Check that our indexes match // Check that our indexes match
assert.Equal(t, hBinA.LocalIndex, hAinB.RemoteIndex, "Host B local index does not match host A remote index") assert.Equal(t, hBinA.LocalIndex, hAinB.RemoteIndex, "Host B local index does not match host A remote index")
@@ -283,13 +178,13 @@ func assertHostInfoPair(t *testing.T, addrA, addrB *net.UDPAddr, vpnIpA, vpnIpB
//checkIndexes("hmB", hmB, hAinB) //checkIndexes("hmB", hmB, hAinB)
} }
func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp net.IP, fromPort, toPort uint16) { func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp netip.Addr, fromPort, toPort uint16) {
packet := gopacket.NewPacket(b, layers.LayerTypeIPv4, gopacket.Lazy) packet := gopacket.NewPacket(b, layers.LayerTypeIPv4, gopacket.Lazy)
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4) v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
assert.NotNil(t, v4, "No ipv4 data found") assert.NotNil(t, v4, "No ipv4 data found")
assert.Equal(t, fromIp, v4.SrcIP, "Source ip was incorrect") assert.Equal(t, fromIp.AsSlice(), []byte(v4.SrcIP), "Source ip was incorrect")
assert.Equal(t, toIp, v4.DstIP, "Dest ip was incorrect") assert.Equal(t, toIp.AsSlice(), []byte(v4.DstIP), "Dest ip was incorrect")
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP) udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
assert.NotNil(t, udp, "No udp data found") assert.NotNil(t, udp, "No udp data found")
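assertUdpPacket now reduces both sides to raw byte slices before asserting, because a netip.Addr value would never compare equal to gopacket's net.IP. A short hedged sketch of that comparison (the literal slice stands in for what gopacket returns):

want := netip.MustParseAddr("10.128.0.1").AsSlice() // []byte{10, 128, 0, 1}
got := []byte{10, 128, 0, 1}                        // stand-in for []byte(v4.SrcIP)
assert.Equal(t, want, got)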


@@ -5,11 +5,11 @@ package router
import ( import (
"fmt" "fmt"
"net/netip"
"sort" "sort"
"strings" "strings"
"github.com/slackhq/nebula" "github.com/slackhq/nebula"
"github.com/slackhq/nebula/iputil"
) )
type edge struct { type edge struct {
@@ -118,14 +118,14 @@ func renderHostmap(c *nebula.Control) (string, []*edge) {
return r, globalLines return r, globalLines
} }
func sortedHosts(hosts map[iputil.VpnIp]*nebula.HostInfo) []iputil.VpnIp { func sortedHosts(hosts map[netip.Addr]*nebula.HostInfo) []netip.Addr {
keys := make([]iputil.VpnIp, 0, len(hosts)) keys := make([]netip.Addr, 0, len(hosts))
for key := range hosts { for key := range hosts {
keys = append(keys, key) keys = append(keys, key)
} }
sort.SliceStable(keys, func(i, j int) bool { sort.SliceStable(keys, func(i, j int) bool {
return keys[i] > keys[j] return keys[i].Compare(keys[j]) > 0
}) })
return keys return keys
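sortedHosts can no longer rely on the integer-style ordering of iputil.VpnIp; netip.Addr is an opaque struct, so the sort switches to Compare, which returns -1, 0, or 1. A standalone sketch with arbitrary example addresses:

package main

import (
	"fmt"
	"net/netip"
	"sort"
)

func main() {
	keys := []netip.Addr{
		netip.MustParseAddr("10.128.0.2"),
		netip.MustParseAddr("10.128.0.128"),
		netip.MustParseAddr("10.128.0.1"),
	}
	// Descending order, matching sortedHosts above.
	sort.SliceStable(keys, func(i, j int) bool {
		return keys[i].Compare(keys[j]) > 0
	})
	fmt.Println(keys) // [10.128.0.128 10.128.0.2 10.128.0.1]
}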


@@ -6,12 +6,11 @@ package router
import ( import (
"context" "context"
"fmt" "fmt"
"net" "net/netip"
"os" "os"
"path/filepath" "path/filepath"
"reflect" "reflect"
"sort" "sort"
"strconv"
"strings" "strings"
"sync" "sync"
"testing" "testing"
@@ -21,7 +20,6 @@ import (
"github.com/google/gopacket/layers" "github.com/google/gopacket/layers"
"github.com/slackhq/nebula" "github.com/slackhq/nebula"
"github.com/slackhq/nebula/header" "github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp" "github.com/slackhq/nebula/udp"
"golang.org/x/exp/maps" "golang.org/x/exp/maps"
) )
@@ -29,18 +27,18 @@ import (
type R struct { type R struct {
// Simple map of the ip:port registered on a control to the control // Simple map of the ip:port registered on a control to the control
// Basically a router, right? // Basically a router, right?
controls map[string]*nebula.Control controls map[netip.AddrPort]*nebula.Control
// A map for inbound packets for a control that doesn't know about this address // A map for inbound packets for a control that doesn't know about this address
inNat map[string]*nebula.Control inNat map[netip.AddrPort]*nebula.Control
// A last used map, if an inbound packet hit the inNat map then // A last used map, if an inbound packet hit the inNat map then
// all return packets should use the same last used inbound address for the outbound sender // all return packets should use the same last used inbound address for the outbound sender
// map[from address + ":" + to address] => ip:port to rewrite in the udp packet to receiver // map[from address + ":" + to address] => ip:port to rewrite in the udp packet to receiver
outNat map[string]net.UDPAddr outNat map[string]netip.AddrPort
// A map of vpn ip to the nebula control it belongs to // A map of vpn ip to the nebula control it belongs to
vpnControls map[iputil.VpnIp]*nebula.Control vpnControls map[netip.Addr]*nebula.Control
ignoreFlows []ignoreFlow ignoreFlows []ignoreFlow
flow []flowEntry flow []flowEntry
@@ -118,10 +116,10 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
} }
r := &R{ r := &R{
controls: make(map[string]*nebula.Control), controls: make(map[netip.AddrPort]*nebula.Control),
vpnControls: make(map[iputil.VpnIp]*nebula.Control), vpnControls: make(map[netip.Addr]*nebula.Control),
inNat: make(map[string]*nebula.Control), inNat: make(map[netip.AddrPort]*nebula.Control),
outNat: make(map[string]net.UDPAddr), outNat: make(map[string]netip.AddrPort),
flow: []flowEntry{}, flow: []flowEntry{},
ignoreFlows: []ignoreFlow{}, ignoreFlows: []ignoreFlow{},
fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())), fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())),
@@ -135,7 +133,7 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
for _, c := range controls { for _, c := range controls {
addr := c.GetUDPAddr() addr := c.GetUDPAddr()
if _, ok := r.controls[addr]; ok { if _, ok := r.controls[addr]; ok {
panic("Duplicate listen address: " + addr) panic("Duplicate listen address: " + addr.String())
} }
r.vpnControls[c.GetVpnIp()] = c r.vpnControls[c.GetVpnIp()] = c
@@ -165,13 +163,13 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
// It does not look at the addr attached to the instance. // It does not look at the addr attached to the instance.
// If a route is used, this will behave like a NAT for the return path. // If a route is used, this will behave like a NAT for the return path.
// Rewriting the source ip:port to what was last sent to from the origin // Rewriting the source ip:port to what was last sent to from the origin
func (r *R) AddRoute(ip net.IP, port uint16, c *nebula.Control) { func (r *R) AddRoute(ip netip.Addr, port uint16, c *nebula.Control) {
r.Lock() r.Lock()
defer r.Unlock() defer r.Unlock()
inAddr := net.JoinHostPort(ip.String(), fmt.Sprintf("%v", port)) inAddr := netip.AddrPortFrom(ip, port)
if _, ok := r.inNat[inAddr]; ok { if _, ok := r.inNat[inAddr]; ok {
panic("Duplicate listen address inNat: " + inAddr) panic("Duplicate listen address inNat: " + inAddr.String())
} }
r.inNat[inAddr] = c r.inNat[inAddr] = c
} }
@@ -198,7 +196,7 @@ func (r *R) renderFlow() {
panic(err) panic(err)
} }
var participants = map[string]struct{}{} var participants = map[netip.AddrPort]struct{}{}
var participantsVals []string var participantsVals []string
fmt.Fprintln(f, "```mermaid") fmt.Fprintln(f, "```mermaid")
@@ -215,7 +213,7 @@ func (r *R) renderFlow() {
continue continue
} }
participants[addr] = struct{}{} participants[addr] = struct{}{}
sanAddr := strings.Replace(addr, ":", "-", 1) sanAddr := strings.Replace(addr.String(), ":", "-", 1)
participantsVals = append(participantsVals, sanAddr) participantsVals = append(participantsVals, sanAddr)
fmt.Fprintf( fmt.Fprintf(
f, " participant %s as Nebula: %s<br/>UDP: %s\n", f, " participant %s as Nebula: %s<br/>UDP: %s\n",
@@ -252,9 +250,9 @@ func (r *R) renderFlow() {
fmt.Fprintf(f, fmt.Fprintf(f,
" %s%s%s: %s(%s), index %v, counter: %v\n", " %s%s%s: %s(%s), index %v, counter: %v\n",
strings.Replace(p.from.GetUDPAddr(), ":", "-", 1), strings.Replace(p.from.GetUDPAddr().String(), ":", "-", 1),
line, line,
strings.Replace(p.to.GetUDPAddr(), ":", "-", 1), strings.Replace(p.to.GetUDPAddr().String(), ":", "-", 1),
h.TypeName(), h.SubTypeName(), h.RemoteIndex, h.MessageCounter, h.TypeName(), h.SubTypeName(), h.RemoteIndex, h.MessageCounter,
) )
} }
@@ -305,7 +303,7 @@ func (r *R) RenderHostmaps(title string, controls ...*nebula.Control) {
func (r *R) renderHostmaps(title string) { func (r *R) renderHostmaps(title string) {
c := maps.Values(r.controls) c := maps.Values(r.controls)
sort.SliceStable(c, func(i, j int) bool { sort.SliceStable(c, func(i, j int) bool {
return c[i].GetVpnIp() > c[j].GetVpnIp() return c[i].GetVpnIp().Compare(c[j].GetVpnIp()) > 0
}) })
s := renderHostmaps(c...) s := renderHostmaps(c...)
@@ -420,10 +418,8 @@ func (r *R) RouteUntilTxTun(sender *nebula.Control, receiver *nebula.Control) []
// Nope, lets push the sender along // Nope, lets push the sender along
case p := <-udpTx: case p := <-udpTx:
outAddr := sender.GetUDPAddr()
r.Lock() r.Lock()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort)) c := r.getControl(sender.GetUDPAddr(), p.To, p)
c := r.getControl(outAddr, inAddr, p)
if c == nil { if c == nil {
r.Unlock() r.Unlock()
panic("No control for udp tx") panic("No control for udp tx")
@@ -479,10 +475,7 @@ func (r *R) RouteForAllUntilTxTun(receiver *nebula.Control) []byte {
} else { } else {
// we are a udp tx, route and continue // we are a udp tx, route and continue
p := rx.Interface().(*udp.Packet) p := rx.Interface().(*udp.Packet)
outAddr := cm[x].GetUDPAddr() c := r.getControl(cm[x].GetUDPAddr(), p.To, p)
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
c := r.getControl(outAddr, inAddr, p)
if c == nil { if c == nil {
r.Unlock() r.Unlock()
panic("No control for udp tx") panic("No control for udp tx")
@@ -509,12 +502,10 @@ func (r *R) RouteExitFunc(sender *nebula.Control, whatDo ExitFunc) {
panic(err) panic(err)
} }
outAddr := sender.GetUDPAddr() receiver := r.getControl(sender.GetUDPAddr(), p.To, p)
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
receiver := r.getControl(outAddr, inAddr, p)
if receiver == nil { if receiver == nil {
r.Unlock() r.Unlock()
panic("Can't route for host: " + inAddr) panic("Can't RouteExitFunc for host: " + p.To.String())
} }
e := whatDo(p, receiver) e := whatDo(p, receiver)
@@ -590,13 +581,13 @@ func (r *R) InjectUDPPacket(sender, receiver *nebula.Control, packet *udp.Packet
// RouteForUntilAfterToAddr will route for sender and return only after it sees and sends a packet destined for toAddr // RouteForUntilAfterToAddr will route for sender and return only after it sees and sends a packet destined for toAddr
// finish can be any of the exitType values except `keepRouting`, the default value is `routeAndExit` // finish can be any of the exitType values except `keepRouting`, the default value is `routeAndExit`
// If the router doesn't have the nebula controller for that address, we panic // If the router doesn't have the nebula controller for that address, we panic
func (r *R) RouteForUntilAfterToAddr(sender *nebula.Control, toAddr *net.UDPAddr, finish ExitType) { func (r *R) RouteForUntilAfterToAddr(sender *nebula.Control, toAddr netip.AddrPort, finish ExitType) {
if finish == KeepRouting { if finish == KeepRouting {
finish = RouteAndExit finish = RouteAndExit
} }
r.RouteExitFunc(sender, func(p *udp.Packet, r *nebula.Control) ExitType { r.RouteExitFunc(sender, func(p *udp.Packet, r *nebula.Control) ExitType {
if p.ToIp.Equal(toAddr.IP) && p.ToPort == uint16(toAddr.Port) { if p.To == toAddr {
return finish return finish
} }
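The simplification above leans on `netip.AddrPort` being a comparable value type. A small self-contained sketch (not part of this diff) of why the old two-field comparison and the string-keyed router maps both collapse:

```
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// netip.AddrPort is a comparable value type, so the old
	// ToIp.Equal(...) && ToPort == ... pair collapses to a single ==.
	a := netip.MustParseAddrPort("10.1.1.1:4242")
	b := netip.AddrPortFrom(netip.MustParseAddr("10.1.1.1"), 4242)
	fmt.Println(a == b) // true

	// The same property lets AddrPort act directly as a map key, which is how
	// the router's controls/inNat maps are keyed elsewhere in this diff.
	m := map[netip.AddrPort]string{a: "control"}
	fmt.Println(m[b]) // control
}
```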
@@ -630,13 +621,10 @@ func (r *R) RouteForAllExitFunc(whatDo ExitFunc) {
r.Lock() r.Lock()
p := rx.Interface().(*udp.Packet) p := rx.Interface().(*udp.Packet)
receiver := r.getControl(cm[x].GetUDPAddr(), p.To, p)
outAddr := cm[x].GetUDPAddr()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
receiver := r.getControl(outAddr, inAddr, p)
if receiver == nil { if receiver == nil {
r.Unlock() r.Unlock()
panic("Can't route for host: " + inAddr) panic("Can't RouteForAllExitFunc for host: " + p.To.String())
} }
e := whatDo(p, receiver) e := whatDo(p, receiver)
@@ -697,41 +685,26 @@ func (r *R) FlushAll() {
p := rx.Interface().(*udp.Packet) p := rx.Interface().(*udp.Packet)
outAddr := cm[x].GetUDPAddr() receiver := r.getControl(cm[x].GetUDPAddr(), p.To, p)
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
receiver := r.getControl(outAddr, inAddr, p)
if receiver == nil { if receiver == nil {
r.Unlock() r.Unlock()
panic("Can't route for host: " + inAddr) panic("Can't FlushAll for host: " + p.To.String())
} }
receiver.InjectUDPPacket(p)
r.Unlock() r.Unlock()
} }
} }
// getControl performs or seeds NAT translation and returns the control for toAddr, p from fields may change // getControl performs or seeds NAT translation and returns the control for toAddr, p from fields may change
// This is an internal router function, the caller must hold the lock // This is an internal router function, the caller must hold the lock
func (r *R) getControl(fromAddr, toAddr string, p *udp.Packet) *nebula.Control { func (r *R) getControl(fromAddr, toAddr netip.AddrPort, p *udp.Packet) *nebula.Control {
if newAddr, ok := r.outNat[fromAddr+":"+toAddr]; ok { if newAddr, ok := r.outNat[fromAddr.String()+":"+toAddr.String()]; ok {
p.FromIp = newAddr.IP p.From = newAddr
p.FromPort = uint16(newAddr.Port)
} }
c, ok := r.inNat[toAddr] c, ok := r.inNat[toAddr]
if ok { if ok {
sHost, sPort, err := net.SplitHostPort(toAddr) r.outNat[c.GetUDPAddr().String()+":"+fromAddr.String()] = toAddr
if err != nil {
panic(err)
}
port, err := strconv.Atoi(sPort)
if err != nil {
panic(err)
}
r.outNat[c.GetUDPAddr()+":"+fromAddr] = net.UDPAddr{
IP: net.ParseIP(sHost),
Port: port,
}
return c return c
} }
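A standalone toy of the outNat bookkeeping that `getControl` performs above; the addresses are hypothetical and the map is a plain `map[string]netip.AddrPort`, as in the `R` struct:

```
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	// Toy version of the outNat seeding above (hypothetical addresses): once an
	// inbound packet arrives through a NAT route, the reverse key is stored so
	// replies get their source rewritten to the address the sender targeted.
	outNat := map[string]netip.AddrPort{}

	natAddr := netip.MustParseAddrPort("192.0.2.10:4242")      // address the sender used (AddRoute)
	realAddr := netip.MustParseAddrPort("10.0.0.2:4242")       // control actually listening behind it
	senderAddr := netip.MustParseAddrPort("198.51.100.7:4242") // origin of the packet

	// Seed the return path, mirroring r.outNat[c.GetUDPAddr().String()+":"+fromAddr.String()] = toAddr
	outNat[realAddr.String()+":"+senderAddr.String()] = natAddr

	if newFrom, ok := outNat[realAddr.String()+":"+senderAddr.String()]; ok {
		fmt.Println("rewrite reply source to", newFrom) // 192.0.2.10:4242
	}
}
```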
@@ -746,8 +719,9 @@ func (r *R) formatUdpPacket(p *packet) string {
} }
from := "unknown" from := "unknown"
if c, ok := r.vpnControls[iputil.Ip2VpnIp(v4.SrcIP)]; ok { srcAddr, _ := netip.AddrFromSlice(v4.SrcIP)
from = c.GetUDPAddr() if c, ok := r.vpnControls[srcAddr]; ok {
from = c.GetUDPAddr().String()
} }
udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP) udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
@@ -759,7 +733,7 @@ func (r *R) formatUdpPacket(p *packet) string {
return fmt.Sprintf( return fmt.Sprintf(
" %s-->>%s: src port: %v<br/>dest port: %v<br/>data: \"%v\"\n", " %s-->>%s: src port: %v<br/>dest port: %v<br/>data: \"%v\"\n",
strings.Replace(from, ":", "-", 1), strings.Replace(from, ":", "-", 1),
strings.Replace(p.to.GetUDPAddr(), ":", "-", 1), strings.Replace(p.to.GetUDPAddr().String(), ":", "-", 1),
udp.SrcPort, udp.SrcPort,
udp.DstPort, udp.DstPort,
string(data.Payload()), string(data.Payload()),

e2e/tunnels_test.go

@@ -0,0 +1,55 @@
//go:build e2e_testing
// +build e2e_testing
package e2e
import (
"testing"
"time"
"github.com/slackhq/nebula/e2e/router"
)
func TestDropInactiveTunnels(t *testing.T) {
// The goal of this test is to ensure the shortest inactivity timeout will close the tunnel on both sides
// under ideal conditions
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "5s"}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "10m"}})
// Share our underlay information
myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)
// Start the servers
myControl.Start()
theirControl.Start()
r := router.NewR(t, myControl, theirControl)
r.Log("Assert the tunnel between me and them works")
assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
r.Log("Go inactive and wait for the tunnels to get dropped")
waitStart := time.Now()
for {
myIndexes := len(myControl.GetHostmap().Indexes)
theirIndexes := len(theirControl.GetHostmap().Indexes)
if myIndexes == 0 && theirIndexes == 0 {
break
}
since := time.Since(waitStart)
r.Logf("my tunnels: %v; their tunnels: %v; duration: %v", myIndexes, theirIndexes, since)
if since > time.Second*30 {
t.Fatal("Tunnel should have been declared inactive after 5 seconds and before 30 seconds")
}
time.Sleep(1 * time.Second)
r.FlushAll()
}
r.Logf("Inactive tunnels were dropped within %v", time.Since(waitStart))
myControl.Stop()
theirControl.Stop()
}


@@ -11,7 +11,7 @@ pki:
#blocklist: #blocklist:
# - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72 # - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
# disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid. # disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid.
#disconnect_invalid: false #disconnect_invalid: true
# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network). # The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel. # A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
@@ -21,6 +21,19 @@ pki:
static_host_map: static_host_map:
"192.168.100.1": ["100.64.22.11:4242"] "192.168.100.1": ["100.64.22.11:4242"]
# The static_map config stanza can be used to configure how the static_host_map behaves.
#static_map:
# cadence determines how frequently DNS is re-queried for updated IP addresses when a static_host_map entry contains
# a DNS name.
#cadence: 30s
# network determines the type of IP addresses to ask the DNS server for. The default is "ip4" because nodes typically
# do not know their public IPv4 address. Connecting to the Lighthouse via IPv4 allows the Lighthouse to detect the
# public address. Other valid options are "ip6" and "ip" (returns both.)
#network: ip4
# lookup_timeout is the DNS query timeout.
#lookup_timeout: 250ms
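As a rough illustration of the stanza described above, the fragment below loads it the same way the `examples/go_service` program added later in this diff loads its config string; the hostname is a placeholder and only `GetString` (already used elsewhere in this diff) is exercised:

```
package main

import (
	"fmt"
	"log"

	"github.com/slackhq/nebula/config"
)

func main() {
	// Sketch only: a static_host_map entry that uses a DNS name, plus the
	// static_map knobs from the comments above with their documented defaults.
	var cfg config.C
	if err := cfg.LoadString(`
static_host_map:
  "192.168.100.1": ["lighthouse.example.com:4242"]

static_map:
  cadence: 30s
  network: ip4
  lookup_timeout: 250ms
`); err != nil {
		log.Fatal(err)
	}

	fmt.Println("static_map.network =", cfg.GetString("static_map.network", "ip4"))
}
```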
lighthouse: lighthouse:
# am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes # am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
@@ -154,11 +167,11 @@ punchy:
# Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest # Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest
# path to a network adjacent nebula node. # path to a network adjacent nebula node.
# NOTE: the previous option "local_range" only allowed definition of a single range # This setting is reloadable.
# and has been deprecated for "preferred_ranges"
#preferred_ranges: ["172.16.0.0/24"] #preferred_ranges: ["172.16.0.0/24"]
# sshd can expose informational and administrative functions via ssh this is a # sshd exposes informational and administrative functions via ssh, and allows manual tweaking of various
# network settings when debugging or testing.
#sshd: #sshd:
# Toggles the feature # Toggles the feature
#enabled: true #enabled: true
@@ -167,12 +180,15 @@ punchy:
# A file containing the ssh host private key to use # A file containing the ssh host private key to use
# A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null # A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
#host_key: ./ssh_host_ed25519_key #host_key: ./ssh_host_ed25519_key
# A file containing a list of authorized public keys # Authorized users and their public keys
#authorized_users: #authorized_users:
#- user: steeeeve #- user: steeeeve
# keys can be an array of strings or single string # keys can be an array of strings or single string
#keys: #keys:
#- "ssh public key string" #- "ssh public key string"
# Trusted SSH CA public keys. These are the public keys of the CAs that are allowed to sign SSH keys for access.
#trusted_cas:
#- "ssh public key string"
# EXPERIMENTAL: relay support for networks that can't establish direct connections. # EXPERIMENTAL: relay support for networks that can't establish direct connections.
relay: relay:
@@ -194,7 +210,7 @@ tun:
disabled: false disabled: false
# Name of the device. If not set, a default will be chosen by the OS. # Name of the device. If not set, a default will be chosen by the OS.
# For macOS: if set, must be in the form `utun[0-9]+`. # For macOS: if set, must be in the form `utun[0-9]+`.
# For FreeBSD: Required to be set, must be in the form `tun[0-9]+`. # For NetBSD: Required to be set, must be in the form `tun[0-9]+`
dev: nebula1 dev: nebula1
# Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
drop_local_broadcast: false drop_local_broadcast: false
@@ -216,6 +232,7 @@ tun:
# `mtu`: will default to tun mtu if this option is not specified # `mtu`: will default to tun mtu if this option is not specified
# `metric`: will default to 0 if this option is not specified # `metric`: will default to 0 if this option is not specified
# `install`: will default to true, controls whether this route is installed in the systems routing table. # `install`: will default to true, controls whether this route is installed in the systems routing table.
# This setting is reloadable.
unsafe_routes: unsafe_routes:
#- route: 172.16.1.0/24 #- route: 172.16.1.0/24
# via: 192.168.100.99 # via: 192.168.100.99
@@ -230,7 +247,10 @@ tun:
# TODO # TODO
# Configure logging level # Configure logging level
logging: logging:
# panic, fatal, error, warning, info, or debug. Default is info # panic, fatal, error, warning, info, or debug. Default is info and is reloadable.
#NOTE: Debug mode can log remotely controlled/untrusted data which can quickly fill a disk in some
# scenarios. Debug logging is also CPU intensive and will decrease performance overall.
# Only enable debug logging while actively investigating an issue.
level: info level: info
# json or text formats currently available. Default is text # json or text formats currently available. Default is text
format: text format: text
@@ -275,10 +295,26 @@ logging:
# A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out # A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
#try_interval: 100ms #try_interval: 100ms
#retries: 20 #retries: 20
# query_buffer is the size of the buffer channel for querying lighthouses
#query_buffer: 64
# trigger_buffer is the size of the buffer channel for quickly sending handshakes # trigger_buffer is the size of the buffer channel for quickly sending handshakes
# after receiving the response for lighthouse queries # after receiving the response for lighthouse queries
#trigger_buffer: 64 #trigger_buffer: 64
# Tunnel manager settings
#tunnels:
# drop_inactive controls whether inactive tunnels are maintained or dropped after the inactivity_timeout period has
# elapsed.
# In general, it is a good idea to enable this setting. It will be enabled by default in a future release.
# This setting is reloadable
#drop_inactive: false
# inactivity_timeout controls how long a tunnel MUST NOT see any inbound or outbound traffic before being considered
# inactive and eligible to be dropped.
# This setting is reloadable
#inactivity_timeout: 10m
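A minimal sketch of the semantics these two keys describe, not Nebula's actual connection-manager code; names and values are illustrative only:

```
package main

import (
	"fmt"
	"time"
)

// With drop_inactive enabled, a tunnel that has seen no inbound or outbound
// traffic for longer than inactivity_timeout becomes eligible to be dropped.
func eligibleForDrop(lastActivity time.Time, dropInactive bool, inactivityTimeout time.Duration) bool {
	return dropInactive && time.Since(lastActivity) > inactivityTimeout
}

func main() {
	timeout := 10 * time.Minute
	fmt.Println(eligibleForDrop(time.Now().Add(-11*time.Minute), true, timeout)) // true
	fmt.Println(eligibleForDrop(time.Now().Add(-1*time.Minute), true, timeout))  // false
}
```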
# Nebula security group configuration # Nebula security group configuration
firewall: firewall:
@@ -291,6 +327,13 @@ firewall:
outbound_action: drop outbound_action: drop
inbound_action: drop inbound_action: drop
# Controls the default value for local_cidr. Default is true, will be deprecated after v1.9 and defaulted to false.
# This setting only affects nebula hosts with subnets encoded in their certificate. A nebula host acting as an
# unsafe router with `default_local_cidr_any: true` will expose their unsafe routes to every inbound rule regardless
# of the actual destination for the packet. Setting this to false requires each inbound rule to contain a `local_cidr`
# if the intention is to allow traffic to flow to an unsafe route.
#default_local_cidr_any: false
conntrack: conntrack:
tcp_timeout: 12m tcp_timeout: 12m
udp_timeout: 3m udp_timeout: 3m
@@ -298,7 +341,7 @@ firewall:
# The firewall is default deny. There is no way to write a deny rule. # The firewall is default deny. There is no way to write a deny rule.
# Rules are comprised of a protocol, port, and one or more of host, group, or CIDR # Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
# Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr) # Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr) AND (local cidr)
# - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available). # - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available).
# code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any` # code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any`
# proto: `any`, `tcp`, `udp`, or `icmp` # proto: `any`, `tcp`, `udp`, or `icmp`
@@ -307,6 +350,8 @@ firewall:
# groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass # groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
# cidr: a remote CIDR, `0.0.0.0/0` is any. # cidr: a remote CIDR, `0.0.0.0/0` is any.
# local_cidr: a local CIDR, `0.0.0.0/0` is any. This could be used to filter destinations when using unsafe_routes. # local_cidr: a local CIDR, `0.0.0.0/0` is any. This could be used to filter destinations when using unsafe_routes.
# Default is `any` unless the certificate contains subnets and then the default is the ip issued in the certificate
# if `default_local_cidr_any` is false, otherwise it's `any`.
# ca_name: An issuing CA name # ca_name: An issuing CA name
# ca_sha: An issuing CA shasum # ca_sha: An issuing CA shasum
@@ -328,3 +373,10 @@ firewall:
groups: groups:
- laptop - laptop
- home - home
# Expose a subnet (unsafe route) to hosts with the group remote_client
# This example assumes you have a subnet of 192.168.100.1/24 or larger encoded in the certificate
- port: 8080
proto: tcp
group: remote_client
local_cidr: 192.168.100.1/24

examples/go_service/main.go

@@ -0,0 +1,109 @@
package main
import (
"bufio"
"fmt"
"log"
"net"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/service"
)
func main() {
if err := run(); err != nil {
log.Fatalf("%+v", err)
}
}
func run() error {
configStr := `
tun:
user: true
static_host_map:
'192.168.100.1': ['localhost:4242']
listen:
host: 0.0.0.0
port: 4241
lighthouse:
am_lighthouse: false
interval: 60
hosts:
- '192.168.100.1'
firewall:
outbound:
# Allow all outbound traffic from this node
- port: any
proto: any
host: any
inbound:
# Allow icmp between any nebula hosts
- port: any
proto: icmp
host: any
- port: any
proto: any
host: any
pki:
ca: /home/rice/Developer/nebula-config/ca.crt
cert: /home/rice/Developer/nebula-config/app.crt
key: /home/rice/Developer/nebula-config/app.key
`
var cfg config.C
if err := cfg.LoadString(configStr); err != nil {
return err
}
svc, err := service.New(&cfg)
if err != nil {
return err
}
ln, err := svc.Listen("tcp", ":1234")
if err != nil {
return err
}
for {
conn, err := ln.Accept()
if err != nil {
log.Printf("accept error: %s", err)
break
}
defer func(conn net.Conn) {
_ = conn.Close()
}(conn)
log.Printf("got connection")
_, err = conn.Write([]byte("hello world\n"))
if err != nil {
log.Printf("write error: %s", err)
}
scanner := bufio.NewScanner(conn)
for scanner.Scan() {
message := scanner.Text()
_, err = fmt.Fprintf(conn, "echo: %q\n", message)
if err != nil {
log.Printf("write error: %s", err)
}
log.Printf("got message %q", message)
}
if err := scanner.Err(); err != nil {
log.Printf("scanner error: %s", err)
break
}
}
_ = svc.Close()
if err := svc.Wait(); err != nil {
return err
}
return nil
}


@@ -1,138 +0,0 @@
# Quickstart Guide
This guide is intended to bring up a vagrant environment with 1 lighthouse and 2 generic hosts running nebula.
## Creating the virtualenv for ansible
Within the `quickstart/` directory, do the following
```
# make a virtual environment
virtualenv venv
# get into the virtualenv
source venv/bin/activate
# install ansible
pip install -r requirements.yml
```
## Bringing up the vagrant environment
A plugin that is used for the Vagrant environment is `vagrant-hostmanager`
To install, run
```
vagrant plugin install vagrant-hostmanager
```
All hosts within the Vagrantfile are brought up with
`vagrant up`
Once the boxes are up, go into the `ansible/` directory and deploy the playbook by running
`ansible-playbook playbook.yml -i inventory -u vagrant`
## Testing within the vagrant env
Once the ansible run is done, hop onto a vagrant box
`vagrant ssh generic1.vagrant`
or specifically
`ssh vagrant@<ip-address-in-vagrant-file` (password for the vagrant user on the boxes is `vagrant`)
Some quick tests once the vagrant boxes are up are to ping from `generic1.vagrant` to `generic2.vagrant` using
their respective nebula ip address.
```
vagrant@generic1:~$ ping 10.168.91.220
PING 10.168.91.220 (10.168.91.220) 56(84) bytes of data.
64 bytes from 10.168.91.220: icmp_seq=1 ttl=64 time=241 ms
64 bytes from 10.168.91.220: icmp_seq=2 ttl=64 time=0.704 ms
```
You can further verify that the allowed nebula firewall rules work by ssh'ing from 1 generic box to the other.
`ssh vagrant@<nebula-ip-address>` (password for the vagrant user on the boxes is `vagrant`)
See `/etc/nebula/config.yml` on a box for firewall rules.
To see full handshakes and hostmaps, change the logging config of `/etc/nebula/config.yml` on the vagrant boxes from
info to debug.
You can watch nebula logs by running
```
sudo journalctl -fu nebula
```
Refer to the nebula src code directory's README for further instructions on configuring nebula.
## Troubleshooting
### Is nebula up and running?
Run and verify that
```
ifconfig
```
shows you an interface with the name `nebula1` being up.
```
vagrant@generic1:~$ ifconfig nebula1
nebula1: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST> mtu 1300
inet 10.168.91.210 netmask 255.128.0.0 destination 10.168.91.210
inet6 fe80::aeaf:b105:e6dc:936c prefixlen 64 scopeid 0x20<link>
unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 txqueuelen 500 (UNSPEC)
RX packets 2 bytes 168 (168.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 11 bytes 600 (600.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
```
### Connectivity
Are you able to ping other boxes on the private nebula network?
The following are the private nebula ip addresses of the vagrant env
```
generic1.vagrant [nebula_ip] 10.168.91.210
generic2.vagrant [nebula_ip] 10.168.91.220
lighthouse1.vagrant [nebula_ip] 10.168.91.230
```
Try pinging generic1.vagrant to and from any other box using its nebula ip above.
Double check the nebula firewall rules under /etc/nebula/config.yml to make sure that connectivity is allowed for your use-case if on a specific port.
```
vagrant@lighthouse1:~$ grep -A21 firewall /etc/nebula/config.yml
firewall:
conntrack:
tcp_timeout: 12m
udp_timeout: 3m
default_timeout: 10m
inbound:
- proto: icmp
port: any
host: any
- proto: any
port: 22
host: any
- proto: any
port: 53
host: any
outbound:
- proto: any
port: any
host: any
```


@@ -1,40 +0,0 @@
Vagrant.require_version ">= 2.2.6"
nodes = [
{ :hostname => 'generic1.vagrant', :ip => '172.11.91.210', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
{ :hostname => 'generic2.vagrant', :ip => '172.11.91.220', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
{ :hostname => 'lighthouse1.vagrant', :ip => '172.11.91.230', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
]
Vagrant.configure("2") do |config|
config.ssh.insert_key = false
if Vagrant.has_plugin?('vagrant-cachier')
config.cache.enable :apt
else
printf("** Install vagrant-cachier plugin to speedup deploy: `vagrant plugin install vagrant-cachier`.**\n")
end
if Vagrant.has_plugin?('vagrant-hostmanager')
config.hostmanager.enabled = true
config.hostmanager.manage_host = true
config.hostmanager.include_offline = true
else
config.vagrant.plugins = "vagrant-hostmanager"
end
nodes.each do |node|
config.vm.define node[:hostname] do |node_config|
node_config.vm.box = node[:box]
node_config.vm.hostname = node[:hostname]
node_config.vm.network :private_network, ip: node[:ip]
node_config.vm.provider :virtualbox do |vb|
vb.memory = node[:ram]
vb.cpus = node[:cpus]
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
vb.customize ['guestproperty', 'set', :id, '/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold', 10000]
end
end
end
end


@@ -1,4 +0,0 @@
[defaults]
host_key_checking = False
private_key_file = ~/.vagrant.d/insecure_private_key
become = yes


@@ -1,21 +0,0 @@
#!/usr/bin/python
class FilterModule(object):
def filters(self):
return {
'to_nebula_ip': self.to_nebula_ip,
'map_to_nebula_ips': self.map_to_nebula_ips,
}
def to_nebula_ip(self, ip_str):
ip_list = list(map(int, ip_str.split(".")))
ip_list[0] = 10
ip_list[1] = 168
ip = '.'.join(map(str, ip_list))
return ip
def map_to_nebula_ips(self, ip_strs):
ip_list = [ self.to_nebula_ip(ip_str) for ip_str in ip_strs ]
ips = ', '.join(ip_list)
return ips


@@ -1,11 +0,0 @@
[all]
generic1.vagrant
generic2.vagrant
lighthouse1.vagrant
[generic]
generic1.vagrant
generic2.vagrant
[lighthouse]
lighthouse1.vagrant


@@ -1,23 +0,0 @@
---
- name: test connection to vagrant boxes
hosts: all
tasks:
- debug: msg=ok
- name: build nebula binaries locally
connection: local
hosts: localhost
tasks:
- command: chdir=../../../ make build/linux-amd64/"{{ item }}"
with_items:
- nebula
- nebula-cert
tags:
- build-nebula
- name: install nebula on all vagrant hosts
hosts: all
become: yes
gather_facts: yes
roles:
- nebula


@@ -1,3 +0,0 @@
---
# defaults file for nebula
nebula_config_directory: "/etc/nebula/"


@@ -1,14 +0,0 @@
[Unit]
Description=Nebula overlay networking tool
Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target
Before=sshd.service
[Service]
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
Restart=always
[Install]
WantedBy=multi-user.target


@@ -1,5 +0,0 @@
-----BEGIN NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSB0ZXN0IENBKNXC1NYFMNXIhO0GOiCmVYeZ9tkB4WEnawmkrca+
hsAg9otUFhpAowZeJ33KVEABEkAORybHQUUyVFbKYzw0JHfVzAQOHA4kwB1yP9IV
KpiTw9+ADz+wA+R5tn9B+L8+7+Apc+9dem4BQULjA5mRaoYN
-----END NEBULA CERTIFICATE-----


@@ -1,4 +0,0 @@
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
FEXZKMSmg8CgIODR0ymUeNT3nbnVpMi7nD79UgkCRHWmVYeZ9tkB4WEnawmkrca+
hsAg9otUFhpAowZeJ33KVA==
-----END NEBULA ED25519 PRIVATE KEY-----


@@ -1,5 +0,0 @@
---
# handlers file for nebula
- name: restart nebula
service: name=nebula state=restarted


@@ -1,62 +0,0 @@
---
# tasks file for nebula
- name: get the vagrant network interface and set fact
set_fact:
vagrant_ifce: "ansible_{{ ansible_interfaces | difference(['lo',ansible_default_ipv4.alias]) | sort | first }}"
tags:
- nebula-conf
- name: install built nebula binary
copy: src="../../../../../build/linux-amd64/{{ item }}" dest="/usr/local/bin" mode=0755
with_items:
- nebula
- nebula-cert
- name: create nebula config directory
file: path="{{ nebula_config_directory }}" state=directory mode=0755
- name: temporarily copy over root.crt and root.key to sign
copy: src={{ item }} dest=/opt/{{ item }}
with_items:
- vagrant-test-ca.key
- vagrant-test-ca.crt
- name: remove previously signed host certificate
file: dest=/etc/nebula/{{ item }} state=absent
with_items:
- host.crt
- host.key
- name: sign using the root key
command: nebula-cert sign -ca-crt /opt/vagrant-test-ca.crt -ca-key /opt/vagrant-test-ca.key -duration 4320h -groups vagrant -ip {{ hostvars[inventory_hostname][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}/9 -name {{ ansible_hostname }}.nebula -out-crt /etc/nebula/host.crt -out-key /etc/nebula/host.key
- name: remove root.key used to sign
file: dest=/opt/{{ item }} state=absent
with_items:
- vagrant-test-ca.key
- name: write the content of the trusted ca certificate
copy: src="vagrant-test-ca.crt" dest="/etc/nebula/vagrant-test-ca.crt"
notify: restart nebula
- name: Create config directory
file: path="{{ nebula_config_directory }}" owner=root group=root mode=0755 state=directory
- name: nebula config
template: src=config.yml.j2 dest="/etc/nebula/config.yml" mode=0644 owner=root group=root
notify: restart nebula
tags:
- nebula-conf
- name: nebula systemd
copy: src=systemd.nebula.service dest="/etc/systemd/system/nebula.service" mode=0644 owner=root group=root
register: addconf
notify: restart nebula
- name: maybe reload systemd
shell: systemctl daemon-reload
when: addconf.changed
- name: nebula running
service: name="nebula" state=started enabled=yes


@@ -1,85 +0,0 @@
pki:
ca: /etc/nebula/vagrant-test-ca.crt
cert: /etc/nebula/host.crt
key: /etc/nebula/host.key
# Port Nebula will be listening on
listen:
host: 0.0.0.0
port: 4242
# sshd can expose informational and administrative functions via ssh
sshd:
# Toggles the feature
enabled: true
# Host and port to listen on
listen: 127.0.0.1:2222
# A file containing the ssh host private key to use
host_key: /etc/ssh/ssh_host_ed25519_key
# A file containing a list of authorized public keys
authorized_users:
{% for user in nebula_users %}
- user: {{ user.name }}
keys:
{% for key in user.ssh_auth_keys %}
- "{{ key }}"
{% endfor %}
{% endfor %}
local_range: 10.168.0.0/16
static_host_map:
# lighthouse
{{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}: ["{{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address']}}:4242"]
default_route: "0.0.0.0"
lighthouse:
{% if 'lighthouse' in group_names %}
am_lighthouse: true
serve_dns: true
{% else %}
am_lighthouse: false
{% endif %}
interval: 60
{% if 'generic' in group_names %}
hosts:
- {{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}
{% endif %}
# Configure the private interface
tun:
dev: nebula1
# Sets MTU of the tun dev.
# MTU of the tun must be smaller than the MTU of the eth0 interface
mtu: 1300
# TODO
# Configure logging level
logging:
level: info
format: json
firewall:
conntrack:
tcp_timeout: 12m
udp_timeout: 3m
default_timeout: 10m
inbound:
- proto: icmp
port: any
host: any
- proto: any
port: 22
host: any
{% if "lighthouse" in groups %}
- proto: any
port: 53
host: any
{% endif %}
outbound:
- proto: any
port: any
host: any


@@ -1,7 +0,0 @@
---
# vars file for nebula
nebula_users:
- name: user1
ssh_auth_keys:
- "ed25519 place-your-ssh-public-key-here"


@@ -1 +0,0 @@
ansible


@@ -0,0 +1,35 @@
#!/sbin/openrc-run
#
# nebula service for open-rc systems
extra_commands="checkconfig"
: ${NEBULA_CONFDIR:=${RC_PREFIX%/}/etc/nebula}
: ${NEBULA_CONFIG:=${NEBULA_CONFDIR}/config.yml}
: ${NEBULA_BINARY:=${NEBULA_BINARY}${RC_PREFIX%/}/usr/local/sbin/nebula}
command="${NEBULA_BINARY}"
command_args="${NEBULA_OPTS} -config ${NEBULA_CONFIG}"
supervisor="supervise-daemon"
description="A scalable overlay networking tool with a focus on performance, simplicity and security"
required_dirs="${NEBULA_CONFDIR}"
required_files="${NEBULA_CONFIG}"
checkconfig() {
"${command}" -test ${command_args} || return 1
}
start_pre() {
if [ "${RC_CMD}" != "restart" ] ; then
checkconfig || return $?
fi
}
stop_pre() {
if [ "${RC_CMD}" = "restart" ] ; then
checkconfig || return $?
fi
}


@@ -5,6 +5,8 @@ After=basic.target network.target network-online.target
Before=sshd.service Before=sshd.service
[Service] [Service]
Type=notify
NotifyAccess=main
SyslogIdentifier=nebula SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml


@@ -2,36 +2,31 @@ package nebula
import ( import (
"crypto/sha256" "crypto/sha256"
"encoding/binary"
"encoding/hex" "encoding/hex"
"errors" "errors"
"fmt" "fmt"
"net" "hash/fnv"
"net/netip"
"reflect" "reflect"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"time" "time"
"github.com/gaissmai/bart"
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/config" "github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/firewall" "github.com/slackhq/nebula/firewall"
) )
const tcpACK = 0x10
const tcpFIN = 0x01
type FirewallInterface interface { type FirewallInterface interface {
AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip, localIp netip.Prefix, caName string, caSha string) error
} }
type conn struct { type conn struct {
Expires time.Time // Time when this conntrack entry will expire Expires time.Time // Time when this conntrack entry will expire
Sent time.Time // If tcp rtt tracking is enabled this will be when Seq was last set
Seq uint32 // If tcp rtt tracking is enabled this will be the seq we are looking for an ack
// record why the original connection passed the firewall, so we can re-validate // record why the original connection passed the firewall, so we can re-validate
// after ruleset changes. Note, rulesVersion is a uint16 so that these two // after ruleset changes. Note, rulesVersion is a uint16 so that these two
@@ -57,15 +52,16 @@ type Firewall struct {
DefaultTimeout time.Duration //linux: 600s DefaultTimeout time.Duration //linux: 600s
// Used to ensure we don't emit local packets for ips we don't own // Used to ensure we don't emit local packets for ips we don't own
localIps *cidr.Tree4 localIps *bart.Table[struct{}]
assignedCIDR netip.Prefix
hasSubnets bool
rules string rules string
rulesVersion uint16 rulesVersion uint16
trackTCPRTT bool defaultLocalCIDRAny bool
metricTCPRTT metrics.Histogram incomingMetrics firewallMetrics
incomingMetrics firewallMetrics outgoingMetrics firewallMetrics
outgoingMetrics firewallMetrics
l *logrus.Logger l *logrus.Logger
} }
@@ -83,6 +79,8 @@ type FirewallConntrack struct {
TimerWheel *TimerWheel[firewall.Packet] TimerWheel *TimerWheel[firewall.Packet]
} }
// FirewallTable is the entry point for a rule, the evaluation order is:
// Proto AND port AND (CA SHA or CA name) AND local CIDR AND (group OR groups OR name OR remote CIDR)
type FirewallTable struct { type FirewallTable struct {
TCP firewallPort TCP firewallPort
UDP firewallPort UDP firewallPort
@@ -106,18 +104,27 @@ type FirewallCA struct {
} }
type FirewallRule struct { type FirewallRule struct {
// Any makes Hosts, Groups, CIDR and LocalCIDR irrelevant // Any makes Hosts, Groups, and CIDR irrelevant
Any bool Any *firewallLocalCIDR
Hosts map[string]struct{} Hosts map[string]*firewallLocalCIDR
Groups [][]string Groups []*firewallGroups
CIDR *cidr.Tree4 CIDR *bart.Table[*firewallLocalCIDR]
LocalCIDR *cidr.Tree4 }
type firewallGroups struct {
Groups []string
LocalCIDR *firewallLocalCIDR
} }
// Even though ports are uint16, int32 maps are faster for lookup // Even though ports are uint16, int32 maps are faster for lookup
// Plus we can use `-1` for fragment rules // Plus we can use `-1` for fragment rules
type firewallPort map[int32]*FirewallCA type firewallPort map[int32]*FirewallCA
type firewallLocalCIDR struct {
Any bool
LocalCIDR *bart.Table[struct{}]
}
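On the int32 port map noted above: widening uint16 ports to int32 keys leaves room for a negative sentinel alongside the full port range. A toy illustration (not the project's code, which uses its own `firewall.Port*` constants):

```
package main

import "fmt"

// -1 stands in for "fragment" rules, since a fragmented packet's later
// fragments carry no port at all.
const fragmentKey = int32(-1)

func main() {
	rules := map[int32]string{
		fragmentKey: "fragment rule",
		443:         "https rule",
	}
	for _, port := range []int32{443, fragmentKey} {
		fmt.Println(port, "->", rules[port])
	}
}
```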
// NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts. // NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts.
func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.NebulaCertificate) *Firewall { func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.NebulaCertificate) *Firewall {
//TODO: error on 0 duration //TODO: error on 0 duration
@@ -137,13 +144,28 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
max = defaultTimeout max = defaultTimeout
} }
localIps := cidr.NewTree4() localIps := new(bart.Table[struct{}])
var assignedCIDR netip.Prefix
var assignedSet bool
for _, ip := range c.Details.Ips { for _, ip := range c.Details.Ips {
localIps.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{}) //TODO: IPV6-WORK the unmap is a bit unfortunate
nip, _ := netip.AddrFromSlice(ip.IP)
nip = nip.Unmap()
nprefix := netip.PrefixFrom(nip, nip.BitLen())
localIps.Insert(nprefix, struct{}{})
if !assignedSet {
// Only grabbing the first one in the cert since any more than that currently has undefined behavior
assignedCIDR = nprefix
assignedSet = true
}
} }
for _, n := range c.Details.Subnets { for _, n := range c.Details.Subnets {
localIps.AddCIDR(n, struct{}{}) nip, _ := netip.AddrFromSlice(n.IP)
ones, _ := n.Mask.Size()
nip = nip.Unmap()
localIps.Insert(netip.PrefixFrom(nip, ones), struct{}{})
} }
return &Firewall{ return &Firewall{
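A self-contained sketch of the `bart.Table` usage introduced above, assuming the `Insert`/`Lookup` signatures shown in this diff; the addresses are made up:

```
package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	// Mirrors the conversion above: certificate IPs become single-address
	// prefixes, subnets keep their mask, and membership checks use Lookup,
	// which returns a (value, ok) pair.
	localIps := new(bart.Table[struct{}])

	ip := netip.MustParseAddr("10.128.0.1")
	localIps.Insert(netip.PrefixFrom(ip, ip.BitLen()), struct{}{})
	localIps.Insert(netip.MustParsePrefix("192.168.100.0/24"), struct{}{})

	for _, probe := range []string{"10.128.0.1", "192.168.100.7", "172.16.0.1"} {
		_, ok := localIps.Lookup(netip.MustParseAddr(probe))
		fmt.Printf("%s covered: %v\n", probe, ok)
	}
}
```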
@@ -157,9 +179,10 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
UDPTimeout: UDPTimeout, UDPTimeout: UDPTimeout,
DefaultTimeout: defaultTimeout, DefaultTimeout: defaultTimeout,
localIps: localIps, localIps: localIps,
assignedCIDR: assignedCIDR,
hasSubnets: len(c.Details.Subnets) > 0,
l: l, l: l,
metricTCPRTT: metrics.GetOrRegisterHistogram("network.tcp.rtt", nil, metrics.NewExpDecaySample(1028, 0.015)),
incomingMetrics: firewallMetrics{ incomingMetrics: firewallMetrics{
droppedLocalIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.local_ip", nil), droppedLocalIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.local_ip", nil),
droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.remote_ip", nil), droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.remote_ip", nil),
@@ -183,6 +206,9 @@ func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *conf
//TODO: max_connections //TODO: max_connections
) )
//TODO: Flip to false after v1.9 release
fw.defaultLocalCIDRAny = c.GetBool("firewall.default_local_cidr_any", true)
inboundAction := c.GetString("firewall.inbound_action", "drop") inboundAction := c.GetString("firewall.inbound_action", "drop")
switch inboundAction { switch inboundAction {
case "reject": case "reject":
@@ -219,15 +245,15 @@ func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *conf
} }
// AddRule properly creates the in memory rule structure for a firewall table. // AddRule properly creates the in memory rule structure for a firewall table.
func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error { func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip, localIp netip.Prefix, caName string, caSha string) error {
// Under gomobile, stringing a nil pointer with fmt causes an abort in debug mode for iOS // Under gomobile, stringing a nil pointer with fmt causes an abort in debug mode for iOS
// https://github.com/golang/go/issues/14131 // https://github.com/golang/go/issues/14131
sIp := "" sIp := ""
if ip != nil { if ip.IsValid() {
sIp = ip.String() sIp = ip.String()
} }
lIp := "" lIp := ""
if localIp != nil { if localIp.IsValid() {
lIp = localIp.String() lIp = localIp.String()
} }
@@ -269,7 +295,7 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
return fmt.Errorf("unknown protocol %v", proto) return fmt.Errorf("unknown protocol %v", proto)
} }
return fp.addRule(startPort, endPort, groups, host, ip, localIp, caName, caSha) return fp.addRule(f, startPort, endPort, groups, host, ip, localIp, caName, caSha)
} }
// GetRuleHash returns a hash representation of all inbound and outbound rules // GetRuleHash returns a hash representation of all inbound and outbound rules
@@ -278,6 +304,18 @@ func (f *Firewall) GetRuleHash() string {
return hex.EncodeToString(sum[:]) return hex.EncodeToString(sum[:])
} }
// GetRuleHashFNV returns a uint32 FNV-1 hash representation the rules, for use as a metric value
func (f *Firewall) GetRuleHashFNV() uint32 {
h := fnv.New32a()
h.Write([]byte(f.rules))
return h.Sum32()
}
// GetRuleHashes returns both the sha256 and FNV-1a hashes, suitable for logging
func (f *Firewall) GetRuleHashes() string {
return "SHA:" + f.GetRuleHash() + ",FNV:" + strconv.FormatUint(uint64(f.GetRuleHashFNV()), 10)
}
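For context on why a 32-bit FNV digest is handy here: it fits losslessly in the `int64` a go-metrics gauge expects, so any rule change surfaces as a value change on `firewall.rules.hash`. A tiny sketch (not the project's code, input string is illustrative):

```
package main

import (
	"fmt"
	"hash/fnv"
)

func main() {
	// Hash the rendered rule string and widen to int64 for use as a gauge value.
	h := fnv.New32a()
	h.Write([]byte("inbound rules rendered as a string"))
	fmt.Println(int64(h.Sum32()))
}
```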
func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw FirewallInterface) error { func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw FirewallInterface) error {
var table string var table string
if inbound { if inbound {
@@ -352,17 +390,17 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
return fmt.Errorf("%s rule #%v; proto was not understood; `%s`", table, i, r.Proto) return fmt.Errorf("%s rule #%v; proto was not understood; `%s`", table, i, r.Proto)
} }
var cidr *net.IPNet var cidr netip.Prefix
if r.Cidr != "" { if r.Cidr != "" {
_, cidr, err = net.ParseCIDR(r.Cidr) cidr, err = netip.ParsePrefix(r.Cidr)
if err != nil { if err != nil {
return fmt.Errorf("%s rule #%v; cidr did not parse; %s", table, i, err) return fmt.Errorf("%s rule #%v; cidr did not parse; %s", table, i, err)
} }
} }
var localCidr *net.IPNet var localCidr netip.Prefix
if r.LocalCidr != "" { if r.LocalCidr != "" {
_, localCidr, err = net.ParseCIDR(r.LocalCidr) localCidr, err = netip.ParsePrefix(r.LocalCidr)
if err != nil { if err != nil {
return fmt.Errorf("%s rule #%v; local_cidr did not parse; %s", table, i, err) return fmt.Errorf("%s rule #%v; local_cidr did not parse; %s", table, i, err)
} }
@@ -383,15 +421,17 @@ var ErrNoMatchingRule = errors.New("no matching rule in firewall table")
// Drop returns an error if the packet should be dropped, explaining why. It // Drop returns an error if the packet should be dropped, explaining why. It
// returns nil if the packet should not be dropped. // returns nil if the packet should not be dropped.
func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) error { func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) error {
// Check if we spoke to this tuple, if we did then allow this packet // Check if we spoke to this tuple, if we did then allow this packet
if f.inConns(packet, fp, incoming, h, caPool, localCache) { if f.inConns(fp, h, caPool, localCache) {
return nil return nil
} }
// Make sure remote address matches nebula certificate // Make sure remote address matches nebula certificate
if remoteCidr := h.remoteCidr; remoteCidr != nil { if remoteCidr := h.remoteCidr; remoteCidr != nil {
if remoteCidr.Contains(fp.RemoteIP) == nil { //TODO: this would be better if we had a least specific match lookup, could waste time here, need to benchmark since the algo is different
_, ok := remoteCidr.Lookup(fp.RemoteIP)
if !ok {
f.metrics(incoming).droppedRemoteIP.Inc(1) f.metrics(incoming).droppedRemoteIP.Inc(1)
return ErrInvalidRemoteIP return ErrInvalidRemoteIP
} }
@@ -404,7 +444,9 @@ func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *Hos
} }
// Make sure we are supposed to be handling this local ip address // Make sure we are supposed to be handling this local ip address
if f.localIps.Contains(fp.LocalIP) == nil { //TODO: this would be better if we had a least specific match lookup, could waste time here, need to benchmark since the algo is different
_, ok := f.localIps.Lookup(fp.LocalIP)
if !ok {
f.metrics(incoming).droppedLocalIP.Inc(1) f.metrics(incoming).droppedLocalIP.Inc(1)
return ErrInvalidLocalIP return ErrInvalidLocalIP
} }
@@ -421,7 +463,7 @@ func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *Hos
} }
// We always want to conntrack since it is a faster operation // We always want to conntrack since it is a faster operation
f.addConn(packet, fp, incoming) f.addConn(fp, incoming)
return nil return nil
} }
@@ -447,9 +489,10 @@ func (f *Firewall) EmitStats() {
conntrack.Unlock() conntrack.Unlock()
metrics.GetOrRegisterGauge("firewall.conntrack.count", nil).Update(int64(conntrackCount)) metrics.GetOrRegisterGauge("firewall.conntrack.count", nil).Update(int64(conntrackCount))
metrics.GetOrRegisterGauge("firewall.rules.version", nil).Update(int64(f.rulesVersion)) metrics.GetOrRegisterGauge("firewall.rules.version", nil).Update(int64(f.rulesVersion))
metrics.GetOrRegisterGauge("firewall.rules.hash", nil).Update(int64(f.GetRuleHashFNV()))
} }
func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool { func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool {
if localCache != nil { if localCache != nil {
if _, ok := localCache[fp]; ok { if _, ok := localCache[fp]; ok {
return true return true
@@ -509,11 +552,6 @@ func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *
switch fp.Protocol { switch fp.Protocol {
case firewall.ProtoTCP: case firewall.ProtoTCP:
c.Expires = time.Now().Add(f.TCPTimeout) c.Expires = time.Now().Add(f.TCPTimeout)
if incoming {
f.checkTCPRTT(c, packet)
} else {
setTCPRTTTracking(c, packet)
}
case firewall.ProtoUDP: case firewall.ProtoUDP:
c.Expires = time.Now().Add(f.UDPTimeout) c.Expires = time.Now().Add(f.UDPTimeout)
default: default:
@@ -529,16 +567,13 @@ func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *
return true return true
} }
func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) { func (f *Firewall) addConn(fp firewall.Packet, incoming bool) {
var timeout time.Duration var timeout time.Duration
c := &conn{} c := &conn{}
switch fp.Protocol { switch fp.Protocol {
case firewall.ProtoTCP: case firewall.ProtoTCP:
timeout = f.TCPTimeout timeout = f.TCPTimeout
if !incoming {
setTCPRTTTracking(c, packet)
}
case firewall.ProtoUDP: case firewall.ProtoUDP:
timeout = f.UDPTimeout timeout = f.UDPTimeout
default: default:
@@ -564,7 +599,6 @@ func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) {
// Evict checks if a conntrack entry has expired, if so it is removed, if not it is re-added to the wheel // Evict checks if a conntrack entry has expired, if so it is removed, if not it is re-added to the wheel
// Caller must own the connMutex lock! // Caller must own the connMutex lock!
func (f *Firewall) evict(p firewall.Packet) { func (f *Firewall) evict(p firewall.Packet) {
//TODO: report a stat if the tcp rtt tracking was never resolved?
// Are we still tracking this conn? // Are we still tracking this conn?
conntrack := f.Conntrack conntrack := f.Conntrack
t, ok := conntrack.Conns[p] t, ok := conntrack.Conns[p]
@@ -608,7 +642,7 @@ func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.NebulaC
return false return false
} }
func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error { func (fp firewallPort) addRule(f *Firewall, startPort int32, endPort int32, groups []string, host string, ip, localIp netip.Prefix, caName string, caSha string) error {
if startPort > endPort { if startPort > endPort {
return fmt.Errorf("start port was lower than end port") return fmt.Errorf("start port was lower than end port")
} }
@@ -621,7 +655,7 @@ func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string,
} }
} }
if err := fp[i].addRule(groups, host, ip, localIp, caName, caSha); err != nil { if err := fp[i].addRule(f, groups, host, ip, localIp, caName, caSha); err != nil {
return err return err
} }
} }
@@ -652,13 +686,12 @@ func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.NebulaCer
return fp[firewall.PortAny].match(p, c, caPool) return fp[firewall.PortAny].match(p, c, caPool)
} }
func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPNet, caName, caSha string) error { func (fc *FirewallCA) addRule(f *Firewall, groups []string, host string, ip, localIp netip.Prefix, caName, caSha string) error {
fr := func() *FirewallRule { fr := func() *FirewallRule {
return &FirewallRule{ return &FirewallRule{
Hosts: make(map[string]struct{}), Hosts: make(map[string]*firewallLocalCIDR),
Groups: make([][]string, 0), Groups: make([]*firewallGroups, 0),
CIDR: cidr.NewTree4(), CIDR: new(bart.Table[*firewallLocalCIDR]),
LocalCIDR: cidr.NewTree4(),
} }
} }
@@ -667,14 +700,14 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPN
fc.Any = fr() fc.Any = fr()
} }
return fc.Any.addRule(groups, host, ip, localIp) return fc.Any.addRule(f, groups, host, ip, localIp)
} }
if caSha != "" { if caSha != "" {
if _, ok := fc.CAShas[caSha]; !ok { if _, ok := fc.CAShas[caSha]; !ok {
fc.CAShas[caSha] = fr() fc.CAShas[caSha] = fr()
} }
err := fc.CAShas[caSha].addRule(groups, host, ip, localIp) err := fc.CAShas[caSha].addRule(f, groups, host, ip, localIp)
if err != nil { if err != nil {
return err return err
} }
@@ -684,7 +717,7 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPN
if _, ok := fc.CANames[caName]; !ok { if _, ok := fc.CANames[caName]; !ok {
fc.CANames[caName] = fr() fc.CANames[caName] = fr()
} }
err := fc.CANames[caName].addRule(groups, host, ip, localIp) err := fc.CANames[caName].addRule(f, groups, host, ip, localIp)
if err != nil { if err != nil {
return err return err
} }
@@ -716,41 +749,63 @@ func (fc *FirewallCA) match(p firewall.Packet, c *cert.NebulaCertificate, caPool
return fc.CANames[s.Details.Name].match(p, c) return fc.CANames[s.Details.Name].match(p, c)
} }
func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet, localIp *net.IPNet) error { func (fr *FirewallRule) addRule(f *Firewall, groups []string, host string, ip, localCIDR netip.Prefix) error {
if fr.Any { flc := func() *firewallLocalCIDR {
return nil return &firewallLocalCIDR{
LocalCIDR: new(bart.Table[struct{}]),
}
} }
if fr.isAny(groups, host, ip, localIp) { if fr.isAny(groups, host, ip) {
fr.Any = true if fr.Any == nil {
// If it's any we need to wipe out any pre-existing rules to save on memory fr.Any = flc()
fr.Groups = make([][]string, 0)
fr.Hosts = make(map[string]struct{})
fr.CIDR = cidr.NewTree4()
fr.LocalCIDR = cidr.NewTree4()
} else {
if len(groups) > 0 {
fr.Groups = append(fr.Groups, groups)
} }
if host != "" { return fr.Any.addRule(f, localCIDR)
fr.Hosts[host] = struct{}{} }
if len(groups) > 0 {
nlc := flc()
err := nlc.addRule(f, localCIDR)
if err != nil {
return err
} }
if ip != nil { fr.Groups = append(fr.Groups, &firewallGroups{
fr.CIDR.AddCIDR(ip, struct{}{}) Groups: groups,
} LocalCIDR: nlc,
})
}
if localIp != nil { if host != "" {
fr.LocalCIDR.AddCIDR(localIp, struct{}{}) nlc := fr.Hosts[host]
if nlc == nil {
nlc = flc()
} }
err := nlc.addRule(f, localCIDR)
if err != nil {
return err
}
fr.Hosts[host] = nlc
}
if ip.IsValid() {
nlc, _ := fr.CIDR.Get(ip)
if nlc == nil {
nlc = flc()
}
err := nlc.addRule(f, localCIDR)
if err != nil {
return err
}
fr.CIDR.Insert(ip, nlc)
} }
return nil return nil
} }
func (fr *FirewallRule) isAny(groups []string, host string, ip, localIp *net.IPNet) bool { func (fr *FirewallRule) isAny(groups []string, host string, ip netip.Prefix) bool {
if len(groups) == 0 && host == "" && ip == nil && localIp == nil { if len(groups) == 0 && host == "" && !ip.IsValid() {
return true return true
} }
@@ -764,11 +819,7 @@ func (fr *FirewallRule) isAny(groups []string, host string, ip, localIp *net.IPN
return true return true
} }
if ip != nil && ip.Contains(net.IPv4(0, 0, 0, 0)) { if ip.IsValid() && ip.Bits() == 0 {
return true
}
if localIp != nil && localIp.Contains(net.IPv4(0, 0, 0, 0)) {
return true return true
} }
@@ -781,7 +832,7 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
} }
// Shortcut path for if groups, hosts, or cidr contained an `any` // Shortcut path for if groups, hosts, or cidr contained an `any`
if fr.Any { if fr.Any.match(p, c) {
return true return true
} }
@@ -789,7 +840,7 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
for _, sg := range fr.Groups { for _, sg := range fr.Groups {
found := false found := false
for _, g := range sg { for _, g := range sg.Groups {
if _, ok := c.Details.InvertedGroups[g]; !ok { if _, ok := c.Details.InvertedGroups[g]; !ok {
found = false found = false
break break
@@ -798,29 +849,57 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
found = true found = true
} }
if found { if found && sg.LocalCIDR.match(p, c) {
return true return true
} }
} }
if fr.Hosts != nil { if fr.Hosts != nil {
if _, ok := fr.Hosts[c.Details.Name]; ok { if flc, ok := fr.Hosts[c.Details.Name]; ok {
if flc.match(p, c) {
return true
}
}
}
for _, v := range fr.CIDR.Supernets(netip.PrefixFrom(p.RemoteIP, p.RemoteIP.BitLen())) {
if v.match(p, c) {
return true return true
} }
} }
if fr.CIDR != nil && fr.CIDR.Contains(p.RemoteIP) != nil {
return true
}
if fr.LocalCIDR != nil && fr.LocalCIDR.Contains(p.LocalIP) != nil {
return true
}
// No host, group, or cidr matched, bye bye
return false return false
} }
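
The bart table that replaces cidr.NewTree4 is used in two ways here: a supernet walk over the remote address in FirewallRule.match, and a longest-prefix lookup of the local address in firewallLocalCIDR.match just below. A small standalone sketch of both calls, using the same API as the diff (Insert, Lookup, Supernets) with illustrative prefixes and values:

package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	t := new(bart.Table[string])
	t.Insert(netip.MustParsePrefix("10.0.0.0/8"), "wide")
	t.Insert(netip.MustParsePrefix("10.1.2.0/24"), "narrow")

	// Longest-prefix lookup by address, as firewallLocalCIDR.match does for p.LocalIP.
	if v, ok := t.Lookup(netip.MustParseAddr("10.1.2.3")); ok {
		fmt.Println("longest match:", v) // narrow
	}

	// Supernet walk over a host /32, as FirewallRule.match does for p.RemoteIP:
	// every covering prefix gets a chance to match its own local CIDR table.
	host := netip.MustParseAddr("10.1.2.3")
	for _, v := range t.Supernets(netip.PrefixFrom(host, host.BitLen())) {
		fmt.Println("covering entry:", v)
	}
}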
func (flc *firewallLocalCIDR) addRule(f *Firewall, localIp netip.Prefix) error {
if !localIp.IsValid() {
if !f.hasSubnets || f.defaultLocalCIDRAny {
flc.Any = true
return nil
}
localIp = f.assignedCIDR
} else if localIp.Bits() == 0 {
flc.Any = true
}
flc.LocalCIDR.Insert(localIp, struct{}{})
return nil
}
func (flc *firewallLocalCIDR) match(p firewall.Packet, c *cert.NebulaCertificate) bool {
if flc == nil {
return false
}
if flc.Any {
return true
}
_, ok := flc.LocalCIDR.Lookup(p.LocalIP)
return ok
}
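
Worth noting: when a rule omits local_cidr, the default is no longer always "any local destination". A distilled restating of the decision firewallLocalCIDR.addRule makes above, not the method itself; the hasSubnets, defaultLocalCIDRAny and assignedCIDR fields come from the diff, and how they are wired up from config and the node certificate is assumed to happen elsewhere:

package main

import (
	"fmt"
	"net/netip"
)

// effectiveLocalCIDR mirrors the default handling sketched above.
func effectiveLocalCIDR(ruleLocal netip.Prefix, hasSubnets, defaultLocalCIDRAny bool, assigned netip.Prefix) (matchAny bool, pfx netip.Prefix) {
	if !ruleLocal.IsValid() {
		if !hasSubnets || defaultLocalCIDRAny {
			return true, netip.Prefix{} // no extra subnets (or opted out): match every local destination
		}
		return false, assigned // node has subnets: default to the cert-assigned address only
	}
	if ruleLocal.Bits() == 0 {
		return true, ruleLocal // an explicit 0.0.0.0/0 still means any
	}
	return false, ruleLocal
}

func main() {
	assigned := netip.MustParsePrefix("192.168.100.1/32")
	fmt.Println(effectiveLocalCIDR(netip.Prefix{}, true, false, assigned))  // false 192.168.100.1/32
	fmt.Println(effectiveLocalCIDR(netip.Prefix{}, false, false, assigned)) // true invalid Prefix
	fmt.Println(effectiveLocalCIDR(netip.MustParsePrefix("0.0.0.0/0"), true, false, assigned))
}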
type rule struct { type rule struct {
Port string Port string
Code string Code string
@@ -934,42 +1013,3 @@ func parsePort(s string) (startPort, endPort int32, err error) {
return return
} }
// TODO: write tests for these
func setTCPRTTTracking(c *conn, p []byte) {
if c.Seq != 0 {
return
}
ihl := int(p[0]&0x0f) << 2
// Don't track FIN packets
if p[ihl+13]&tcpFIN != 0 {
return
}
c.Seq = binary.BigEndian.Uint32(p[ihl+4 : ihl+8])
c.Sent = time.Now()
}
func (f *Firewall) checkTCPRTT(c *conn, p []byte) bool {
if c.Seq == 0 {
return false
}
ihl := int(p[0]&0x0f) << 2
if p[ihl+13]&tcpACK == 0 {
return false
}
// Deal with wrap around, signed int cuts the ack window in half
// 0 is a bad ack, no data acknowledged
// positive number is a bad ack, ack is over half the window away
if int32(c.Seq-binary.BigEndian.Uint32(p[ihl+8:ihl+12])) >= 0 {
return false
}
f.metricTCPRTT.Update(time.Since(c.Sent).Nanoseconds())
c.Seq = 0
return true
}
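
The removed checkTCPRTT relied on a signed 32-bit difference so the ACK comparison still works across uint32 sequence-number wrap-around. A standalone illustration of just that comparison; the acked helper and its sample values are mine, mirroring cases from the test deleted further down:

package main

import "fmt"

// acked reports whether ack has advanced past seq, treating the difference as
// a signed 32-bit value so wrap-around near the top of the uint32 range is handled.
func acked(seq, ack uint32) bool {
	return int32(seq-ack) < 0
}

func main() {
	fmt.Println(acked(1, 81))             // true: ack 81 is past seq 1
	fmt.Println(acked(1, 0))              // false: nothing acknowledged yet
	fmt.Println(acked(^uint32(0)-20, 81)) // true: the ack wrapped past uint32 max and still counts
	fmt.Println(acked(^uint32(0)/2, 0))   // false: the ack is more than half the window away
}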


@@ -3,8 +3,7 @@ package firewall
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"net/netip"
"github.com/slackhq/nebula/iputil"
) )
type m map[string]interface{} type m map[string]interface{}
@@ -20,8 +19,8 @@ const (
) )
type Packet struct { type Packet struct {
LocalIP iputil.VpnIp LocalIP netip.Addr
RemoteIP iputil.VpnIp RemoteIP netip.Addr
LocalPort uint16 LocalPort uint16
RemotePort uint16 RemotePort uint16
Protocol uint8 Protocol uint8
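
With the Packet fields moving from iputil.VpnIp to netip.Addr, packets are constructed from parsed addresses directly. A minimal construction in the style of the updated tests below:

package main

import (
	"fmt"
	"net/netip"

	"github.com/slackhq/nebula/firewall"
)

func main() {
	p := firewall.Packet{
		LocalIP:    netip.MustParseAddr("1.2.3.4"),
		RemoteIP:   netip.MustParseAddr("1.2.3.4"),
		LocalPort:  10,
		RemotePort: 90,
		Protocol:   firewall.ProtoUDP,
	}
	fmt.Printf("%s:%d -> %s:%d\n", p.RemoteIP, p.RemotePort, p.LocalIP, p.LocalPort)
}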

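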

@@ -2,18 +2,16 @@ package nebula
import ( import (
"bytes" "bytes"
"encoding/binary"
"errors" "errors"
"math" "math"
"net" "net"
"net/netip"
"testing" "testing"
"time" "time"
"github.com/rcrowley/go-metrics"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config" "github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/firewall" "github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test" "github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
@@ -67,77 +65,62 @@ func TestFirewall_AddRule(t *testing.T) {
assert.NotNil(t, fw.InRules) assert.NotNil(t, fw.InRules)
assert.NotNil(t, fw.OutRules) assert.NotNil(t, fw.OutRules)
_, ti, _ := net.ParseCIDR("1.2.3.4/32") ti, err := netip.ParsePrefix("1.2.3.4/32")
assert.NoError(t, err)
assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
// An empty rule is any // An empty rule is any
assert.True(t, fw.InRules.TCP[1].Any.Any) assert.True(t, fw.InRules.TCP[1].Any.Any.Any)
assert.Empty(t, fw.InRules.TCP[1].Any.Groups) assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
assert.Empty(t, fw.InRules.TCP[1].Any.Hosts) assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
assert.False(t, fw.InRules.UDP[1].Any.Any) assert.Nil(t, fw.InRules.UDP[1].Any.Any)
assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1") assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0].Groups, "g1")
assert.Empty(t, fw.InRules.UDP[1].Any.Hosts) assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", netip.Prefix{}, netip.Prefix{}, "", ""))
assert.False(t, fw.InRules.ICMP[1].Any.Any) assert.Nil(t, fw.InRules.ICMP[1].Any.Any)
assert.Empty(t, fw.InRules.ICMP[1].Any.Groups) assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1") assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, nil, "", "")) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, netip.Prefix{}, "", ""))
assert.False(t, fw.OutRules.AnyProto[1].Any.Any) assert.Nil(t, fw.OutRules.AnyProto[1].Any.Any)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups) _, ok := fw.OutRules.AnyProto[1].Any.CIDR.Get(ti)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts) assert.True(t, ok)
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", nil, ti, "", "")) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, ti, "", ""))
assert.False(t, fw.OutRules.AnyProto[1].Any.Any) assert.NotNil(t, fw.OutRules.AnyProto[1].Any.Any)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups) _, ok = fw.OutRules.AnyProto[1].Any.Any.LocalCIDR.Get(ti)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts) assert.True(t, ok)
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "ca-name", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "ca-name", ""))
assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name") assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", "ca-sha")) assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "", "ca-sha"))
assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha") assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")
// Set any and clear fields
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, ti, "", "")) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", netip.Prefix{}, netip.Prefix{}, "", ""))
assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0]) assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1")
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))
// run twice just to make sure
//TODO: these ANY rules should clear the CA firewall portion
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", ""))
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups)
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts)
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", "")) anyIp, err := netip.ParsePrefix("0.0.0.0/0")
assert.True(t, fw.OutRules.AnyProto[0].Any.Any) assert.NoError(t, err)
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, netip.Prefix{}, "", ""))
_, anyIp, _ := net.ParseCIDR("0.0.0.0/0") assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, nil, "", ""))
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
// Test error conditions // Test error conditions
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, nil, "", "")) assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", nil, nil, "", "")) assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
} }
func TestFirewall_Drop(t *testing.T) { func TestFirewall_Drop(t *testing.T) {
@@ -146,8 +129,8 @@ func TestFirewall_Drop(t *testing.T) {
l.SetOutput(ob) l.SetOutput(ob)
p := firewall.Packet{ p := firewall.Packet{
LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), LocalIP: netip.MustParseAddr("1.2.3.4"),
RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), RemoteIP: netip.MustParseAddr("1.2.3.4"),
LocalPort: 10, LocalPort: 10,
RemotePort: 90, RemotePort: 90,
Protocol: firewall.ProtoUDP, Protocol: firewall.ProtoUDP,
@@ -172,83 +155,91 @@ func TestFirewall_Drop(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c, peerCert: &c,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP), vpnIp: netip.MustParseAddr("1.2.3.4"),
} }
h.CreateRemoteCIDR(&c) h.CreateRemoteCIDR(&c)
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
cp := cert.NewCAPool() cp := cert.NewCAPool()
// Drop outbound // Drop outbound
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule) assert.Equal(t, ErrNoMatchingRule, fw.Drop(p, false, &h, cp, nil))
// Allow inbound // Allow inbound
resetConntrack(fw) resetConntrack(fw)
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
// Allow outbound because conntrack // Allow outbound because conntrack
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil)) assert.NoError(t, fw.Drop(p, false, &h, cp, nil))
// test remote mismatch // test remote mismatch
oldRemote := p.RemoteIP oldRemote := p.RemoteIP
p.RemoteIP = iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 10)) p.RemoteIP = netip.MustParseAddr("1.2.3.10")
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrInvalidRemoteIP) assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrInvalidRemoteIP)
p.RemoteIP = oldRemote p.RemoteIP = oldRemote
// ensure signer doesn't get in the way of group checks // ensure signer doesn't get in the way of group checks
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum-bad")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule) assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
// test caSha doesn't drop on match // test caSha doesn't drop on match
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum-bad")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
// ensure ca name doesn't get in the way of group checks // ensure ca name doesn't get in the way of group checks
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}} cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good-bad", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule) assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
// test caName doesn't drop on match // test caName doesn't drop on match
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}} cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good-bad", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
} }
func BenchmarkFirewallTable_match(b *testing.B) { func BenchmarkFirewallTable_match(b *testing.B) {
f := &Firewall{}
ft := FirewallTable{ ft := FirewallTable{
TCP: firewallPort{}, TCP: firewallPort{},
} }
_, n, _ := net.ParseCIDR("172.1.1.1/32") pfix := netip.MustParsePrefix("172.1.1.1/32")
_ = ft.TCP.addRule(10, 10, []string{"good-group"}, "good-host", n, n, "", "") _ = ft.TCP.addRule(f, 10, 10, []string{"good-group"}, "good-host", pfix, netip.Prefix{}, "", "")
_ = ft.TCP.addRule(10, 10, []string{"good-group2"}, "good-host", n, n, "", "") _ = ft.TCP.addRule(f, 100, 100, []string{"good-group"}, "good-host", netip.Prefix{}, pfix, "", "")
_ = ft.TCP.addRule(10, 10, []string{"good-group3"}, "good-host", n, n, "", "")
_ = ft.TCP.addRule(10, 10, []string{"good-group4"}, "good-host", n, n, "", "")
_ = ft.TCP.addRule(10, 10, []string{"good-group, good-group1"}, "good-host", n, n, "", "")
cp := cert.NewCAPool() cp := cert.NewCAPool()
b.Run("fail on proto", func(b *testing.B) { b.Run("fail on proto", func(b *testing.B) {
// This benchmark is showing us the cost of failing to match the protocol
c := &cert.NebulaCertificate{} c := &cert.NebulaCertificate{}
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoUDP}, true, c, cp) assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoUDP}, true, c, cp))
} }
}) })
b.Run("fail on port", func(b *testing.B) { b.Run("pass proto, fail on port", func(b *testing.B) {
// This benchmark is showing us the cost of matching a specific protocol but failing to match the port
c := &cert.NebulaCertificate{} c := &cert.NebulaCertificate{}
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 1}, true, c, cp) assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 1}, true, c, cp))
} }
}) })
b.Run("fail all group, name, and cidr", func(b *testing.B) { b.Run("pass proto, port, fail on local CIDR", func(b *testing.B) {
c := &cert.NebulaCertificate{}
ip := netip.MustParsePrefix("9.254.254.254/32")
for n := 0; n < b.N; n++ {
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip.Addr()}, true, c, cp))
}
})
b.Run("pass proto, port, any local CIDR, fail all group, name, and cidr", func(b *testing.B) {
_, ip, _ := net.ParseCIDR("9.254.254.254/32") _, ip, _ := net.ParseCIDR("9.254.254.254/32")
c := &cert.NebulaCertificate{ c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{ Details: cert.NebulaCertificateDetails{
@@ -258,11 +249,25 @@ func BenchmarkFirewallTable_match(b *testing.B) {
}, },
} }
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp) assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
} }
}) })
b.Run("pass on group", func(b *testing.B) { b.Run("pass proto, port, specific local CIDR, fail all group, name, and cidr", func(b *testing.B) {
_, ip, _ := net.ParseCIDR("9.254.254.254/32")
c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}},
Name: "nope",
Ips: []*net.IPNet{ip},
},
}
for n := 0; n < b.N; n++ {
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: pfix.Addr()}, true, c, cp))
}
})
b.Run("pass on group on any local cidr", func(b *testing.B) {
c := &cert.NebulaCertificate{ c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{ Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"good-group": {}}, InvertedGroups: map[string]struct{}{"good-group": {}},
@@ -270,7 +275,19 @@ func BenchmarkFirewallTable_match(b *testing.B) {
}, },
} }
for n := 0; n < b.N; n++ { for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp) assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
}
})
b.Run("pass on group on specific local cidr", func(b *testing.B) {
c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"good-group": {}},
Name: "nope",
},
}
for n := 0; n < b.N; n++ {
assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: pfix.Addr()}, true, c, cp))
} }
}) })
@@ -285,60 +302,60 @@ func BenchmarkFirewallTable_match(b *testing.B) {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp) ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
} }
}) })
//
b.Run("pass on ip", func(b *testing.B) { //b.Run("pass on ip", func(b *testing.B) {
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1)) // ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{ // c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{ // Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}}, // InvertedGroups: map[string]struct{}{"nope": {}},
Name: "good-host", // Name: "good-host",
}, // },
} // }
for n := 0; n < b.N; n++ { // for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp) // ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp)
} // }
}) //})
//
b.Run("pass on local ip", func(b *testing.B) { //b.Run("pass on local ip", func(b *testing.B) {
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1)) // ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{ // c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{ // Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}}, // InvertedGroups: map[string]struct{}{"nope": {}},
Name: "good-host", // Name: "good-host",
}, // },
} // }
for n := 0; n < b.N; n++ { // for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, LocalIP: ip}, true, c, cp) // ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, LocalIP: ip}, true, c, cp)
} // }
}) //})
//
_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, n, "", "") //_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, n, "", "")
//
b.Run("pass on ip with any port", func(b *testing.B) { //b.Run("pass on ip with any port", func(b *testing.B) {
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1)) // ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{ // c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{ // Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}}, // InvertedGroups: map[string]struct{}{"nope": {}},
Name: "good-host", // Name: "good-host",
}, // },
} // }
for n := 0; n < b.N; n++ { // for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp) // ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
} // }
}) //})
//
b.Run("pass on local ip with any port", func(b *testing.B) { //b.Run("pass on local ip with any port", func(b *testing.B) {
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1)) // ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{ // c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{ // Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}}, // InvertedGroups: map[string]struct{}{"nope": {}},
Name: "good-host", // Name: "good-host",
}, // },
} // }
for n := 0; n < b.N; n++ { // for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip}, true, c, cp) // ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip}, true, c, cp)
} // }
}) //})
} }
func TestFirewall_Drop2(t *testing.T) { func TestFirewall_Drop2(t *testing.T) {
@@ -347,8 +364,8 @@ func TestFirewall_Drop2(t *testing.T) {
l.SetOutput(ob) l.SetOutput(ob)
p := firewall.Packet{ p := firewall.Packet{
LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), LocalIP: netip.MustParseAddr("1.2.3.4"),
RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), RemoteIP: netip.MustParseAddr("1.2.3.4"),
LocalPort: 10, LocalPort: 10,
RemotePort: 90, RemotePort: 90,
Protocol: firewall.ProtoUDP, Protocol: firewall.ProtoUDP,
@@ -371,7 +388,7 @@ func TestFirewall_Drop2(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c, peerCert: &c,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP), vpnIp: netip.MustParseAddr(ipNet.IP.String()),
} }
h.CreateRemoteCIDR(&c) h.CreateRemoteCIDR(&c)
@@ -390,14 +407,14 @@ func TestFirewall_Drop2(t *testing.T) {
h1.CreateRemoteCIDR(&c1) h1.CreateRemoteCIDR(&c1)
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
cp := cert.NewCAPool() cp := cert.NewCAPool()
// h1/c1 lacks the proper groups // h1/c1 lacks the proper groups
assert.Error(t, fw.Drop([]byte{}, p, true, &h1, cp, nil), ErrNoMatchingRule) assert.Error(t, fw.Drop(p, true, &h1, cp, nil), ErrNoMatchingRule)
// c has the proper groups // c has the proper groups
resetConntrack(fw) resetConntrack(fw)
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
} }
func TestFirewall_Drop3(t *testing.T) { func TestFirewall_Drop3(t *testing.T) {
@@ -406,8 +423,8 @@ func TestFirewall_Drop3(t *testing.T) {
l.SetOutput(ob) l.SetOutput(ob)
p := firewall.Packet{ p := firewall.Packet{
LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), LocalIP: netip.MustParseAddr("1.2.3.4"),
RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), RemoteIP: netip.MustParseAddr("1.2.3.4"),
LocalPort: 1, LocalPort: 1,
RemotePort: 1, RemotePort: 1,
Protocol: firewall.ProtoUDP, Protocol: firewall.ProtoUDP,
@@ -437,7 +454,7 @@ func TestFirewall_Drop3(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c1, peerCert: &c1,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP), vpnIp: netip.MustParseAddr(ipNet.IP.String()),
} }
h1.CreateRemoteCIDR(&c1) h1.CreateRemoteCIDR(&c1)
@@ -452,7 +469,7 @@ func TestFirewall_Drop3(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c2, peerCert: &c2,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP), vpnIp: netip.MustParseAddr(ipNet.IP.String()),
} }
h2.CreateRemoteCIDR(&c2) h2.CreateRemoteCIDR(&c2)
@@ -467,23 +484,23 @@ func TestFirewall_Drop3(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c3, peerCert: &c3,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP), vpnIp: netip.MustParseAddr(ipNet.IP.String()),
} }
h3.CreateRemoteCIDR(&c3) h3.CreateRemoteCIDR(&c3)
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", netip.Prefix{}, netip.Prefix{}, "", ""))
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", nil, nil, "", "signer-sha")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-sha"))
cp := cert.NewCAPool() cp := cert.NewCAPool()
// c1 should pass because host match // c1 should pass because host match
assert.NoError(t, fw.Drop([]byte{}, p, true, &h1, cp, nil)) assert.NoError(t, fw.Drop(p, true, &h1, cp, nil))
// c2 should pass because ca sha match // c2 should pass because ca sha match
resetConntrack(fw) resetConntrack(fw)
assert.NoError(t, fw.Drop([]byte{}, p, true, &h2, cp, nil)) assert.NoError(t, fw.Drop(p, true, &h2, cp, nil))
// c3 should fail because no match // c3 should fail because no match
resetConntrack(fw) resetConntrack(fw)
assert.Equal(t, fw.Drop([]byte{}, p, true, &h3, cp, nil), ErrNoMatchingRule) assert.Equal(t, fw.Drop(p, true, &h3, cp, nil), ErrNoMatchingRule)
} }
func TestFirewall_DropConntrackReload(t *testing.T) { func TestFirewall_DropConntrackReload(t *testing.T) {
@@ -492,8 +509,8 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
l.SetOutput(ob) l.SetOutput(ob)
p := firewall.Packet{ p := firewall.Packet{
LocalIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), LocalIP: netip.MustParseAddr("1.2.3.4"),
RemoteIP: iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)), RemoteIP: netip.MustParseAddr("1.2.3.4"),
LocalPort: 10, LocalPort: 10,
RemotePort: 90, RemotePort: 90,
Protocol: firewall.ProtoUDP, Protocol: firewall.ProtoUDP,
@@ -518,39 +535,39 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
ConnectionState: &ConnectionState{ ConnectionState: &ConnectionState{
peerCert: &c, peerCert: &c,
}, },
vpnIp: iputil.Ip2VpnIp(ipNet.IP), vpnIp: netip.MustParseAddr(ipNet.IP.String()),
} }
h.CreateRemoteCIDR(&c) h.CreateRemoteCIDR(&c)
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
cp := cert.NewCAPool() cp := cert.NewCAPool()
// Drop outbound // Drop outbound
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule) assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrNoMatchingRule)
// Allow inbound // Allow inbound
resetConntrack(fw) resetConntrack(fw)
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil)) assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
// Allow outbound because conntrack // Allow outbound because conntrack
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil)) assert.NoError(t, fw.Drop(p, false, &h, cp, nil))
oldFw := fw oldFw := fw
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
fw.Conntrack = oldFw.Conntrack fw.Conntrack = oldFw.Conntrack
fw.rulesVersion = oldFw.rulesVersion + 1 fw.rulesVersion = oldFw.rulesVersion + 1
// Allow outbound because conntrack and new rules allow port 10 // Allow outbound because conntrack and new rules allow port 10
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil)) assert.NoError(t, fw.Drop(p, false, &h, cp, nil))
oldFw = fw oldFw = fw
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c) fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", nil, nil, "", "")) assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
fw.Conntrack = oldFw.Conntrack fw.Conntrack = oldFw.Conntrack
fw.rulesVersion = oldFw.rulesVersion + 1 fw.rulesVersion = oldFw.rulesVersion + 1
// Drop outbound because conntrack doesn't match new ruleset // Drop outbound because conntrack doesn't match new ruleset
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule) assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrNoMatchingRule)
} }
func BenchmarkLookup(b *testing.B) { func BenchmarkLookup(b *testing.B) {
@@ -709,13 +726,13 @@ func TestNewFirewallFromConfig(t *testing.T) {
conf = config.NewC(l) conf = config.NewC(l)
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}}
_, err = NewFirewallFromConfig(l, c, conf) _, err = NewFirewallFromConfig(l, c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh") assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; netip.ParsePrefix(\"testh\"): no '/'")
// Test local_cidr parse error // Test local_cidr parse error
conf = config.NewC(l) conf = config.NewC(l)
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "local_cidr": "testh", "proto": "any"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "local_cidr": "testh", "proto": "any"}}}
_, err = NewFirewallFromConfig(l, c, conf) _, err = NewFirewallFromConfig(l, c, conf)
assert.EqualError(t, err, "firewall.outbound rule #0; local_cidr did not parse; invalid CIDR address: testh") assert.EqualError(t, err, "firewall.outbound rule #0; local_cidr did not parse; netip.ParsePrefix(\"testh\"): no '/'")
// Test both group and groups // Test both group and groups
conf = config.NewC(l) conf = config.NewC(l)
@@ -731,78 +748,78 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
mf := &mockFirewall{} mf := &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
// Test adding udp rule // Test adding udp rule
conf = config.NewC(l) conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
// Test adding icmp rule // Test adding icmp rule
conf = config.NewC(l) conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
// Test adding any rule // Test adding any rule
conf = config.NewC(l) conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
// Test adding rule with cidr // Test adding rule with cidr
cidr := &net.IPNet{IP: net.ParseIP("10.0.0.0").To4(), Mask: net.IPv4Mask(255, 0, 0, 0)} cidr := netip.MustParsePrefix("10.0.0.0/8")
conf = config.NewC(l) conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "cidr": cidr.String()}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "cidr": cidr.String()}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr, localIp: netip.Prefix{}}, mf.lastCall)
// Test adding rule with local_cidr // Test adding rule with local_cidr
conf = config.NewC(l) conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "local_cidr": cidr.String()}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "local_cidr": cidr.String()}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: cidr}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: cidr}, mf.lastCall)
// Test adding rule with ca_sha // Test adding rule with ca_sha
conf = config.NewC(l) conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caSha: "12312313123"}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: netip.Prefix{}, caSha: "12312313123"}, mf.lastCall)
// Test adding rule with ca_name // Test adding rule with ca_name
conf = config.NewC(l) conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caName: "root01"}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: netip.Prefix{}, caName: "root01"}, mf.lastCall)
// Test single group // Test single group
conf = config.NewC(l) conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
// Test single groups // Test single groups
conf = config.NewC(l) conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
// Test multiple AND groups // Test multiple AND groups
conf = config.NewC(l) conf = config.NewC(l)
mf = &mockFirewall{} mf = &mockFirewall{}
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}} conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf)) assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil, localIp: nil}, mf.lastCall) assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
// Test Add error // Test Add error
conf = config.NewC(l) conf = config.NewC(l)
@@ -812,97 +829,6 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
assert.EqualError(t, AddFirewallRulesFromConfig(l, true, conf, mf), "firewall.inbound rule #0; `test error`") assert.EqualError(t, AddFirewallRulesFromConfig(l, true, conf, mf), "firewall.inbound rule #0; `test error`")
} }
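
A hypothetical extra case in the same style, exercising local_cidr end to end through the config parser with the new netip types (not part of this diff; it reuses the mockFirewall helper defined further below):

package nebula

import (
	"net/netip"
	"testing"

	"github.com/slackhq/nebula/config"
	"github.com/slackhq/nebula/firewall"
	"github.com/slackhq/nebula/test"
	"github.com/stretchr/testify/assert"
)

func TestAddRuleWithLocalCIDRFromConfig(t *testing.T) {
	l := test.NewLogger()
	conf := config.NewC(l)
	mf := &mockFirewall{}
	conf.Settings["firewall"] = map[interface{}]interface{}{
		"inbound": []interface{}{map[interface{}]interface{}{
			"port": "443", "proto": "tcp", "group": "web", "local_cidr": "10.0.0.0/24",
		}},
	}
	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
	assert.Equal(t, addRuleCall{
		incoming:  true,
		proto:     firewall.ProtoTCP,
		startPort: 443,
		endPort:   443,
		groups:    []string{"web"},
		ip:        netip.Prefix{},
		localIp:   netip.MustParsePrefix("10.0.0.0/24"),
	}, mf.lastCall)
}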
func TestTCPRTTTracking(t *testing.T) {
b := make([]byte, 200)
// Max ip IHL (60 bytes) and tcp IHL (60 bytes)
b[0] = 15
b[60+12] = 15 << 4
f := Firewall{
metricTCPRTT: metrics.GetOrRegisterHistogram("nope", nil, metrics.NewExpDecaySample(1028, 0.015)),
}
// Set SEQ to 1
binary.BigEndian.PutUint32(b[60+4:60+8], 1)
c := &conn{}
setTCPRTTTracking(c, b)
assert.Equal(t, uint32(1), c.Seq)
// Bad ack - no ack flag
binary.BigEndian.PutUint32(b[60+8:60+12], 80)
assert.False(t, f.checkTCPRTT(c, b))
// Bad ack, number is too low
binary.BigEndian.PutUint32(b[60+8:60+12], 0)
b[60+13] = uint8(0x10)
assert.False(t, f.checkTCPRTT(c, b))
// Good ack
binary.BigEndian.PutUint32(b[60+8:60+12], 80)
assert.True(t, f.checkTCPRTT(c, b))
assert.Equal(t, uint32(0), c.Seq)
// Set SEQ to 1
binary.BigEndian.PutUint32(b[60+4:60+8], 1)
c = &conn{}
setTCPRTTTracking(c, b)
assert.Equal(t, uint32(1), c.Seq)
// Good acks
binary.BigEndian.PutUint32(b[60+8:60+12], 81)
assert.True(t, f.checkTCPRTT(c, b))
assert.Equal(t, uint32(0), c.Seq)
// Set SEQ to max uint32 - 20
binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0)-20)
c = &conn{}
setTCPRTTTracking(c, b)
assert.Equal(t, ^uint32(0)-20, c.Seq)
// Good acks
binary.BigEndian.PutUint32(b[60+8:60+12], 81)
assert.True(t, f.checkTCPRTT(c, b))
assert.Equal(t, uint32(0), c.Seq)
// Set SEQ to max uint32 / 2
binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0)/2)
c = &conn{}
setTCPRTTTracking(c, b)
assert.Equal(t, ^uint32(0)/2, c.Seq)
// Below
binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2-1)
assert.False(t, f.checkTCPRTT(c, b))
assert.Equal(t, ^uint32(0)/2, c.Seq)
// Halfway below
binary.BigEndian.PutUint32(b[60+8:60+12], uint32(0))
assert.False(t, f.checkTCPRTT(c, b))
assert.Equal(t, ^uint32(0)/2, c.Seq)
// Halfway above is ok
binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0))
assert.True(t, f.checkTCPRTT(c, b))
assert.Equal(t, uint32(0), c.Seq)
// Set SEQ to max uint32
binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0))
c = &conn{}
setTCPRTTTracking(c, b)
assert.Equal(t, ^uint32(0), c.Seq)
// Halfway + 1 above
binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2+1)
assert.False(t, f.checkTCPRTT(c, b))
assert.Equal(t, ^uint32(0), c.Seq)
// Halfway above
binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2)
assert.True(t, f.checkTCPRTT(c, b))
assert.Equal(t, uint32(0), c.Seq)
}
func TestFirewall_convertRule(t *testing.T) { func TestFirewall_convertRule(t *testing.T) {
l := test.NewLogger() l := test.NewLogger()
ob := &bytes.Buffer{} ob := &bytes.Buffer{}
@@ -946,8 +872,8 @@ type addRuleCall struct {
endPort int32 endPort int32
groups []string groups []string
host string host string
ip *net.IPNet ip netip.Prefix
localIp *net.IPNet localIp netip.Prefix
caName string caName string
caSha string caSha string
} }
@@ -957,7 +883,7 @@ type mockFirewall struct {
nextCallReturn error nextCallReturn error
} }
func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error { func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip netip.Prefix, localIp netip.Prefix, caName string, caSha string) error {
mf.lastCall = addRuleCall{ mf.lastCall = addRuleCall{
incoming: incoming, incoming: incoming,
proto: proto, proto: proto,

go.mod

@@ -1,49 +1,52 @@
module github.com/slackhq/nebula module github.com/slackhq/nebula
go 1.20 go 1.24.0
require ( require (
dario.cat/mergo v1.0.0
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
github.com/armon/go-radix v1.0.0 github.com/armon/go-radix v1.0.0
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
github.com/flynn/noise v1.0.0 github.com/flynn/noise v1.1.0
github.com/gaissmai/bart v0.25.1
github.com/gogo/protobuf v1.3.2 github.com/gogo/protobuf v1.3.2
github.com/google/gopacket v1.1.19 github.com/google/gopacket v1.1.19
github.com/imdario/mergo v0.3.15
github.com/kardianos/service v1.2.2 github.com/kardianos/service v1.2.2
github.com/miekg/dns v1.1.54 github.com/miekg/dns v1.1.61
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
github.com/prometheus/client_golang v1.15.1 github.com/prometheus/client_golang v1.19.1
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/sirupsen/logrus v1.9.0 github.com/sirupsen/logrus v1.9.3
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
github.com/stretchr/testify v1.8.2 github.com/stretchr/testify v1.9.0
github.com/vishvananda/netlink v1.1.0 github.com/vishvananda/netlink v1.2.1-beta.2
golang.org/x/crypto v0.8.0 golang.org/x/crypto v0.43.0
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
golang.org/x/net v0.9.0 golang.org/x/net v0.45.0
golang.org/x/sys v0.8.0 golang.org/x/sync v0.8.0
golang.org/x/term v0.8.0 golang.org/x/sys v0.37.0
golang.org/x/term v0.36.0
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b
golang.zx2c4.com/wireguard/windows v0.5.3 golang.zx2c4.com/wireguard/windows v0.5.3
google.golang.org/protobuf v1.30.0 google.golang.org/protobuf v1.34.2
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe
) )
require ( require (
github.com/beorn7/perks v1.0.1 // indirect github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/golang/protobuf v1.5.3 // indirect github.com/google/btree v1.1.2 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.42.0 // indirect github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/vishvananda/netns v0.0.4 // indirect github.com/vishvananda/netns v0.0.4 // indirect
golang.org/x/mod v0.10.0 // indirect golang.org/x/mod v0.18.0 // indirect
golang.org/x/tools v0.8.0 // indirect golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.22.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
) )

go.sum

@@ -1,4 +1,6 @@
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -20,8 +22,10 @@ github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/gaissmai/bart v0.25.1 h1:ctl4nH/za+trcbfmZz9uO9xGBZD684GWaDMsjMSo1l8=
github.com/gaissmai/bart v0.25.1/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -42,20 +46,18 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -72,14 +74,13 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -97,24 +98,24 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -122,27 +123,23 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M= github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E= github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0= github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -152,16 +149,16 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ= golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE= golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0=
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o= golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY=
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -172,8 +169,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -181,44 +178,50 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y= golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4= golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg= golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI= golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b h1:J1CaxgLerRR5lgx3wnr6L04cJFbWoceSK9JWBdglINo=
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b/go.mod h1:tqur9LnfstdR9ep2LaJT4lFUl0EjlHtge+gAjmsHUG4=
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE= golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI= golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -229,9 +232,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -247,3 +249,5 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe h1:fre4i6mv4iBuz5lCMOzHD1rH1ljqHWSICFmZRbbgp3g=
gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU=


@@ -1,31 +0,0 @@
package nebula
import (
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/udp"
)
func HandleIncomingHandshake(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H, hostinfo *HostInfo) {
// First remote allow list check before we know the vpnIp
if addr != nil {
if !f.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
f.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return
}
}
switch h.Subtype {
case header.HandshakeIXPSK0:
switch h.MessageCounter {
case 1:
ixHandshakeStage1(f, addr, via, packet, h)
case 2:
newHostinfo, _ := f.handshakeManager.QueryIndex(h.RemoteIndex)
tearDown := ixHandshakeStage2(f, addr, via, newHostinfo, packet, h)
if tearDown && newHostinfo != nil {
f.handshakeManager.DeleteHostInfo(newHostinfo)
}
}
}
}


@@ -1,39 +1,34 @@
package nebula package nebula
import ( import (
"net/netip"
"time" "time"
"github.com/flynn/noise" "github.com/flynn/noise"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/header" "github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
) )
// NOISE IX Handshakes // NOISE IX Handshakes
// This function constructs a handshake packet, but does not actually send it // This function constructs a handshake packet, but does not actually send it
// Sending is done by the handshake manager // Sending is done by the handshake manager
func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) { func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
// This queries the lighthouse if we don't know a remote for the host err := f.handshakeManager.allocateIndex(hh)
// We do it here to provoke the lighthouse to preempt our timer wheel and trigger the stage 1 packet to send
// more quickly, effect is a quicker handshake.
if hostinfo.remote == nil {
f.lightHouse.QueryServer(vpnIp, f)
}
err := f.handshakeManager.AddIndexHostInfo(hostinfo)
if err != nil { if err != nil {
f.l.WithError(err).WithField("vpnIp", vpnIp). f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to generate index") WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to generate index")
return return false
} }
ci := hostinfo.ConnectionState certState := f.pki.GetCertState()
ci := NewConnectionState(f.l, f.cipher, certState, true, noise.HandshakeIX, []byte{}, 0)
hh.hostinfo.ConnectionState = ci
hsProto := &NebulaHandshakeDetails{ hsProto := &NebulaHandshakeDetails{
InitiatorIndex: hostinfo.localIndexId, InitiatorIndex: hh.hostinfo.localIndexId,
Time: uint64(time.Now().UnixNano()), Time: uint64(time.Now().UnixNano()),
Cert: ci.certState.rawCertificateNoKey, Cert: certState.RawCertificateNoKey,
} }
hsBytes := []byte{} hsBytes := []byte{}
@@ -44,32 +39,32 @@ func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
hsBytes, err = hs.Marshal() hsBytes, err = hs.Marshal()
if err != nil { if err != nil {
f.l.WithError(err).WithField("vpnIp", vpnIp). f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message") WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
return return false
} }
h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1) h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1)
ci.messageCounter.Add(1)
msg, _, _, err := ci.H.WriteMessage(h, hsBytes) msg, _, _, err := ci.H.WriteMessage(h, hsBytes)
if err != nil { if err != nil {
f.l.WithError(err).WithField("vpnIp", vpnIp). f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage") WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
return return false
} }
// We are sending handshake packet 1, so we don't expect to receive // We are sending handshake packet 1, so we don't expect to receive
// handshake packet 1 from the responder // handshake packet 1 from the responder
ci.window.Update(f.l, 1) ci.window.Update(f.l, 1)
hostinfo.HandshakePacket[0] = msg hh.hostinfo.HandshakePacket[0] = msg
hostinfo.HandshakeReady = true hh.ready = true
hostinfo.handshakeStart = time.Now() return true
} }
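The stage 0/1/2 functions above and below drive the two-message Noise IX pattern through flynn/noise: the initiator builds message 1 but leaves sending to the handshake manager, the responder answers with message 2, and both ends come away with a pair of cipher states. As a standalone illustration of that exchange (separate from Nebula's types; the keys, payloads, and variable names here are invented for the example, while the library calls are flynn/noise's real API):

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/flynn/noise"
)

func main() {
	// IX is a two-message pattern: initiator -> responder (message 1),
	// responder -> initiator (message 2).
	cs := noise.NewCipherSuite(noise.DH25519, noise.CipherAESGCM, noise.HashSHA256)

	initiatorKey, err := cs.GenerateKeypair(rand.Reader)
	if err != nil {
		panic(err)
	}
	responderKey, err := cs.GenerateKeypair(rand.Reader)
	if err != nil {
		panic(err)
	}

	initiator, _ := noise.NewHandshakeState(noise.Config{
		CipherSuite: cs, Pattern: noise.HandshakeIX, Initiator: true, StaticKeypair: initiatorKey,
	})
	responder, _ := noise.NewHandshakeState(noise.Config{
		CipherSuite: cs, Pattern: noise.HandshakeIX, Initiator: false, StaticKeypair: responderKey,
	})

	// Stage 0 equivalent: build message 1 but do not send it yet.
	msg1, _, _, err := initiator.WriteMessage(nil, []byte("handshake details payload"))
	if err != nil {
		panic(err)
	}

	// Stage 1 equivalent: the responder reads message 1 and answers with
	// message 2, receiving its cipher states (the first one decrypts
	// initiator->responder traffic).
	_, _, _, err = responder.ReadMessage(nil, msg1)
	if err != nil {
		panic(err)
	}
	msg2, respRecv, respSend, err := responder.WriteMessage(nil, []byte("responder details payload"))
	if err != nil {
		panic(err)
	}

	// Stage 2 equivalent: the initiator reads message 2 and gets the matching
	// states (the first one encrypts initiator->responder traffic).
	_, initSend, initRecv, err := initiator.ReadMessage(nil, msg2)
	if err != nil {
		panic(err)
	}
	_ = respSend
	_ = initRecv

	ct, _ := initSend.Encrypt(nil, nil, []byte("first data packet"))
	pt, _ := respRecv.Decrypt(nil, nil, ct)
	fmt.Printf("tunnel up, decrypted: %s\n", pt)
}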
func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H) { func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet []byte, h *header.H) {
ci := f.newConnectionState(f.l, false, noise.HandshakeIX, []byte{}, 0) certState := f.pki.GetCertState()
ci := NewConnectionState(f.l, f.cipher, certState, false, noise.HandshakeIX, []byte{}, 0)
// Mark packet 1 as seen so it doesn't show up as missed // Mark packet 1 as seen so it doesn't show up as missed
ci.window.Update(f.l, 1) ci.window.Update(f.l, 1)
@@ -91,19 +86,38 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
return return
} }
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool) remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
if err != nil { if err != nil {
f.l.WithError(err).WithField("udpAddr", addr). e := f.l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert). WithField("handshake", m{"stage": 1, "style": "ix_psk0"})
Info("Invalid certificate from host")
if f.l.Level > logrus.DebugLevel {
e = e.WithField("cert", remoteCert)
}
e.Info("Invalid certificate from host")
return return
} }
vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP)
vpnIp, ok := netip.AddrFromSlice(remoteCert.Details.Ips[0].IP)
if !ok {
e := f.l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"})
if f.l.Level > logrus.DebugLevel {
e = e.WithField("cert", remoteCert)
}
e.Info("Invalid vpn ip from host")
return
}
vpnIp = vpnIp.Unmap()
certName := remoteCert.Details.Name certName := remoteCert.Details.Name
fingerprint, _ := remoteCert.Sha256Sum() fingerprint, _ := remoteCert.Sha256Sum()
issuer := remoteCert.Details.Issuer issuer := remoteCert.Details.Issuer
if vpnIp == f.myVpnIp { if vpnIp == f.myVpnNet.Addr() {
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr). f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName). WithField("certName", certName).
WithField("fingerprint", fingerprint). WithField("fingerprint", fingerprint).
@@ -112,8 +126,8 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
return return
} }
if addr != nil { if addr.IsValid() {
if !f.lightHouse.GetRemoteAllowList().Allow(vpnIp, addr.IP) { if !f.lightHouse.GetRemoteAllowList().Allow(vpnIp, addr.Addr()) {
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake") f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return return
} }
@@ -137,15 +151,12 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
HandshakePacket: make(map[uint8][]byte, 0), HandshakePacket: make(map[uint8][]byte, 0),
lastHandshakeTime: hs.Details.Time, lastHandshakeTime: hs.Details.Time,
relayState: RelayState{ relayState: RelayState{
relays: map[iputil.VpnIp]struct{}{}, relays: nil,
relayForByIp: map[iputil.VpnIp]*Relay{}, relayForByIp: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{}, relayForByIdx: map[uint32]*Relay{},
}, },
} }
hostinfo.Lock()
defer hostinfo.Unlock()
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr). f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName). WithField("certName", certName).
WithField("fingerprint", fingerprint). WithField("fingerprint", fingerprint).
@@ -155,7 +166,7 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
Info("Handshake message received") Info("Handshake message received")
hs.Details.ResponderIndex = myIndex hs.Details.ResponderIndex = myIndex
hs.Details.Cert = ci.certState.rawCertificateNoKey hs.Details.Cert = certState.RawCertificateNoKey
// Update the time in case their clock is way off from ours // Update the time in case their clock is way off from ours
hs.Details.Time = uint64(time.Now().UnixNano()) hs.Details.Time = uint64(time.Now().UnixNano())
@@ -211,23 +222,16 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
if err != nil { if err != nil {
switch err { switch err {
case ErrAlreadySeen: case ErrAlreadySeen:
// Update remote if preferred (Note we have to switch to locking
// the existing hostinfo, and then switch back so the defer Unlock
// higher in this function still works)
hostinfo.Unlock()
existing.Lock()
// Update remote if preferred // Update remote if preferred
if existing.SetRemoteIfPreferred(f.hostMap, addr) { if existing.SetRemoteIfPreferred(f.hostMap, addr) {
// Send a test packet to ensure the other side has also switched to // Send a test packet to ensure the other side has also switched to
// the preferred remote // the preferred remote
f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu)) f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
} }
existing.Unlock()
hostinfo.Lock()
msg = existing.HandshakePacket[2] msg = existing.HandshakePacket[2]
f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1) f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
if addr != nil { if addr.IsValid() {
err := f.outside.WriteTo(msg, addr) err := f.outside.WriteTo(msg, addr)
if err != nil { if err != nil {
f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr). f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr).
@@ -293,7 +297,7 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
// Do the send // Do the send
f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1) f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
if addr != nil { if addr.IsValid() {
err = f.outside.WriteTo(msg, addr) err = f.outside.WriteTo(msg, addr)
if err != nil { if err != nil {
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr). f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
@@ -310,7 +314,6 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
WithField("issuer", issuer). WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
Info("Handshake message sent") Info("Handshake message sent")
} }
} else { } else {
@@ -319,6 +322,9 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
return return
} }
hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp) hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
// I successfully received a handshake. Just in case I marked this tunnel as 'Disestablished', ensure
// it's correctly marked as working.
via.relayHI.relayState.UpdateRelayForByIdxState(via.remoteIdx, Established)
f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false) f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
f.l.WithField("vpnIp", vpnIp).WithField("relay", via.relayHI.vpnIp). f.l.WithField("vpnIp", vpnIp).WithField("relay", via.relayHI.vpnIp).
WithField("certName", certName). WithField("certName", certName).
@@ -326,49 +332,34 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
WithField("issuer", issuer). WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
Info("Handshake message sent") Info("Handshake message sent")
} }
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId) f.connectionManager.AddTrafficWatch(hostinfo)
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
hostinfo.remotes.ResetBlockedRemotes()
return return
} }
func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *HostInfo, packet []byte, h *header.H) bool { func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *HandshakeHostInfo, packet []byte, h *header.H) bool {
if hostinfo == nil { if hh == nil {
// Nothing here to tear down, got a bogus stage 2 packet // Nothing here to tear down, got a bogus stage 2 packet
return true return true
} }
hostinfo.Lock() hh.Lock()
defer hostinfo.Unlock() defer hh.Unlock()
if addr != nil { hostinfo := hh.hostinfo
if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) { if addr.IsValid() {
if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.Addr()) {
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake") f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return false return false
} }
} }
ci := hostinfo.ConnectionState ci := hostinfo.ConnectionState
if ci.ready {
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
Info("Handshake is already complete")
// Update remote if preferred
if hostinfo.SetRemoteIfPreferred(f.hostMap, addr) {
// Send a test packet to ensure the other side has also switched to
// the preferred remote
f.SendMessageToVpnIp(header.Test, header.TestRequest, hostinfo.vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
}
// We already have a complete tunnel, there is nothing that can be done by processing further stage 1 packets
return false
}
msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[header.Len:]) msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[header.Len:])
if err != nil { if err != nil {
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr). f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
@@ -399,17 +390,35 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
return true return true
} }
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool) remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
if err != nil { if err != nil {
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr). e := f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("cert", remoteCert).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). WithField("handshake", m{"stage": 2, "style": "ix_psk0"})
Error("Invalid certificate from host")
if f.l.Level > logrus.DebugLevel {
e = e.WithField("cert", remoteCert)
}
e.Error("Invalid certificate from host")
// The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again // The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again
return true return true
} }
vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP) vpnIp, ok := netip.AddrFromSlice(remoteCert.Details.Ips[0].IP)
if !ok {
e := f.l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"})
if f.l.Level > logrus.DebugLevel {
e = e.WithField("cert", remoteCert)
}
e.Info("Invalid vpn ip from host")
return true
}
vpnIp = vpnIp.Unmap()
certName := remoteCert.Details.Name certName := remoteCert.Details.Name
fingerprint, _ := remoteCert.Sha256Sum() fingerprint, _ := remoteCert.Sha256Sum()
issuer := remoteCert.Details.Issuer issuer := remoteCert.Details.Issuer
@@ -422,34 +431,30 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
Info("Incorrect host responded to handshake") Info("Incorrect host responded to handshake")
// Release our old handshake from pending, it should not continue // Release our old handshake from pending, it should not continue
f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo) f.handshakeManager.DeleteHostInfo(hostinfo)
// Create a new hostinfo/handshake for the intended vpn ip // Create a new hostinfo/handshake for the intended vpn ip
//TODO: this adds it to the timer wheel in a way that aggressively retries f.handshakeManager.StartHandshake(hostinfo.vpnIp, func(newHH *HandshakeHostInfo) {
newHostInfo := f.getOrHandshake(hostinfo.vpnIp) //TODO: this doesnt know if its being added or is being used for caching a packet
newHostInfo.Lock() // Block the current used address
newHH.hostinfo.remotes = hostinfo.remotes
newHH.hostinfo.remotes.BlockRemote(addr)
// Block the current used address // Get the correct remote list for the host we did handshake with
newHostInfo.remotes = hostinfo.remotes hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
newHostInfo.remotes.BlockRemote(addr)
// Get the correct remote list for the host we did handshake with f.l.WithField("blockedUdpAddrs", newHH.hostinfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
hostinfo.remotes = f.lightHouse.QueryCache(vpnIp) WithField("remotes", newHH.hostinfo.remotes.CopyAddrs(f.hostMap.GetPreferredRanges())).
Info("Blocked addresses for handshakes")
f.l.WithField("blockedUdpAddrs", newHostInfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp). // Swap the packet store to benefit the original intended recipient
WithField("remotes", newHostInfo.remotes.CopyAddrs(f.hostMap.preferredRanges)). newHH.packetStore = hh.packetStore
Info("Blocked addresses for handshakes") hh.packetStore = []*cachedPacket{}
// Swap the packet store to benefit the original intended recipient // Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
hostinfo.ConnectionState.queueLock.Lock() hostinfo.vpnIp = vpnIp
newHostInfo.packetStore = hostinfo.packetStore f.sendCloseTunnel(hostinfo)
hostinfo.packetStore = []*cachedPacket{} })
hostinfo.ConnectionState.queueLock.Unlock()
// Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
hostinfo.vpnIp = vpnIp
f.sendCloseTunnel(hostinfo)
newHostInfo.Unlock()
return true return true
} }
@@ -457,7 +462,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
// Mark packet 2 as seen so it doesn't show up as missed // Mark packet 2 as seen so it doesn't show up as missed
ci.window.Update(f.l, 2) ci.window.Update(f.l, 2)
duration := time.Since(hostinfo.handshakeStart).Nanoseconds() duration := time.Since(hh.startTime).Nanoseconds()
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr). f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName). WithField("certName", certName).
WithField("fingerprint", fingerprint). WithField("fingerprint", fingerprint).
@@ -465,7 +470,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex). WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}). WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("durationNs", duration). WithField("durationNs", duration).
WithField("sentCachedPackets", len(hostinfo.packetStore)). WithField("sentCachedPackets", len(hh.packetStore)).
Info("Handshake message received") Info("Handshake message received")
hostinfo.remoteIndexId = hs.Details.ResponderIndex hostinfo.remoteIndexId = hs.Details.ResponderIndex
@@ -477,7 +482,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
ci.eKey = NewNebulaCipherState(eKey) ci.eKey = NewNebulaCipherState(eKey)
// Make sure the current udpAddr being used is set for responding // Make sure the current udpAddr being used is set for responding
if addr != nil { if addr.IsValid() {
hostinfo.SetRemote(addr) hostinfo.SetRemote(addr)
} else { } else {
hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp) hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
@@ -488,8 +493,22 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp // Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
f.handshakeManager.Complete(hostinfo, f) f.handshakeManager.Complete(hostinfo, f)
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId) f.connectionManager.AddTrafficWatch(hostinfo)
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
if f.l.Level >= logrus.DebugLevel {
hostinfo.logger(f.l).Debugf("Sending %d stored packets", len(hh.packetStore))
}
if len(hh.packetStore) > 0 {
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
for _, cp := range hh.packetStore {
cp.callback(cp.messageType, cp.messageSubType, hostinfo, cp.packet, nb, out)
}
f.cachedPacketMetrics.sent.Inc(int64(len(hh.packetStore)))
}
hostinfo.remotes.ResetBlockedRemotes()
f.metricHandshakes.Update(duration) f.metricHandshakes.Update(duration)
return false return false
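A recurring change in the hunks above is the move from *udp.Addr and iputil.VpnIp to the standard library's net/netip types: certificate IPs are converted with netip.AddrFromSlice and unmapped, and the "do we have a remote?" checks become addr.IsValid() instead of nil comparisons. A standalone sketch of that pattern (the addresses below are documentation examples, not values from this change):

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	// A certificate carries IPs as net.IP byte slices; convert to netip.Addr.
	certIP := net.ParseIP("192.0.2.10")

	vpnIp, ok := netip.AddrFromSlice(certIP)
	if !ok {
		fmt.Println("invalid vpn ip from host")
		return
	}
	// net.ParseIP yields a 16-byte slice for IPv4, so strip the IPv4-in-IPv6
	// mapping to get a plain IPv4 address, mirroring vpnIp = vpnIp.Unmap().
	vpnIp = vpnIp.Unmap()

	// The zero netip.AddrPort plays the role the nil *udp.Addr used to play.
	var remote netip.AddrPort
	fmt.Println(vpnIp, remote.IsValid()) // 192.0.2.10 false

	remote = netip.AddrPortFrom(netip.MustParseAddr("198.51.100.7"), 4242)
	fmt.Println(remote.Addr(), remote.Port(), remote.IsValid()) // 198.51.100.7 4242 true
}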


@@ -6,14 +6,15 @@ import (
"crypto/rand" "crypto/rand"
"encoding/binary" "encoding/binary"
"errors" "errors"
"net" "net/netip"
"sync"
"time" "time"
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/header" "github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp" "github.com/slackhq/nebula/udp"
"golang.org/x/exp/slices"
) )
const ( const (
@@ -34,7 +35,7 @@ var (
type HandshakeConfig struct { type HandshakeConfig struct {
tryInterval time.Duration tryInterval time.Duration
retries int retries int64
triggerBuffer int triggerBuffer int
useRelays bool useRelays bool
@@ -42,30 +43,74 @@ type HandshakeConfig struct {
} }
type HandshakeManager struct { type HandshakeManager struct {
pendingHostMap *HostMap // Mutex for interacting with the vpnIps and indexes maps
sync.RWMutex
vpnIps map[netip.Addr]*HandshakeHostInfo
indexes map[uint32]*HandshakeHostInfo
mainHostMap *HostMap mainHostMap *HostMap
lightHouse *LightHouse lightHouse *LightHouse
outside *udp.Conn outside udp.Conn
config HandshakeConfig config HandshakeConfig
OutboundHandshakeTimer *LockingTimerWheel[iputil.VpnIp] OutboundHandshakeTimer *LockingTimerWheel[netip.Addr]
messageMetrics *MessageMetrics messageMetrics *MessageMetrics
metricInitiated metrics.Counter metricInitiated metrics.Counter
metricTimedOut metrics.Counter metricTimedOut metrics.Counter
f *Interface
l *logrus.Logger l *logrus.Logger
// can be used to trigger outbound handshake for the given vpnIp // can be used to trigger outbound handshake for the given vpnIp
trigger chan iputil.VpnIp trigger chan netip.Addr
} }
func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges []*net.IPNet, mainHostMap *HostMap, lightHouse *LightHouse, outside *udp.Conn, config HandshakeConfig) *HandshakeManager { type HandshakeHostInfo struct {
sync.Mutex
startTime time.Time // Time that we first started trying with this handshake
ready bool // Is the handshake ready
counter int64 // How many attempts have we made so far
lastRemotes []netip.AddrPort // Remotes that we sent to during the previous attempt
packetStore []*cachedPacket // A set of packets to be transmitted once the handshake completes
hostinfo *HostInfo
}
func (hh *HandshakeHostInfo) cachePacket(l *logrus.Logger, t header.MessageType, st header.MessageSubType, packet []byte, f packetCallback, m *cachedPacketMetrics) {
if len(hh.packetStore) < 100 {
tempPacket := make([]byte, len(packet))
copy(tempPacket, packet)
hh.packetStore = append(hh.packetStore, &cachedPacket{t, st, f, tempPacket})
if l.Level >= logrus.DebugLevel {
hh.hostinfo.logger(l).
WithField("length", len(hh.packetStore)).
WithField("stored", true).
Debugf("Packet store")
}
} else {
m.dropped.Inc(1)
if l.Level >= logrus.DebugLevel {
hh.hostinfo.logger(l).
WithField("length", len(hh.packetStore)).
WithField("stored", false).
Debugf("Packet store")
}
}
}
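The two hunks above move packet caching onto HandshakeHostInfo: outbound payloads are buffered (capped at 100 entries, with a drop metric past that) while the handshake is pending, then replayed through the stored callbacks once stage 2 completes. Below is a minimal, self-contained sketch of that store-and-drain pattern, using hypothetical names rather than Nebula's real types:

```go
package main

import "fmt"

// pendingStore buffers packets for a peer until its tunnel is ready,
// mirroring the capped packetStore on HandshakeHostInfo above.
type pendingStore struct {
	packets [][]byte
	dropped int
}

const storeLimit = 100 // same cap as cachePacket uses

func (s *pendingStore) cache(p []byte) {
	if len(s.packets) >= storeLimit {
		s.dropped++ // the real code bumps a metrics counter here
		return
	}
	cp := make([]byte, len(p)) // copy: the caller reuses its buffer
	copy(cp, p)
	s.packets = append(s.packets, cp)
}

// drain replays everything once the handshake completes, analogous to the
// loop over hh.packetStore in ixHandshakeStage2.
func (s *pendingStore) drain(send func([]byte)) int {
	for _, p := range s.packets {
		send(p)
	}
	n := len(s.packets)
	s.packets = nil
	return n
}

func main() {
	var s pendingStore
	s.cache([]byte("ping"))
	s.cache([]byte("pong"))
	n := s.drain(func(p []byte) { fmt.Printf("sending %q\n", p) })
	fmt.Println("replayed", n, "stored packets")
}
```

The copy before appending matters because the datapath reuses its packet buffer between reads.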
func NewHandshakeManager(l *logrus.Logger, mainHostMap *HostMap, lightHouse *LightHouse, outside udp.Conn, config HandshakeConfig) *HandshakeManager {
return &HandshakeManager{ return &HandshakeManager{
pendingHostMap: NewHostMap(l, "pending", tunCidr, preferredRanges), vpnIps: map[netip.Addr]*HandshakeHostInfo{},
indexes: map[uint32]*HandshakeHostInfo{},
mainHostMap: mainHostMap, mainHostMap: mainHostMap,
lightHouse: lightHouse, lightHouse: lightHouse,
outside: outside, outside: outside,
config: config, config: config,
trigger: make(chan iputil.VpnIp, config.triggerBuffer), trigger: make(chan netip.Addr, config.triggerBuffer),
OutboundHandshakeTimer: NewLockingTimerWheel[iputil.VpnIp](config.tryInterval, hsTimeout(config.retries, config.tryInterval)), OutboundHandshakeTimer: NewLockingTimerWheel[netip.Addr](config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
messageMetrics: config.messageMetrics, messageMetrics: config.messageMetrics,
metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil), metricInitiated: metrics.GetOrRegisterCounter("handshake_manager.initiated", nil),
metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil), metricTimedOut: metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil),
@@ -73,7 +118,7 @@ func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges [
} }
} }
func (c *HandshakeManager) Run(ctx context.Context, f EncWriter) { func (c *HandshakeManager) Run(ctx context.Context) {
clockSource := time.NewTicker(c.config.tryInterval) clockSource := time.NewTicker(c.config.tryInterval)
defer clockSource.Stop() defer clockSource.Stop()
@@ -82,58 +127,80 @@ func (c *HandshakeManager) Run(ctx context.Context, f EncWriter) {
case <-ctx.Done(): case <-ctx.Done():
return return
case vpnIP := <-c.trigger: case vpnIP := <-c.trigger:
c.handleOutbound(vpnIP, f, true) c.handleOutbound(vpnIP, true)
case now := <-clockSource.C: case now := <-clockSource.C:
c.NextOutboundHandshakeTimerTick(now, f) c.NextOutboundHandshakeTimerTick(now)
} }
} }
} }
func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f EncWriter) { func (hm *HandshakeManager) HandleIncoming(addr netip.AddrPort, via *ViaSender, packet []byte, h *header.H) {
// First remote allow list check before we know the vpnIp
if addr.IsValid() {
if !hm.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.Addr()) {
hm.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return
}
}
switch h.Subtype {
case header.HandshakeIXPSK0:
switch h.MessageCounter {
case 1:
ixHandshakeStage1(hm.f, addr, via, packet, h)
case 2:
newHostinfo := hm.queryIndex(h.RemoteIndex)
tearDown := ixHandshakeStage2(hm.f, addr, via, newHostinfo, packet, h)
if tearDown && newHostinfo != nil {
hm.DeleteHostInfo(newHostinfo.hostinfo)
}
}
}
}
func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time) {
c.OutboundHandshakeTimer.Advance(now) c.OutboundHandshakeTimer.Advance(now)
for { for {
vpnIp, has := c.OutboundHandshakeTimer.Purge() vpnIp, has := c.OutboundHandshakeTimer.Purge()
if !has { if !has {
break break
} }
c.handleOutbound(vpnIp, f, false) c.handleOutbound(vpnIp, false)
} }
} }
func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, lighthouseTriggered bool) { func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered bool) {
hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp) hh := hm.queryVpnIp(vpnIp)
if err != nil { if hh == nil {
return return
} }
hostinfo.Lock() hh.Lock()
defer hostinfo.Unlock() defer hh.Unlock()
// We may have raced to completion but now that we have a lock we should ensure we have not yet completed. hostinfo := hh.hostinfo
if hostinfo.HandshakeComplete { // If we are out of time, clean up
// Ensure we don't exist in the pending hostmap anymore since we have completed if hh.counter >= hm.config.retries {
c.pendingHostMap.DeleteHostInfo(hostinfo) hh.hostinfo.logger(hm.l).WithField("udpAddrs", hh.hostinfo.remotes.CopyAddrs(hm.mainHostMap.GetPreferredRanges())).
WithField("initiatorIndex", hh.hostinfo.localIndexId).
WithField("remoteIndex", hh.hostinfo.remoteIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithField("durationNs", time.Since(hh.startTime).Nanoseconds()).
Info("Handshake timed out")
hm.metricTimedOut.Inc(1)
hm.DeleteHostInfo(hostinfo)
return return
} }
// Increment the counter to increase our delay, linear backoff
hh.counter++
// Check if we have a handshake packet to transmit yet // Check if we have a handshake packet to transmit yet
if !hostinfo.HandshakeReady { if !hh.ready {
// There is currently a slight race in getOrHandshake due to ConnectionState not being part of the HostInfo directly if !ixHandshakeStage0(hm.f, hh) {
// Our hostinfo here was added to the pending map and the wheel may have ticked to us before we created ConnectionState hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval*time.Duration(hh.counter))
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter)) return
return }
}
// If we are out of time, clean up
if hostinfo.HandshakeCounter >= c.config.retries {
hostinfo.logger(c.l).WithField("udpAddrs", hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("remoteIndex", hostinfo.remoteIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithField("durationNs", time.Since(hostinfo.handshakeStart).Nanoseconds()).
Info("Handshake timed out")
c.metricTimedOut.Inc(1)
c.pendingHostMap.DeleteHostInfo(hostinfo)
return
} }
// Get a remotes object if we don't already have one. // Get a remotes object if we don't already have one.
@@ -141,11 +208,11 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
// NB ^ This comment doesn't jive. It's how the thing gets initialized. // NB ^ This comment doesn't jive. It's how the thing gets initialized.
// It's the common path. Should it update every time, in case a future LH query/queries give us more info? // It's the common path. Should it update every time, in case a future LH query/queries give us more info?
if hostinfo.remotes == nil { if hostinfo.remotes == nil {
hostinfo.remotes = c.lightHouse.QueryCache(vpnIp) hostinfo.remotes = hm.lightHouse.QueryCache(vpnIp)
} }
remotes := hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges) remotes := hostinfo.remotes.CopyAddrs(hm.mainHostMap.GetPreferredRanges())
remotesHaveChanged := !udp.AddrSlice(remotes).Equal(hostinfo.HandshakeLastRemotes) remotesHaveChanged := !slices.Equal(remotes, hh.lastRemotes)
// We only care about a lighthouse trigger if we have new remotes to send to. // We only care about a lighthouse trigger if we have new remotes to send to.
// This is a very specific optimization for a fast lighthouse reply. // This is a very specific optimization for a fast lighthouse reply.
@@ -154,25 +221,25 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
return return
} }
hostinfo.HandshakeLastRemotes = remotes hh.lastRemotes = remotes
// TODO: this will generate a load of queries for hosts with only 1 ip // TODO: this will generate a load of queries for hosts with only 1 ip
// (such as ones registered to the lighthouse with only a private IP) // (such as ones registered to the lighthouse with only a private IP)
// So we only do it one time after attempting 5 handshakes already. // So we only do it one time after attempting 5 handshakes already.
if len(remotes) <= 1 && hostinfo.HandshakeCounter == 5 { if len(remotes) <= 1 && hh.counter == 5 {
// If we only have 1 remote it is highly likely our query raced with the other host registered within the lighthouse // If we only have 1 remote it is highly likely our query raced with the other host registered within the lighthouse
// Our vpnIp here has a tunnel with a lighthouse but has yet to send a host update packet there so we only know about // Our vpnIp here has a tunnel with a lighthouse but has yet to send a host update packet there so we only know about
// the learned public ip for them. Query again to short circuit the promotion counter // the learned public ip for them. Query again to short circuit the promotion counter
c.lightHouse.QueryServer(vpnIp, f) hm.lightHouse.QueryServer(vpnIp)
} }
// Send the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply // Send the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
var sentTo []*udp.Addr var sentTo []netip.AddrPort
hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) { hostinfo.remotes.ForEach(hm.mainHostMap.GetPreferredRanges(), func(addr netip.AddrPort, _ bool) {
c.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1) hm.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
err = c.outside.WriteTo(hostinfo.HandshakePacket[0], addr) err := hm.outside.WriteTo(hostinfo.HandshakePacket[0], addr)
if err != nil { if err != nil {
hostinfo.logger(c.l).WithField("udpAddr", addr). hostinfo.logger(hm.l).WithField("udpAddr", addr).
WithField("initiatorIndex", hostinfo.localIndexId). WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithError(err).Error("Failed to send handshake message") WithError(err).Error("Failed to send handshake message")
@@ -185,118 +252,192 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
// Don't be too noisy or confusing if we fail to send a handshake - if we don't get through we'll eventually log a timeout, // Don't be too noisy or confusing if we fail to send a handshake - if we don't get through we'll eventually log a timeout,
// so only log when the list of remotes has changed // so only log when the list of remotes has changed
if remotesHaveChanged { if remotesHaveChanged {
hostinfo.logger(c.l).WithField("udpAddrs", sentTo). hostinfo.logger(hm.l).WithField("udpAddrs", sentTo).
WithField("initiatorIndex", hostinfo.localIndexId). WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Info("Handshake message sent") Info("Handshake message sent")
} else if c.l.IsLevelEnabled(logrus.DebugLevel) { } else if hm.l.IsLevelEnabled(logrus.DebugLevel) {
hostinfo.logger(c.l).WithField("udpAddrs", sentTo). hostinfo.logger(hm.l).WithField("udpAddrs", sentTo).
WithField("initiatorIndex", hostinfo.localIndexId). WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}). WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Debug("Handshake message sent") Debug("Handshake message sent")
} }
if c.config.useRelays && len(hostinfo.remotes.relays) > 0 { if hm.config.useRelays && len(hostinfo.remotes.relays) > 0 {
hostinfo.logger(c.l).WithField("relays", hostinfo.remotes.relays).Info("Attempt to relay through hosts") hostinfo.logger(hm.l).WithField("relays", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
// Send a RelayRequest to all known Relay IP's // Send a RelayRequest to all known Relay IP's
for _, relay := range hostinfo.remotes.relays { for _, relay := range hostinfo.remotes.relays {
// Don't relay to myself, and don't relay through the host I'm trying to connect to // Don't relay to myself, and don't relay through the host I'm trying to connect to
if *relay == vpnIp || *relay == c.lightHouse.myVpnIp { if relay == vpnIp || relay == hm.lightHouse.myVpnNet.Addr() {
continue continue
} }
relayHostInfo, err := c.mainHostMap.QueryVpnIp(*relay) relayHostInfo := hm.mainHostMap.QueryVpnIp(relay)
if err != nil || relayHostInfo.remote == nil { if relayHostInfo == nil || !relayHostInfo.remote.IsValid() {
hostinfo.logger(c.l).WithError(err).WithField("relay", relay.String()).Info("Establish tunnel to relay target") hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Establish tunnel to relay target")
f.Handshake(*relay) hm.f.Handshake(relay)
continue continue
} }
// Check the relay HostInfo to see if we already established a relay through it // Check the relay HostInfo to see if we already established a relay through it
if existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp); ok { existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp)
switch existingRelay.State { if !ok {
case Established:
hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Send handshake via relay")
f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
case Requested:
hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
// Re-send the CreateRelay request, in case the previous one was lost.
m := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: existingRelay.LocalIndex,
RelayFromIp: uint32(c.lightHouse.myVpnIp),
RelayToIp: uint32(vpnIp),
}
msg, err := m.Marshal()
if err != nil {
hostinfo.logger(c.l).
WithError(err).
Error("Failed to marshal Control message to create relay")
} else {
// This must send over the hostinfo, not over hm.Hosts[ip]
f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
c.l.WithFields(logrus.Fields{
"relayFrom": c.lightHouse.myVpnIp,
"relayTo": vpnIp,
"initiatorRelayIndex": existingRelay.LocalIndex,
"relay": *relay}).
Info("send CreateRelayRequest")
}
default:
hostinfo.logger(c.l).
WithField("vpnIp", vpnIp).
WithField("state", existingRelay.State).
WithField("relay", relayHostInfo.vpnIp).
Errorf("Relay unexpected state")
}
} else {
// No relays exist or requested yet. // No relays exist or requested yet.
if relayHostInfo.remote != nil { if relayHostInfo.remote.IsValid() {
idx, err := AddRelay(c.l, relayHostInfo, c.mainHostMap, vpnIp, nil, TerminalType, Requested) idx, err := AddRelay(hm.l, relayHostInfo, hm.mainHostMap, vpnIp, nil, TerminalType, Requested)
if err != nil { if err != nil {
hostinfo.logger(c.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap") hostinfo.logger(hm.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap")
} }
//TODO: IPV6-WORK
myVpnIpB := hm.f.myVpnNet.Addr().As4()
theirVpnIpB := vpnIp.As4()
m := NebulaControl{ m := NebulaControl{
Type: NebulaControl_CreateRelayRequest, Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: idx, InitiatorRelayIndex: idx,
RelayFromIp: uint32(c.lightHouse.myVpnIp), RelayFromIp: binary.BigEndian.Uint32(myVpnIpB[:]),
RelayToIp: uint32(vpnIp), RelayToIp: binary.BigEndian.Uint32(theirVpnIpB[:]),
} }
msg, err := m.Marshal() msg, err := m.Marshal()
if err != nil { if err != nil {
hostinfo.logger(c.l). hostinfo.logger(hm.l).
WithError(err). WithError(err).
Error("Failed to marshal Control message to create relay") Error("Failed to marshal Control message to create relay")
} else { } else {
f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu)) hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
c.l.WithFields(logrus.Fields{ hm.l.WithFields(logrus.Fields{
"relayFrom": c.lightHouse.myVpnIp, "relayFrom": hm.f.myVpnNet.Addr(),
"relayTo": vpnIp, "relayTo": vpnIp,
"initiatorRelayIndex": idx, "initiatorRelayIndex": idx,
"relay": *relay}). "relay": relay}).
Info("send CreateRelayRequest") Info("send CreateRelayRequest")
} }
} }
continue
}
switch existingRelay.State {
case Established:
hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Send handshake via relay")
hm.f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
case Disestablished:
// Mark this relay as 'requested'
relayHostInfo.relayState.UpdateRelayForByIpState(vpnIp, Requested)
fallthrough
case Requested:
hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
// Re-send the CreateRelay request, in case the previous one was lost.
relayFrom := hm.f.myVpnNet.Addr().As4()
relayTo := vpnIp.As4()
m := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: existingRelay.LocalIndex,
RelayFromIp: binary.BigEndian.Uint32(relayFrom[:]),
RelayToIp: binary.BigEndian.Uint32(relayTo[:]),
}
msg, err := m.Marshal()
if err != nil {
hostinfo.logger(hm.l).
WithError(err).
Error("Failed to marshal Control message to create relay")
} else {
// This must send over the hostinfo, not over hm.Hosts[ip]
hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
hm.l.WithFields(logrus.Fields{
"relayFrom": hm.f.myVpnNet,
"relayTo": vpnIp,
"initiatorRelayIndex": existingRelay.LocalIndex,
"relay": relay}).
Info("send CreateRelayRequest")
}
case PeerRequested:
// PeerRequested only occurs in Forwarding relays, not Terminal relays, and this is a Terminal relay case.
fallthrough
default:
hostinfo.logger(hm.l).
WithField("vpnIp", vpnIp).
WithField("state", existingRelay.State).
WithField("relay", relay).
Errorf("Relay unexpected state")
} }
} }
} }
// Increment the counter to increase our delay, linear backoff
hostinfo.HandshakeCounter++
// If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add // If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add
if !lighthouseTriggered { if !lighthouseTriggered {
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter)) hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval*time.Duration(hh.counter))
} }
} }
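Since the NebulaControl protobuf still carries relay endpoints as uint32 fields (RelayFromIp, RelayToIp), the new code converts netip.Addr values with As4 plus binary.BigEndian where the old code could cast iputil.VpnIp directly; the //TODO: IPV6-WORK markers flag this as IPv4-only for now. A standalone sketch of that round trip, assuming IPv4 addresses:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net/netip"
)

// addrToUint32 packs an IPv4 netip.Addr into the big-endian uint32 wire
// format used by the RelayFromIp/RelayToIp fields above.
// As4 panics unless the address is IPv4 (or an IPv4-mapped IPv6 address).
func addrToUint32(a netip.Addr) uint32 {
	b := a.As4()
	return binary.BigEndian.Uint32(b[:])
}

// uint32ToAddr is the inverse, useful when handling a received request.
func uint32ToAddr(v uint32) netip.Addr {
	var b [4]byte
	binary.BigEndian.PutUint32(b[:], v)
	return netip.AddrFrom4(b)
}

func main() {
	from := netip.MustParseAddr("10.1.1.1")
	wire := addrToUint32(from)
	fmt.Printf("0x%08x -> %s\n", wire, uint32ToAddr(wire)) // 0x0a010101 -> 10.1.1.1
}
```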
func (c *HandshakeManager) AddVpnIp(vpnIp iputil.VpnIp, init func(*HostInfo)) *HostInfo { // GetOrHandshake will try to find a hostinfo with a fully formed tunnel or start a new handshake if one is not present
hostinfo, created := c.pendingHostMap.AddVpnIp(vpnIp, init) // The 2nd argument will be true if the hostinfo is ready to transmit traffic
func (hm *HandshakeManager) GetOrHandshake(vpnIp netip.Addr, cacheCb func(*HandshakeHostInfo)) (*HostInfo, bool) {
hm.mainHostMap.RLock()
h, ok := hm.mainHostMap.Hosts[vpnIp]
hm.mainHostMap.RUnlock()
if created { if ok {
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval) // Do not attempt promotion if you are a lighthouse
c.metricInitiated.Inc(1) if !hm.lightHouse.amLighthouse {
h.TryPromoteBest(hm.mainHostMap.GetPreferredRanges(), hm.f)
}
return h, true
} }
return hm.StartHandshake(vpnIp, cacheCb), false
}
// StartHandshake will ensure a handshake is currently being attempted for the provided vpn ip
func (hm *HandshakeManager) StartHandshake(vpnIp netip.Addr, cacheCb func(*HandshakeHostInfo)) *HostInfo {
hm.Lock()
if hh, ok := hm.vpnIps[vpnIp]; ok {
// We are already trying to handshake with this vpn ip
if cacheCb != nil {
cacheCb(hh)
}
hm.Unlock()
return hh.hostinfo
}
hostinfo := &HostInfo{
vpnIp: vpnIp,
HandshakePacket: make(map[uint8][]byte, 0),
relayState: RelayState{
relays: nil,
relayForByIp: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
}
hh := &HandshakeHostInfo{
hostinfo: hostinfo,
startTime: time.Now(),
}
hm.vpnIps[vpnIp] = hh
hm.metricInitiated.Inc(1)
hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval)
if cacheCb != nil {
cacheCb(hh)
}
// If this is a static host, we don't need to wait for the HostQueryReply
// We can trigger the handshake right now
_, doTrigger := hm.lightHouse.GetStaticHostList()[vpnIp]
if !doTrigger {
// Add any calculated remotes, and trigger early handshake if one found
doTrigger = hm.lightHouse.addCalculatedRemotes(vpnIp)
}
if doTrigger {
select {
case hm.trigger <- vpnIp:
default:
}
}
hm.Unlock()
hm.lightHouse.QueryServer(vpnIp)
return hostinfo return hostinfo
} }
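GetOrHandshake and StartHandshake replace the old AddVpnIp/init-callback flow: a caller either gets an established HostInfo (second return value true) or joins a pending handshake and may queue the packet that triggered the lookup via the cache callback. The following is a trimmed-down, self-contained analogue of that flow with illustrative names only; the real version also returns the pending HostInfo, arms the retry timer wheel, consults the static host list and calculated remotes, and queries the lighthouse:

```go
package main

import (
	"fmt"
	"net/netip"
	"sync"
)

// A trimmed-down analogue of the GetOrHandshake flow above.
type peer struct{ ready bool }

type pendingHandshake struct{ queued [][]byte }

type manager struct {
	mu          sync.Mutex
	established map[netip.Addr]*peer
	pending     map[netip.Addr]*pendingHandshake
}

// getOrHandshake returns an established peer when one exists; otherwise it
// joins (or starts) a pending handshake and lets the caller queue its packet.
func (m *manager) getOrHandshake(ip netip.Addr, cacheCb func(*pendingHandshake)) (*peer, bool) {
	m.mu.Lock()
	defer m.mu.Unlock()
	if p, ok := m.established[ip]; ok {
		return p, true // tunnel exists, caller can transmit immediately
	}
	hh, ok := m.pending[ip]
	if !ok {
		hh = &pendingHandshake{}
		m.pending[ip] = hh
	}
	if cacheCb != nil {
		cacheCb(hh) // stash the packet that triggered this lookup
	}
	return nil, false
}

func main() {
	m := &manager{
		established: map[netip.Addr]*peer{},
		pending:     map[netip.Addr]*pendingHandshake{},
	}
	ip := netip.MustParseAddr("10.2.0.5")
	if _, ready := m.getOrHandshake(ip, func(hh *pendingHandshake) {
		hh.queued = append(hh.queued, []byte("first packet"))
	}); !ready {
		fmt.Println("handshake pending, packets queued:", len(m.pending[ip].queued))
	}
}
```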
@@ -318,10 +459,10 @@ var (
// ErrLocalIndexCollision if we already have an entry in the main or pending // ErrLocalIndexCollision if we already have an entry in the main or pending
// hostmap for the hostinfo.localIndexId. // hostmap for the hostinfo.localIndexId.
func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, f *Interface) (*HostInfo, error) { func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, f *Interface) (*HostInfo, error) {
c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock()
c.mainHostMap.Lock() c.mainHostMap.Lock()
defer c.mainHostMap.Unlock() defer c.mainHostMap.Unlock()
c.Lock()
defer c.Unlock()
// Check if we already have a tunnel with this vpn ip // Check if we already have a tunnel with this vpn ip
existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp] existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
@@ -350,10 +491,10 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
return existingIndex, ErrLocalIndexCollision return existingIndex, ErrLocalIndexCollision
} }
existingIndex, found = c.pendingHostMap.Indexes[hostinfo.localIndexId] existingPendingIndex, found := c.indexes[hostinfo.localIndexId]
if found && existingIndex != hostinfo { if found && existingPendingIndex.hostinfo != hostinfo {
// We have a collision, but for a different hostinfo // We have a collision, but for a different hostinfo
return existingIndex, ErrLocalIndexCollision return existingPendingIndex.hostinfo, ErrLocalIndexCollision
} }
existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId] existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
@@ -372,47 +513,47 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
// Complete is a simpler version of CheckAndComplete when we already know we // Complete is a simpler version of CheckAndComplete when we already know we
// won't have a localIndexId collision because we already have an entry in the // won't have a localIndexId collision because we already have an entry in the
// pendingHostMap. An existing hostinfo is returned if there was one. // pendingHostMap. An existing hostinfo is returned if there was one.
func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) { func (hm *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
c.pendingHostMap.Lock() hm.mainHostMap.Lock()
defer c.pendingHostMap.Unlock() defer hm.mainHostMap.Unlock()
c.mainHostMap.Lock() hm.Lock()
defer c.mainHostMap.Unlock() defer hm.Unlock()
existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId] existingRemoteIndex, found := hm.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
if found && existingRemoteIndex != nil { if found && existingRemoteIndex != nil {
// We have a collision, but this can happen since we can't control // We have a collision, but this can happen since we can't control
// the remote ID. Just log about the situation as a note. // the remote ID. Just log about the situation as a note.
hostinfo.logger(c.l). hostinfo.logger(hm.l).
WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp). WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
Info("New host shadows existing host remoteIndex") Info("New host shadows existing host remoteIndex")
} }
// We need to remove from the pending hostmap first to avoid undoing work when after to the main hostmap. // We need to remove from the pending hostmap first to avoid undoing work when after to the main hostmap.
c.pendingHostMap.unlockedDeleteHostInfo(hostinfo) hm.unlockedDeleteHostInfo(hostinfo)
c.mainHostMap.unlockedAddHostInfo(hostinfo, f) hm.mainHostMap.unlockedAddHostInfo(hostinfo, f)
} }
// AddIndexHostInfo generates a unique localIndexId for this HostInfo // allocateIndex generates a unique localIndexId for this HostInfo
// and adds it to the pendingHostMap. Will error if we are unable to generate // and adds it to the pendingHostMap. Will error if we are unable to generate
// a unique localIndexId // a unique localIndexId
func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error { func (hm *HandshakeManager) allocateIndex(hh *HandshakeHostInfo) error {
c.pendingHostMap.Lock() hm.mainHostMap.RLock()
defer c.pendingHostMap.Unlock() defer hm.mainHostMap.RUnlock()
c.mainHostMap.RLock() hm.Lock()
defer c.mainHostMap.RUnlock() defer hm.Unlock()
for i := 0; i < 32; i++ { for i := 0; i < 32; i++ {
index, err := generateIndex(c.l) index, err := generateIndex(hm.l)
if err != nil { if err != nil {
return err return err
} }
_, inPending := c.pendingHostMap.Indexes[index] _, inPending := hm.indexes[index]
_, inMain := c.mainHostMap.Indexes[index] _, inMain := hm.mainHostMap.Indexes[index]
if !inMain && !inPending { if !inMain && !inPending {
h.localIndexId = index hh.hostinfo.localIndexId = index
c.pendingHostMap.Indexes[index] = h hm.indexes[index] = hh
return nil return nil
} }
} }
@@ -420,22 +561,90 @@ func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error {
return errors.New("failed to generate unique localIndexId") return errors.New("failed to generate unique localIndexId")
} }
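allocateIndex keeps the bounded retry loop but now checks the pending indexes map alongside the main hostmap under the new lock pair. generateIndex itself is outside this hunk; the sketch below is a hedged illustration of the overall idea, drawing a random 32-bit identifier from crypto/rand and retrying on collision (names are illustrative):

```go
package main

import (
	"crypto/rand"
	"encoding/binary"
	"errors"
	"fmt"
)

// randomIndex draws a non-zero 32-bit identifier from the system CSPRNG,
// roughly the role generateIndex plays in the loop above.
func randomIndex() (uint32, error) {
	var b [4]byte
	if _, err := rand.Read(b[:]); err != nil {
		return 0, err
	}
	idx := binary.BigEndian.Uint32(b[:])
	if idx == 0 {
		idx = 1 // reserve 0 as "unset"
	}
	return idx, nil
}

// allocate mirrors the retry loop: try a bounded number of times to find an
// index unused in both the pending and the main table.
func allocate(pending, main map[uint32]struct{}) (uint32, error) {
	for i := 0; i < 32; i++ {
		idx, err := randomIndex()
		if err != nil {
			return 0, err
		}
		if _, inPending := pending[idx]; inPending {
			continue
		}
		if _, inMain := main[idx]; inMain {
			continue
		}
		return idx, nil
	}
	return 0, errors.New("failed to generate unique localIndexId")
}

func main() {
	idx, err := allocate(map[uint32]struct{}{}, map[uint32]struct{}{})
	fmt.Println(idx, err)
}
```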
func (c *HandshakeManager) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
c.pendingHostMap.addRemoteIndexHostInfo(index, h)
}
func (c *HandshakeManager) DeleteHostInfo(hostinfo *HostInfo) { func (c *HandshakeManager) DeleteHostInfo(hostinfo *HostInfo) {
//l.Debugln("Deleting pending hostinfo :", hostinfo) c.Lock()
c.pendingHostMap.DeleteHostInfo(hostinfo) defer c.Unlock()
c.unlockedDeleteHostInfo(hostinfo)
} }
func (c *HandshakeManager) QueryIndex(index uint32) (*HostInfo, error) { func (c *HandshakeManager) unlockedDeleteHostInfo(hostinfo *HostInfo) {
return c.pendingHostMap.QueryIndex(index) delete(c.vpnIps, hostinfo.vpnIp)
if len(c.vpnIps) == 0 {
c.vpnIps = map[netip.Addr]*HandshakeHostInfo{}
}
delete(c.indexes, hostinfo.localIndexId)
if len(c.vpnIps) == 0 {
c.indexes = map[uint32]*HandshakeHostInfo{}
}
if c.l.Level >= logrus.DebugLevel {
c.l.WithField("hostMap", m{"mapTotalSize": len(c.vpnIps),
"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
Debug("Pending hostmap hostInfo deleted")
}
}
func (hm *HandshakeManager) QueryVpnIp(vpnIp netip.Addr) *HostInfo {
hh := hm.queryVpnIp(vpnIp)
if hh != nil {
return hh.hostinfo
}
return nil
}
func (hm *HandshakeManager) queryVpnIp(vpnIp netip.Addr) *HandshakeHostInfo {
hm.RLock()
defer hm.RUnlock()
return hm.vpnIps[vpnIp]
}
func (hm *HandshakeManager) QueryIndex(index uint32) *HostInfo {
hh := hm.queryIndex(index)
if hh != nil {
return hh.hostinfo
}
return nil
}
func (hm *HandshakeManager) queryIndex(index uint32) *HandshakeHostInfo {
hm.RLock()
defer hm.RUnlock()
return hm.indexes[index]
}
func (c *HandshakeManager) GetPreferredRanges() []netip.Prefix {
return c.mainHostMap.GetPreferredRanges()
}
func (c *HandshakeManager) ForEachVpnIp(f controlEach) {
c.RLock()
defer c.RUnlock()
for _, v := range c.vpnIps {
f(v.hostinfo)
}
}
func (c *HandshakeManager) ForEachIndex(f controlEach) {
c.RLock()
defer c.RUnlock()
for _, v := range c.indexes {
f(v.hostinfo)
}
} }
func (c *HandshakeManager) EmitStats() { func (c *HandshakeManager) EmitStats() {
c.pendingHostMap.EmitStats("pending") c.RLock()
c.mainHostMap.EmitStats("main") hostLen := len(c.vpnIps)
indexLen := len(c.indexes)
c.RUnlock()
metrics.GetOrRegisterGauge("hostmap.pending.hosts", nil).Update(int64(hostLen))
metrics.GetOrRegisterGauge("hostmap.pending.indexes", nil).Update(int64(indexLen))
c.mainHostMap.EmitStats()
} }
// Utility functions below // Utility functions below
@@ -462,6 +671,6 @@ func generateIndex(l *logrus.Logger) (uint32, error) {
return index, nil return index, nil
} }
func hsTimeout(tries int, interval time.Duration) time.Duration { func hsTimeout(tries int64, interval time.Duration) time.Duration {
return time.Duration(tries / 2 * ((2 * int(interval)) + (tries-1)*int(interval))) return time.Duration(tries / 2 * ((2 * int64(interval)) + (tries-1)*int64(interval)))
} }
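hsTimeout sizes the outbound timer wheel to cover the whole linear-backoff schedule: attempt k is re-queued after k*tryInterval, so the total wait is interval*(1+2+...+tries) = interval*tries*(tries+1)/2, which is the closed form above (note the integer division in tries/2, so odd retry counts round down slightly). A quick self-check with illustrative values, not asserting Nebula's actual defaults:

```go
package main

import (
	"fmt"
	"time"
)

// hsTimeout reproduces the closed form from the diff above.
func hsTimeout(tries int64, interval time.Duration) time.Duration {
	return time.Duration(tries / 2 * ((2 * int64(interval)) + (tries-1)*int64(interval)))
}

func main() {
	interval := 100 * time.Millisecond // illustrative values only
	tries := int64(10)

	// Sum the schedule directly: attempt k waits k*interval before the next try.
	var total time.Duration
	for k := int64(1); k <= tries; k++ {
		total += time.Duration(k) * interval
	}

	fmt.Println(hsTimeout(tries, interval), total) // both print 5.5s
}
```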


@@ -1,12 +1,12 @@
package nebula package nebula
import ( import (
"net" "net/netip"
"testing" "testing"
"time" "time"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/header" "github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test" "github.com/slackhq/nebula/test"
"github.com/slackhq/nebula/udp" "github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
@@ -14,59 +14,59 @@ import (
func Test_NewHandshakeManagerVpnIp(t *testing.T) { func Test_NewHandshakeManagerVpnIp(t *testing.T) {
l := test.NewLogger() l := test.NewLogger()
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24") vpncidr := netip.MustParsePrefix("172.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24") localrange := netip.MustParsePrefix("10.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24") ip := netip.MustParseAddr("172.1.1.2")
ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
preferredRanges := []*net.IPNet{localrange} preferredRanges := []netip.Prefix{localrange}
mw := &mockEncWriter{} mainHM := newHostMap(l, vpncidr)
mainHM := NewHostMap(l, "test", vpncidr, preferredRanges) mainHM.preferredRanges.Store(&preferredRanges)
lh := newTestLighthouse() lh := newTestLighthouse()
blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig) cs := &CertState{
RawCertificate: []byte{},
now := time.Now() PrivateKey: []byte{},
blah.NextOutboundHandshakeTimerTick(now, mw) Certificate: &cert.NebulaCertificate{},
RawCertificateNoKey: []byte{},
var initCalled bool
initFunc := func(*HostInfo) {
initCalled = true
} }
i := blah.AddVpnIp(ip, initFunc) blah := NewHandshakeManager(l, mainHM, lh, &udp.NoopConn{}, defaultHandshakeConfig)
assert.True(t, initCalled) blah.f = &Interface{handshakeManager: blah, pki: &PKI{}, l: l}
blah.f.pki.cs.Store(cs)
initCalled = false now := time.Now()
i2 := blah.AddVpnIp(ip, initFunc) blah.NextOutboundHandshakeTimerTick(now)
assert.False(t, initCalled)
i := blah.StartHandshake(ip, nil)
i2 := blah.StartHandshake(ip, nil)
assert.Same(t, i, i2) assert.Same(t, i, i2)
i.remotes = NewRemoteList(nil) i.remotes = NewRemoteList(nil)
i.HandshakeReady = true
// Adding something to pending should not affect the main hostmap // Adding something to pending should not affect the main hostmap
assert.Len(t, mainHM.Hosts, 0) assert.Len(t, mainHM.Hosts, 0)
// Confirm they are in the pending index list // Confirm they are in the pending index list
assert.Contains(t, blah.pendingHostMap.Hosts, ip) assert.Contains(t, blah.vpnIps, ip)
// Jump ahead `HandshakeRetries` ticks, offset by one to get the sleep logic right // Jump ahead `HandshakeRetries` ticks, offset by one to get the sleep logic right
for i := 1; i <= DefaultHandshakeRetries+1; i++ { for i := 1; i <= DefaultHandshakeRetries+1; i++ {
now = now.Add(time.Duration(i) * DefaultHandshakeTryInterval) now = now.Add(time.Duration(i) * DefaultHandshakeTryInterval)
blah.NextOutboundHandshakeTimerTick(now, mw) blah.NextOutboundHandshakeTimerTick(now)
} }
// Confirm they are still in the pending index list // Confirm they are still in the pending index list
assert.Contains(t, blah.pendingHostMap.Hosts, ip) assert.Contains(t, blah.vpnIps, ip)
// Tick 1 more time, a minute will certainly flush it out // Tick 1 more time, a minute will certainly flush it out
blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute), mw) blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute))
// Confirm they have been removed // Confirm they have been removed
assert.NotContains(t, blah.pendingHostMap.Hosts, ip) assert.NotContains(t, blah.vpnIps, ip)
} }
func testCountTimerWheelEntries(tw *LockingTimerWheel[iputil.VpnIp]) (c int) { func testCountTimerWheelEntries(tw *LockingTimerWheel[netip.Addr]) (c int) {
for _, i := range tw.t.wheel { for _, i := range tw.t.wheel {
n := i.Head n := i.Head
for n != nil { for n != nil {
@@ -80,7 +80,7 @@ func testCountTimerWheelEntries(tw *LockingTimerWheel[iputil.VpnIp]) (c int) {
type mockEncWriter struct { type mockEncWriter struct {
} }
func (mw *mockEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) { func (mw *mockEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp netip.Addr, p, nb, out []byte) {
return return
} }
@@ -92,4 +92,4 @@ func (mw *mockEncWriter) SendMessageToHostInfo(t header.MessageType, st header.M
return return
} }
func (mw *mockEncWriter) Handshake(vpnIP iputil.VpnIp) {} func (mw *mockEncWriter) Handshake(vpnIP netip.Addr) {}


@@ -2,24 +2,25 @@ package nebula
import ( import (
"errors" "errors"
"fmt"
"net" "net"
"net/netip"
"slices"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/gaissmai/bart"
"github.com/rcrowley/go-metrics" "github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert" "github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/cidr" "github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/header" "github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
) )
// const ProbeLen = 100 // const ProbeLen = 100
const PromoteEvery = 1000 const defaultPromoteEvery = 1000 // Count of packets sent before we try moving a tunnel to a preferred underlay ip address
const ReQueryEvery = 5000 const defaultReQueryEvery = 5000 // Count of packets sent before re-querying a hostinfo to the lighthouse
const defaultReQueryWait = time.Minute // Minimum amount of seconds to wait before re-querying a hostinfo the lighthouse. Evaluated every ReQueryEvery
const MaxRemotes = 10 const MaxRemotes = 10
// MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip // MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip
@@ -34,6 +35,7 @@ const (
Requested = iota Requested = iota
PeerRequested PeerRequested
Established Established
Disestablished
) )
const ( const (
@@ -47,19 +49,17 @@ type Relay struct {
State int State int
LocalIndex uint32 LocalIndex uint32
RemoteIndex uint32 RemoteIndex uint32
PeerIp iputil.VpnIp PeerIp netip.Addr
} }
type HostMap struct { type HostMap struct {
sync.RWMutex //Because we concurrently read and write to our maps sync.RWMutex //Because we concurrently read and write to our maps
name string
Indexes map[uint32]*HostInfo Indexes map[uint32]*HostInfo
Relays map[uint32]*HostInfo // Maps a Relay IDX to a Relay HostInfo object Relays map[uint32]*HostInfo // Maps a Relay IDX to a Relay HostInfo object
RemoteIndexes map[uint32]*HostInfo RemoteIndexes map[uint32]*HostInfo
Hosts map[iputil.VpnIp]*HostInfo Hosts map[netip.Addr]*HostInfo
preferredRanges []*net.IPNet preferredRanges atomic.Pointer[[]netip.Prefix]
vpnCIDR *net.IPNet vpnCIDR netip.Prefix
metricsEnabled bool
l *logrus.Logger l *logrus.Logger
} }
@@ -69,15 +69,42 @@ type HostMap struct {
type RelayState struct { type RelayState struct {
sync.RWMutex sync.RWMutex
relays map[iputil.VpnIp]struct{} // Set of VpnIp's of Hosts to use as relays to access this peer relays []netip.Addr // Ordered set of VpnIp's of Hosts to use as relays to access this peer
relayForByIp map[iputil.VpnIp]*Relay // Maps VpnIps of peers for which this HostInfo is a relay to some Relay info relayForByIp map[netip.Addr]*Relay // Maps VpnIps of peers for which this HostInfo is a relay to some Relay info
relayForByIdx map[uint32]*Relay // Maps a local index to some Relay info relayForByIdx map[uint32]*Relay // Maps a local index to some Relay info
} }
func (rs *RelayState) DeleteRelay(ip iputil.VpnIp) { func (rs *RelayState) DeleteRelay(ip netip.Addr) {
rs.Lock() rs.Lock()
defer rs.Unlock() defer rs.Unlock()
delete(rs.relays, ip) for idx, val := range rs.relays {
if val == ip {
rs.relays = append(rs.relays[:idx], rs.relays[idx+1:]...)
return
}
}
}
func (rs *RelayState) UpdateRelayForByIpState(vpnIp netip.Addr, state int) {
rs.Lock()
defer rs.Unlock()
if r, ok := rs.relayForByIp[vpnIp]; ok {
newRelay := *r
newRelay.State = state
rs.relayForByIp[newRelay.PeerIp] = &newRelay
rs.relayForByIdx[newRelay.LocalIndex] = &newRelay
}
}
func (rs *RelayState) UpdateRelayForByIdxState(idx uint32, state int) {
rs.Lock()
defer rs.Unlock()
if r, ok := rs.relayForByIdx[idx]; ok {
newRelay := *r
newRelay.State = state
rs.relayForByIp[newRelay.PeerIp] = &newRelay
rs.relayForByIdx[newRelay.LocalIndex] = &newRelay
}
} }
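UpdateRelayForByIpState and UpdateRelayForByIdxState update relay state copy-on-write: they clone the Relay, change the state on the clone, and re-point both lookup maps at it, so code already holding the old *Relay keeps a consistent (if stale) snapshot. A small standalone sketch of the pattern with made-up types:

```go
package main

import (
	"fmt"
	"sync"
)

// relay is the value stored in the state maps; updates replace the pointer
// rather than mutating the struct in place, as UpdateRelayForByIpState does.
type relay struct {
	peer  string
	idx   uint32
	state int
}

type relayTable struct {
	mu    sync.RWMutex
	byIP  map[string]*relay
	byIdx map[uint32]*relay
}

// setState copies the existing entry, changes the state on the copy, and
// re-points both indexes at the copy. Readers that already hold the old
// pointer are unaffected.
func (t *relayTable) setState(peer string, state int) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if r, ok := t.byIP[peer]; ok {
		nr := *r
		nr.state = state
		t.byIP[nr.peer] = &nr
		t.byIdx[nr.idx] = &nr
	}
}

func main() {
	r := &relay{peer: "10.0.0.9", idx: 7, state: 0}
	t := &relayTable{
		byIP:  map[string]*relay{r.peer: r},
		byIdx: map[uint32]*relay{r.idx: r},
	}
	old := t.byIP["10.0.0.9"]
	t.setState("10.0.0.9", 2)
	fmt.Println(old.state, t.byIP["10.0.0.9"].state) // 0 2
}
```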
func (rs *RelayState) CopyAllRelayFor() []*Relay { func (rs *RelayState) CopyAllRelayFor() []*Relay {
@@ -90,33 +117,33 @@ func (rs *RelayState) CopyAllRelayFor() []*Relay {
return ret return ret
} }
func (rs *RelayState) GetRelayForByIp(ip iputil.VpnIp) (*Relay, bool) { func (rs *RelayState) GetRelayForByIp(ip netip.Addr) (*Relay, bool) {
rs.RLock() rs.RLock()
defer rs.RUnlock() defer rs.RUnlock()
r, ok := rs.relayForByIp[ip] r, ok := rs.relayForByIp[ip]
return r, ok return r, ok
} }
func (rs *RelayState) InsertRelayTo(ip iputil.VpnIp) { func (rs *RelayState) InsertRelayTo(ip netip.Addr) {
rs.Lock() rs.Lock()
defer rs.Unlock() defer rs.Unlock()
rs.relays[ip] = struct{}{} if !slices.Contains(rs.relays, ip) {
rs.relays = append(rs.relays, ip)
}
} }
func (rs *RelayState) CopyRelayIps() []iputil.VpnIp { func (rs *RelayState) CopyRelayIps() []netip.Addr {
ret := make([]netip.Addr, len(rs.relays))
rs.RLock() rs.RLock()
defer rs.RUnlock() defer rs.RUnlock()
ret := make([]iputil.VpnIp, 0, len(rs.relays)) copy(ret, rs.relays)
for ip := range rs.relays {
ret = append(ret, ip)
}
return ret return ret
} }
func (rs *RelayState) CopyRelayForIps() []iputil.VpnIp { func (rs *RelayState) CopyRelayForIps() []netip.Addr {
rs.RLock() rs.RLock()
defer rs.RUnlock() defer rs.RUnlock()
currentRelays := make([]iputil.VpnIp, 0, len(rs.relayForByIp)) currentRelays := make([]netip.Addr, 0, len(rs.relayForByIp))
for relayIp := range rs.relayForByIp { for relayIp := range rs.relayForByIp {
currentRelays = append(currentRelays, relayIp) currentRelays = append(currentRelays, relayIp)
} }
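relays changes from a map-backed set to an ordered slice, so InsertRelayTo must deduplicate explicitly and DeleteRelay filters by value; the payoff is deterministic iteration order whenever the list is copied. A standalone sketch of the same slice-as-ordered-set operations using the standard slices package:

```go
package main

import (
	"fmt"
	"net/netip"
	"slices"
)

// orderedSet keeps relay addresses in insertion order with no duplicates,
// matching the behavior of the relays slice above.
type orderedSet struct {
	addrs []netip.Addr
}

func (s *orderedSet) insert(ip netip.Addr) {
	if !slices.Contains(s.addrs, ip) {
		s.addrs = append(s.addrs, ip)
	}
}

func (s *orderedSet) remove(ip netip.Addr) {
	for i, v := range s.addrs {
		if v == ip {
			s.addrs = append(s.addrs[:i], s.addrs[i+1:]...)
			return
		}
	}
}

func (s *orderedSet) copyAll() []netip.Addr {
	out := make([]netip.Addr, len(s.addrs))
	copy(out, s.addrs) // callers get a stable snapshot, like CopyRelayIps
	return out
}

func main() {
	var s orderedSet
	s.insert(netip.MustParseAddr("10.0.0.1"))
	s.insert(netip.MustParseAddr("10.0.0.2"))
	s.insert(netip.MustParseAddr("10.0.0.1")) // deduplicated
	s.remove(netip.MustParseAddr("10.0.0.2"))
	fmt.Println(s.copyAll()) // [10.0.0.1]
}
```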
@@ -133,19 +160,7 @@ func (rs *RelayState) CopyRelayForIdxs() []uint32 {
return ret return ret
} }
func (rs *RelayState) RemoveRelay(localIdx uint32) (iputil.VpnIp, bool) { func (rs *RelayState) CompleteRelayByIP(vpnIp netip.Addr, remoteIdx uint32) bool {
rs.Lock()
defer rs.Unlock()
r, ok := rs.relayForByIdx[localIdx]
if !ok {
return iputil.VpnIp(0), false
}
delete(rs.relayForByIdx, localIdx)
delete(rs.relayForByIp, r.PeerIp)
return r.PeerIp, true
}
func (rs *RelayState) CompleteRelayByIP(vpnIp iputil.VpnIp, remoteIdx uint32) bool {
rs.Lock() rs.Lock()
defer rs.Unlock() defer rs.Unlock()
r, ok := rs.relayForByIp[vpnIp] r, ok := rs.relayForByIp[vpnIp]
@@ -175,7 +190,7 @@ func (rs *RelayState) CompleteRelayByIdx(localIdx uint32, remoteIdx uint32) (*Re
return &newRelay, true return &newRelay, true
} }
func (rs *RelayState) QueryRelayForByIp(vpnIp iputil.VpnIp) (*Relay, bool) { func (rs *RelayState) QueryRelayForByIp(vpnIp netip.Addr) (*Relay, bool) {
rs.RLock() rs.RLock()
defer rs.RUnlock() defer rs.RUnlock()
r, ok := rs.relayForByIp[vpnIp] r, ok := rs.relayForByIp[vpnIp]
@@ -189,7 +204,7 @@ func (rs *RelayState) QueryRelayForByIdx(idx uint32) (*Relay, bool) {
return r, ok return r, ok
} }
func (rs *RelayState) InsertRelay(ip iputil.VpnIp, idx uint32, r *Relay) { func (rs *RelayState) InsertRelay(ip netip.Addr, idx uint32, r *Relay) {
rs.Lock() rs.Lock()
defer rs.Unlock() defer rs.Unlock()
rs.relayForByIp[ip] = r rs.relayForByIp[ip] = r
@@ -197,25 +212,23 @@ func (rs *RelayState) InsertRelay(ip iputil.VpnIp, idx uint32, r *Relay) {
} }
type HostInfo struct { type HostInfo struct {
sync.RWMutex remote netip.AddrPort
remotes *RemoteList
promoteCounter atomic.Uint32
ConnectionState *ConnectionState
remoteIndexId uint32
localIndexId uint32
vpnIp netip.Addr
remoteCidr *bart.Table[struct{}]
relayState RelayState
remote *udp.Addr // HandshakePacket records the packets used to create this hostinfo
remotes *RemoteList // We need these to avoid replayed handshake packets creating new hostinfos which causes churn
promoteCounter atomic.Uint32 HandshakePacket map[uint8][]byte
ConnectionState *ConnectionState
handshakeStart time.Time //todo: this an entry in the handshake manager // nextLHQuery is the earliest we can ask the lighthouse for new information.
HandshakeReady bool //todo: being in the manager means you are ready // This is used to limit lighthouse re-queries in chatty clients
HandshakeCounter int //todo: another handshake manager entry nextLHQuery atomic.Int64
HandshakeLastRemotes []*udp.Addr //todo: another handshake manager entry, which remotes we sent to last time
HandshakeComplete bool //todo: this should go away in favor of ConnectionState.ready
HandshakePacket map[uint8][]byte //todo: this is other handshake manager entry
packetStore []*cachedPacket //todo: this is other handshake manager entry
remoteIndexId uint32
localIndexId uint32
vpnIp iputil.VpnIp
recvError int
remoteCidr *cidr.Tree4
relayState RelayState
// lastRebindCount is the other side of Interface.rebindCount, if these values don't match then we need to ask LH // lastRebindCount is the other side of Interface.rebindCount, if these values don't match then we need to ask LH
// for a punch from the remote end of this tunnel. The goal being to prime their conntrack for our traffic just like // for a punch from the remote end of this tunnel. The goal being to prime their conntrack for our traffic just like
@@ -228,11 +241,19 @@ type HostInfo struct {
lastHandshakeTime uint64 lastHandshakeTime uint64
lastRoam time.Time lastRoam time.Time
lastRoamRemote *udp.Addr lastRoamRemote netip.AddrPort
// Used to track other hostinfos for this vpn ip since only 1 can be primary // Used to track other hostinfos for this vpn ip since only 1 can be primary
// Synchronised via hostmap lock and not the hostinfo lock. // Synchronised via hostmap lock and not the hostinfo lock.
next, prev *HostInfo next, prev *HostInfo
//TODO: in, out, and others might benefit from being an atomic.Int32. We could collapse connectionManager pendingDeletion, relayUsed, and in/out into this 1 thing
in, out, pendingDeletion atomic.Bool
// lastUsed tracks the last time ConnectionManager checked the tunnel and it was in use.
// This value will be behind against actual tunnel utilization in the hot path.
// This should only be used by the ConnectionManagers ticker routine.
lastUsed time.Time
} }
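HostInfo also picks up lock-free liveness flags (in, out, pendingDeletion as atomic.Bool) plus a ticker-owned lastUsed timestamp for the connection manager. The manager itself is outside this section, so the following is only a hedged sketch of how such flags are typically consumed: the datapath sets them, and a periodic pass swaps them back to false and decides whether the tunnel looks idle:

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// tunnel carries the same style of flags added to HostInfo above: the
// datapath flips them without locks, a maintenance ticker reads and resets.
type tunnel struct {
	in, out, pendingDeletion atomic.Bool
	lastUsed                 time.Time // only touched by the ticker goroutine
}

// onPacket is what a hot path would call when traffic flows.
func (t *tunnel) onPacket(outbound bool) {
	if outbound {
		t.out.Store(true)
	} else {
		t.in.Store(true)
	}
}

// tick is a simplified maintenance pass: if traffic was seen since the last
// tick, refresh lastUsed; otherwise flag the tunnel as a deletion candidate.
func (t *tunnel) tick(now time.Time) {
	sawIn := t.in.Swap(false)
	sawOut := t.out.Swap(false)
	if sawIn || sawOut {
		t.lastUsed = now
		t.pendingDeletion.Store(false)
		return
	}
	t.pendingDeletion.Store(true)
}

func main() {
	var tn tunnel
	tn.onPacket(true)
	tn.tick(time.Now())
	fmt.Println("pending deletion:", tn.pendingDeletion.Load()) // false
}
```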
type ViaSender struct { type ViaSender struct {
@@ -255,26 +276,57 @@ type cachedPacketMetrics struct {
dropped metrics.Counter dropped metrics.Counter
} }
func NewHostMap(l *logrus.Logger, name string, vpnCIDR *net.IPNet, preferredRanges []*net.IPNet) *HostMap { func NewHostMapFromConfig(l *logrus.Logger, vpnCIDR netip.Prefix, c *config.C) *HostMap {
h := map[iputil.VpnIp]*HostInfo{} hm := newHostMap(l, vpnCIDR)
i := map[uint32]*HostInfo{}
r := map[uint32]*HostInfo{} hm.reload(c, true)
relays := map[uint32]*HostInfo{} c.RegisterReloadCallback(func(c *config.C) {
m := HostMap{ hm.reload(c, false)
name: name, })
Indexes: i,
Relays: relays, l.WithField("network", hm.vpnCIDR.String()).
RemoteIndexes: r, WithField("preferredRanges", hm.GetPreferredRanges()).
Hosts: h, Info("Main HostMap created")
preferredRanges: preferredRanges,
vpnCIDR: vpnCIDR, return hm
l: l,
}
return &m
} }
// UpdateStats takes a name and reports host and index counts to the stats collection system func newHostMap(l *logrus.Logger, vpnCIDR netip.Prefix) *HostMap {
func (hm *HostMap) EmitStats(name string) { return &HostMap{
Indexes: map[uint32]*HostInfo{},
Relays: map[uint32]*HostInfo{},
RemoteIndexes: map[uint32]*HostInfo{},
Hosts: map[netip.Addr]*HostInfo{},
vpnCIDR: vpnCIDR,
l: l,
}
}
func (hm *HostMap) reload(c *config.C, initial bool) {
if initial || c.HasChanged("preferred_ranges") {
var preferredRanges []netip.Prefix
rawPreferredRanges := c.GetStringSlice("preferred_ranges", []string{})
for _, rawPreferredRange := range rawPreferredRanges {
preferredRange, err := netip.ParsePrefix(rawPreferredRange)
if err != nil {
hm.l.WithError(err).WithField("range", rawPreferredRanges).Warn("Failed to parse preferred ranges, ignoring")
continue
}
preferredRanges = append(preferredRanges, preferredRange)
}
oldRanges := hm.preferredRanges.Swap(&preferredRanges)
if !initial {
hm.l.WithField("oldPreferredRanges", *oldRanges).WithField("newPreferredRanges", preferredRanges).Info("preferred_ranges changed")
}
}
}
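preferred_ranges is now reloadable: the parsed prefixes live behind an atomic.Pointer[[]netip.Prefix], so a config reload swaps the whole slice while hot-path readers call GetPreferredRanges without taking the hostmap lock. A minimal sketch of that swap/load pattern with a hypothetical holder type:

```go
package main

import (
	"fmt"
	"net/netip"
	"sync/atomic"
)

// rangeHolder publishes an immutable slice of preferred ranges that readers
// load without locking, mirroring the atomic.Pointer field above.
type rangeHolder struct {
	preferred atomic.Pointer[[]netip.Prefix]
}

// reload parses the configured strings and atomically replaces the slice,
// like HostMap.reload does on a preferred_ranges change.
func (r *rangeHolder) reload(raw []string) {
	var ranges []netip.Prefix
	for _, s := range raw {
		p, err := netip.ParsePrefix(s)
		if err != nil {
			continue // the real code logs and skips bad entries
		}
		ranges = append(ranges, p)
	}
	r.preferred.Store(&ranges)
}

// get returns the current snapshot; it must be stored at least once before
// the first load, as the NOTE in GetPreferredRanges warns.
func (r *rangeHolder) get() []netip.Prefix {
	return *r.preferred.Load()
}

func main() {
	var r rangeHolder
	r.reload([]string{"192.168.0.0/16", "not-a-prefix"})
	fmt.Println(r.get()) // [192.168.0.0/16]
}
```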
// EmitStats reports host, index, and relay counts to the stats collection system
func (hm *HostMap) EmitStats() {
hm.RLock() hm.RLock()
hostLen := len(hm.Hosts) hostLen := len(hm.Hosts)
indexLen := len(hm.Indexes) indexLen := len(hm.Indexes)
@@ -282,10 +334,10 @@ func (hm *HostMap) EmitStats(name string) {
relaysLen := len(hm.Relays) relaysLen := len(hm.Relays)
hm.RUnlock() hm.RUnlock()
metrics.GetOrRegisterGauge("hostmap."+name+".hosts", nil).Update(int64(hostLen)) metrics.GetOrRegisterGauge("hostmap.main.hosts", nil).Update(int64(hostLen))
metrics.GetOrRegisterGauge("hostmap."+name+".indexes", nil).Update(int64(indexLen)) metrics.GetOrRegisterGauge("hostmap.main.indexes", nil).Update(int64(indexLen))
metrics.GetOrRegisterGauge("hostmap."+name+".remoteIndexes", nil).Update(int64(remoteIndexLen)) metrics.GetOrRegisterGauge("hostmap.main.remoteIndexes", nil).Update(int64(remoteIndexLen))
metrics.GetOrRegisterGauge("hostmap."+name+".relayIndexes", nil).Update(int64(relaysLen)) metrics.GetOrRegisterGauge("hostmap.main.relayIndexes", nil).Update(int64(relaysLen))
} }
func (hm *HostMap) RemoveRelay(localIdx uint32) { func (hm *HostMap) RemoveRelay(localIdx uint32) {
@@ -299,88 +351,6 @@ func (hm *HostMap) RemoveRelay(localIdx uint32) {
hm.Unlock() hm.Unlock()
} }
func (hm *HostMap) GetIndexByVpnIp(vpnIp iputil.VpnIp) (uint32, error) {
hm.RLock()
if i, ok := hm.Hosts[vpnIp]; ok {
index := i.localIndexId
hm.RUnlock()
return index, nil
}
hm.RUnlock()
return 0, errors.New("vpn IP not found")
}
func (hm *HostMap) Add(ip iputil.VpnIp, hostinfo *HostInfo) {
hm.Lock()
hm.Hosts[ip] = hostinfo
hm.Unlock()
}
func (hm *HostMap) AddVpnIp(vpnIp iputil.VpnIp, init func(hostinfo *HostInfo)) (hostinfo *HostInfo, created bool) {
hm.RLock()
if h, ok := hm.Hosts[vpnIp]; !ok {
hm.RUnlock()
h = &HostInfo{
vpnIp: vpnIp,
HandshakePacket: make(map[uint8][]byte, 0),
relayState: RelayState{
relays: map[iputil.VpnIp]struct{}{},
relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
}
if init != nil {
init(h)
}
hm.Lock()
hm.Hosts[vpnIp] = h
hm.Unlock()
return h, true
} else {
hm.RUnlock()
return h, false
}
}
// Only used by pendingHostMap when the remote index is not initially known
func (hm *HostMap) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
hm.Lock()
h.remoteIndexId = index
hm.RemoteIndexes[index] = h
hm.Unlock()
if hm.l.Level > logrus.DebugLevel {
hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes),
"hostinfo": m{"existing": true, "localIndexId": h.localIndexId, "hostId": h.vpnIp}}).
Debug("Hostmap remoteIndex added")
}
}
// DeleteReverseIndex is used to clean up on recv_error
// This function should only ever be called on the pending hostmap
func (hm *HostMap) DeleteReverseIndex(index uint32) {
hm.Lock()
hostinfo, ok := hm.RemoteIndexes[index]
if ok {
delete(hm.Indexes, hostinfo.localIndexId)
delete(hm.RemoteIndexes, index)
// Check if we have an entry under hostId that matches the same hostinfo
// instance. Clean it up as well if we do (they might not match in pendingHostmap)
var hostinfo2 *HostInfo
hostinfo2, ok = hm.Hosts[hostinfo.vpnIp]
if ok && hostinfo2 == hostinfo {
delete(hm.Hosts, hostinfo.vpnIp)
}
}
hm.Unlock()
if hm.l.Level >= logrus.DebugLevel {
hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes)}).
Debug("Hostmap remote index deleted")
}
}
// DeleteHostInfo will fully unlink the hostinfo and return true if it was the final hostinfo for this vpn ip // DeleteHostInfo will fully unlink the hostinfo and return true if it was the final hostinfo for this vpn ip
func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool { func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
// Delete the host itself, ensuring it's not modified anymore // Delete the host itself, ensuring it's not modified anymore
@@ -393,12 +363,6 @@ func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
return final return final
} }
func (hm *HostMap) DeleteRelayIdx(localIdx uint32) {
hm.Lock()
defer hm.Unlock()
delete(hm.RemoteIndexes, localIdx)
}
func (hm *HostMap) MakePrimary(hostinfo *HostInfo) { func (hm *HostMap) MakePrimary(hostinfo *HostInfo) {
hm.Lock() hm.Lock()
defer hm.Unlock() defer hm.Unlock()
@@ -432,11 +396,12 @@ func (hm *HostMap) unlockedMakePrimary(hostinfo *HostInfo) {
func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) { func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
primary, ok := hm.Hosts[hostinfo.vpnIp] primary, ok := hm.Hosts[hostinfo.vpnIp]
isLastHostinfo := hostinfo.next == nil && hostinfo.prev == nil
if ok && primary == hostinfo { if ok && primary == hostinfo {
// The vpnIp pointer points to the same hostinfo as the local index id, we can remove it // The vpnIp pointer points to the same hostinfo as the local index id, we can remove it
delete(hm.Hosts, hostinfo.vpnIp) delete(hm.Hosts, hostinfo.vpnIp)
if len(hm.Hosts) == 0 { if len(hm.Hosts) == 0 {
hm.Hosts = map[iputil.VpnIp]*HostInfo{} hm.Hosts = map[netip.Addr]*HostInfo{}
} }
if hostinfo.next != nil { if hostinfo.next != nil {
@@ -476,69 +441,60 @@ func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
} }
if hm.l.Level >= logrus.DebugLevel { if hm.l.Level >= logrus.DebugLevel {
hm.l.WithField("hostMap", m{"mapName": hm.name, "mapTotalSize": len(hm.Hosts), hm.l.WithField("hostMap", m{"mapTotalSize": len(hm.Hosts),
"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}). "vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
Debug("Hostmap hostInfo deleted") Debug("Hostmap hostInfo deleted")
} }
if isLastHostinfo {
// I have lost connectivity to my peers. My relay tunnel is likely broken. Mark the next
// hops as 'Disestablished' so that new relay tunnels are created in the future.
hm.unlockedDisestablishVpnAddrRelayFor(hostinfo)
}
// Clean up any local relay indexes for which I am acting as a relay hop
for _, localRelayIdx := range hostinfo.relayState.CopyRelayForIdxs() { for _, localRelayIdx := range hostinfo.relayState.CopyRelayForIdxs() {
delete(hm.Relays, localRelayIdx) delete(hm.Relays, localRelayIdx)
} }
} }
func (hm *HostMap) QueryIndex(index uint32) (*HostInfo, error) { func (hm *HostMap) QueryIndex(index uint32) *HostInfo {
//TODO: we probably just want to return bool instead of error, or at least a static error
hm.RLock() hm.RLock()
if h, ok := hm.Indexes[index]; ok { if h, ok := hm.Indexes[index]; ok {
hm.RUnlock() hm.RUnlock()
return h, nil return h
} else { } else {
hm.RUnlock() hm.RUnlock()
return nil, errors.New("unable to find index") return nil
} }
} }
// Retrieves a HostInfo by Index. Returns whether the HostInfo is primary at time of query. func (hm *HostMap) QueryRelayIndex(index uint32) *HostInfo {
// This helper exists so that the hostinfo.prev pointer can be read while the hostmap lock is held.
func (hm *HostMap) QueryIndexIsPrimary(index uint32) (*HostInfo, bool, error) {
//TODO: we probably just want to return bool instead of error, or at least a static error
hm.RLock()
if h, ok := hm.Indexes[index]; ok {
hm.RUnlock()
return h, h.prev == nil, nil
} else {
hm.RUnlock()
return nil, false, errors.New("unable to find index")
}
}
func (hm *HostMap) QueryRelayIndex(index uint32) (*HostInfo, error) {
//TODO: we probably just want to return bool instead of error, or at least a static error
hm.RLock() hm.RLock()
if h, ok := hm.Relays[index]; ok { if h, ok := hm.Relays[index]; ok {
hm.RUnlock() hm.RUnlock()
return h, nil return h
} else { } else {
hm.RUnlock() hm.RUnlock()
return nil, errors.New("unable to find index") return nil
} }
} }
func (hm *HostMap) QueryReverseIndex(index uint32) (*HostInfo, error) { func (hm *HostMap) QueryReverseIndex(index uint32) *HostInfo {
hm.RLock() hm.RLock()
if h, ok := hm.RemoteIndexes[index]; ok { if h, ok := hm.RemoteIndexes[index]; ok {
hm.RUnlock() hm.RUnlock()
return h, nil return h
} else { } else {
hm.RUnlock() hm.RUnlock()
return nil, fmt.Errorf("unable to find reverse index or connectionstate nil in %s hostmap", hm.name) return nil
} }
} }
func (hm *HostMap) QueryVpnIp(vpnIp iputil.VpnIp) (*HostInfo, error) { func (hm *HostMap) QueryVpnIp(vpnIp netip.Addr) *HostInfo {
return hm.queryVpnIp(vpnIp, nil) return hm.queryVpnIp(vpnIp, nil)
} }
func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp iputil.VpnIp) (*HostInfo, *Relay, error) { func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp netip.Addr) (*HostInfo, *Relay, error) {
hm.RLock() hm.RLock()
defer hm.RUnlock() defer hm.RUnlock()
@@ -556,26 +512,41 @@ func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp iputil.VpnIp) (*Host
return nil, nil, errors.New("unable to find host with relay") return nil, nil, errors.New("unable to find host with relay")
} }
// PromoteBestQueryVpnIp will attempt to lazily switch to the best remote every func (hm *HostMap) unlockedDisestablishVpnAddrRelayFor(hi *HostInfo) {
// `PromoteEvery` calls to this function for a given host. for _, relayHostIp := range hi.relayState.CopyRelayIps() {
func (hm *HostMap) PromoteBestQueryVpnIp(vpnIp iputil.VpnIp, ifce *Interface) (*HostInfo, error) { if h, ok := hm.Hosts[relayHostIp]; ok {
return hm.queryVpnIp(vpnIp, ifce) for h != nil {
h.relayState.UpdateRelayForByIpState(hi.vpnIp, Disestablished)
h = h.next
}
}
}
for _, rs := range hi.relayState.CopyAllRelayFor() {
if rs.Type == ForwardingType {
if h, ok := hm.Hosts[rs.PeerIp]; ok {
for h != nil {
h.relayState.UpdateRelayForByIpState(hi.vpnIp, Disestablished)
h = h.next
}
}
}
}
} }
func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) (*HostInfo, error) { func (hm *HostMap) queryVpnIp(vpnIp netip.Addr, promoteIfce *Interface) *HostInfo {
hm.RLock() hm.RLock()
if h, ok := hm.Hosts[vpnIp]; ok { if h, ok := hm.Hosts[vpnIp]; ok {
hm.RUnlock() hm.RUnlock()
// Do not attempt promotion if you are a lighthouse // Do not attempt promotion if you are a lighthouse
if promoteIfce != nil && !promoteIfce.lightHouse.amLighthouse { if promoteIfce != nil && !promoteIfce.lightHouse.amLighthouse {
h.TryPromoteBest(hm.preferredRanges, promoteIfce) h.TryPromoteBest(hm.GetPreferredRanges(), promoteIfce)
} }
return h, nil return h
} }
hm.RUnlock() hm.RUnlock()
return nil, errors.New("unable to find host") return nil
} }
// unlockedAddHostInfo assumes you have a write-lock and will add a hostinfo object to the hostmap Indexes and RemoteIndexes maps. // unlockedAddHostInfo assumes you have a write-lock and will add a hostinfo object to the hostmap Indexes and RemoteIndexes maps.
@@ -598,7 +569,7 @@ func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo
if hm.l.Level >= logrus.DebugLevel { if hm.l.Level >= logrus.DebugLevel {
hm.l.WithField("hostMap", m{"mapName": hm.name, "vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts), hm.l.WithField("hostMap", m{"vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts),
"hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}). "hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}).
Debug("Hostmap vpnIp added") Debug("Hostmap vpnIp added")
} }
@@ -614,19 +585,39 @@ func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
 	}
 }
+func (hm *HostMap) GetPreferredRanges() []netip.Prefix {
+	//NOTE: if preferredRanges is ever not stored before a load this will fail to dereference a nil pointer
+	return *hm.preferredRanges.Load()
+}
+func (hm *HostMap) ForEachVpnIp(f controlEach) {
+	hm.RLock()
+	defer hm.RUnlock()
+	for _, v := range hm.Hosts {
+		f(v)
+	}
+}
+func (hm *HostMap) ForEachIndex(f controlEach) {
+	hm.RLock()
+	defer hm.RUnlock()
+	for _, v := range hm.Indexes {
+		f(v)
+	}
+}
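The new GetPreferredRanges accessor implies preferredRanges now lives behind an atomic pointer, so the slice can be swapped (for example on config reload) without holding the hostmap lock. A minimal, self-contained sketch of that pattern; the type and field names here are assumptions for illustration, not the project's actual definitions:

package main

import (
	"fmt"
	"net/netip"
	"sync/atomic"
)

// hostMapSketch is a stand-in for the real HostMap; only the field relevant
// to GetPreferredRanges is modeled here.
type hostMapSketch struct {
	preferredRanges atomic.Pointer[[]netip.Prefix]
}

// setPreferredRanges publishes a new slice; readers never see a partial update.
func (hm *hostMapSketch) setPreferredRanges(ranges []netip.Prefix) {
	hm.preferredRanges.Store(&ranges)
}

// getPreferredRanges mirrors the accessor above: it must only run after at
// least one Store, otherwise Load returns nil and the dereference panics.
func (hm *hostMapSketch) getPreferredRanges() []netip.Prefix {
	return *hm.preferredRanges.Load()
}

func main() {
	hm := &hostMapSketch{}
	hm.setPreferredRanges([]netip.Prefix{netip.MustParsePrefix("10.0.0.0/8")})
	fmt.Println(hm.getPreferredRanges()) // [10.0.0.0/8]
}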
 // TryPromoteBest handles re-querying lighthouses and probing for better paths
 // NOTE: It is an error to call this if you are a lighthouse since they should not roam clients!
-func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface) {
+func (i *HostInfo) TryPromoteBest(preferredRanges []netip.Prefix, ifce *Interface) {
 	c := i.promoteCounter.Add(1)
-	if c%PromoteEvery == 0 {
-		// The lock here is currently protecting i.remote access
-		i.RLock()
+	if c%ifce.tryPromoteEvery.Load() == 0 {
 		remote := i.remote
-		i.RUnlock()
 		// return early if we are already on a preferred remote
-		if remote != nil {
-			rIP := remote.IP
+		if remote.IsValid() {
+			rIP := remote.Addr()
 			for _, l := range preferredRanges {
 				if l.Contains(rIP) {
 					return
@@ -634,8 +625,8 @@ func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface)
 				}
 			}
 		}
-		i.remotes.ForEach(preferredRanges, func(addr *udp.Addr, preferred bool) {
-			if remote != nil && (addr == nil || !preferred) {
+		i.remotes.ForEach(preferredRanges, func(addr netip.AddrPort, preferred bool) {
+			if remote.IsValid() && (!addr.IsValid() || !preferred) {
 				return
 			}
@@ -646,65 +637,17 @@ func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface)
 	}
 	// Re query our lighthouses for new remotes occasionally
-	if c%ReQueryEvery == 0 && ifce.lightHouse != nil {
-		ifce.lightHouse.QueryServer(i.vpnIp, ifce)
-	}
-}
-func (i *HostInfo) cachePacket(l *logrus.Logger, t header.MessageType, st header.MessageSubType, packet []byte, f packetCallback, m *cachedPacketMetrics) {
-	//TODO: return the error so we can log with more context
-	if len(i.packetStore) < 100 {
-		tempPacket := make([]byte, len(packet))
-		copy(tempPacket, packet)
-		//l.WithField("trace", string(debug.Stack())).Error("Caching packet", tempPacket)
-		i.packetStore = append(i.packetStore, &cachedPacket{t, st, f, tempPacket})
-		if l.Level >= logrus.DebugLevel {
-			i.logger(l).
-				WithField("length", len(i.packetStore)).
-				WithField("stored", true).
-				Debugf("Packet store")
-		}
-	} else if l.Level >= logrus.DebugLevel {
-		m.dropped.Inc(1)
-		i.logger(l).
-			WithField("length", len(i.packetStore)).
-			WithField("stored", false).
-			Debugf("Packet store")
-	}
-}
+	if c%ifce.reQueryEvery.Load() == 0 && ifce.lightHouse != nil {
+		now := time.Now().UnixNano()
+		if now < i.nextLHQuery.Load() {
+			return
+		}
+		i.nextLHQuery.Store(now + ifce.reQueryWait.Load())
+		ifce.lightHouse.QueryServer(i.vpnIp)
+	}
+}
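The reworked re-query path adds a coarse rate limit on top of the counter check: a lighthouse query is only sent once the stored UnixNano deadline has passed, and the deadline is then pushed out by reQueryWait. A small, self-contained sketch of that throttle in isolation; the type and field names are modeled on the fields used above but are illustrative only:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// throttledQuery is an illustrative stand-alone version of the nextLHQuery gate.
type throttledQuery struct {
	nextAllowed atomic.Int64 // UnixNano timestamp of the next allowed query
	wait        atomic.Int64 // minimum gap between queries, in nanoseconds
}

// tryQuery runs fn at most once per wait interval; calls inside the window are dropped.
func (t *throttledQuery) tryQuery(fn func()) bool {
	now := time.Now().UnixNano()
	if now < t.nextAllowed.Load() {
		return false
	}
	t.nextAllowed.Store(now + t.wait.Load())
	fn()
	return true
}

func main() {
	t := &throttledQuery{}
	t.wait.Store(int64(time.Second))
	fmt.Println(t.tryQuery(func() {})) // true: first query goes out
	fmt.Println(t.tryQuery(func() {})) // false: still inside the wait window
}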
-// handshakeComplete will set the connection as ready to communicate, as well as flush any stored packets
-func (i *HostInfo) handshakeComplete(l *logrus.Logger, m *cachedPacketMetrics) {
-	//TODO: I'm not certain the distinction between handshake complete and ConnectionState being ready matters because:
-	//TODO: HandshakeComplete means send stored packets and ConnectionState.ready means we are ready to send
-	//TODO: if the transition from HandhsakeComplete to ConnectionState.ready happens all within this function they are identical
-	i.ConnectionState.queueLock.Lock()
-	i.HandshakeComplete = true
-	//TODO: this should be managed by the handshake state machine to set it based on how many handshake were seen.
-	// Clamping it to 2 gets us out of the woods for now
-	i.ConnectionState.messageCounter.Store(2)
-	if l.Level >= logrus.DebugLevel {
-		i.logger(l).Debugf("Sending %d stored packets", len(i.packetStore))
-	}
-	if len(i.packetStore) > 0 {
-		nb := make([]byte, 12, 12)
-		out := make([]byte, mtu)
-		for _, cp := range i.packetStore {
-			cp.callback(cp.messageType, cp.messageSubType, i, cp.packet, nb, out)
-		}
-		m.sent.Inc(int64(len(i.packetStore)))
-	}
-	i.remotes.ResetBlockedRemotes()
-	i.packetStore = make([]*cachedPacket, 0)
-	i.ConnectionState.ready = true
-	i.ConnectionState.queueLock.Unlock()
-}
 func (i *HostInfo) GetCert() *cert.NebulaCertificate {
 	if i.ConnectionState != nil {
 		return i.ConnectionState.peerCert
@@ -712,23 +655,23 @@ func (i *HostInfo) GetCert() *cert.NebulaCertificate {
 	return nil
 }
-func (i *HostInfo) SetRemote(remote *udp.Addr) {
+func (i *HostInfo) SetRemote(remote netip.AddrPort) {
 	// We copy here because we likely got this remote from a source that reuses the object
-	if !i.remote.Equals(remote) {
-		i.remote = remote.Copy()
-		i.remotes.LearnRemote(i.vpnIp, remote.Copy())
+	if i.remote != remote {
+		i.remote = remote
+		i.remotes.LearnRemote(i.vpnIp, remote)
 	}
 }
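The .Equals and .Copy calls disappear here because netip.AddrPort is a comparable value type: == compares address and port, the zero value reports IsValid() == false, and plain assignment copies the value, so no defensive Copy is needed. A short stdlib-only illustration:

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	a := netip.MustParseAddrPort("192.0.2.1:4242")
	b := netip.MustParseAddrPort("192.0.2.1:4242")
	var zero netip.AddrPort

	fmt.Println(a == b)         // true: plain == replaces udp.Addr.Equals
	fmt.Println(zero.IsValid()) // false: the zero value replaces a nil *udp.Addr
	c := a                      // value assignment replaces udp.Addr.Copy
	fmt.Println(c.Addr(), c.Port())
}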
 // SetRemoteIfPreferred returns true if the remote was changed. The lastRoam
 // time on the HostInfo will also be updated.
-func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
-	if newRemote == nil {
+func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote netip.AddrPort) bool {
+	if !newRemote.IsValid() {
 		// relays have nil udp Addrs
 		return false
 	}
 	currentRemote := i.remote
-	if currentRemote == nil {
+	if !currentRemote.IsValid() {
 		i.SetRemote(newRemote)
 		return true
 	}
@@ -736,13 +679,13 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
 	// NOTE: We do this loop here instead of calling `isPreferred` in
 	// remote_list.go so that we only have to loop over preferredRanges once.
 	newIsPreferred := false
-	for _, l := range hm.preferredRanges {
+	for _, l := range hm.GetPreferredRanges() {
 		// return early if we are already on a preferred remote
-		if l.Contains(currentRemote.IP) {
+		if l.Contains(currentRemote.Addr()) {
 			return false
 		}
-		if l.Contains(newRemote.IP) {
+		if l.Contains(newRemote.Addr()) {
 			newIsPreferred = true
 		}
 	}
@@ -750,7 +693,7 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
 	if newIsPreferred {
 		// Consider this a roaming event
 		i.lastRoam = time.Now()
-		i.lastRoamRemote = currentRemote.Copy()
+		i.lastRoamRemote = currentRemote
 		i.SetRemote(newRemote)
@@ -760,27 +703,26 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
 	return false
 }
-func (i *HostInfo) RecvErrorExceeded() bool {
-	if i.recvError < 3 {
-		i.recvError += 1
-		return false
-	}
-	return true
-}
 func (i *HostInfo) CreateRemoteCIDR(c *cert.NebulaCertificate) {
 	if len(c.Details.Ips) == 1 && len(c.Details.Subnets) == 0 {
 		// Simple case, no CIDRTree needed
 		return
 	}
-	remoteCidr := cidr.NewTree4()
+	remoteCidr := new(bart.Table[struct{}])
 	for _, ip := range c.Details.Ips {
-		remoteCidr.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
+		//TODO: IPV6-WORK what to do when ip is invalid?
+		nip, _ := netip.AddrFromSlice(ip.IP)
+		nip = nip.Unmap()
+		remoteCidr.Insert(netip.PrefixFrom(nip, nip.BitLen()), struct{}{})
 	}
 	for _, n := range c.Details.Subnets {
-		remoteCidr.AddCIDR(n, struct{}{})
+		//TODO: IPV6-WORK what to do when ip is invalid?
+		nip, _ := netip.AddrFromSlice(n.IP)
+		nip = nip.Unmap()
+		bits, _ := n.Mask.Size()
+		remoteCidr.Insert(netip.PrefixFrom(nip, bits), struct{}{})
 	}
 	i.remoteCidr = remoteCidr
 }
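CreateRemoteCIDR now builds a bart.Table keyed by netip.Prefix instead of the old cidr.Tree4: one host-length prefix per issued IP, plus any certificate subnets. A small sketch of how such a table is populated and queried; only Insert appears in the diff itself, and the Lookup call below assumes the github.com/gaissmai/bart longest-prefix-match API:

package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	// Populate the table the same way CreateRemoteCIDR does.
	remoteCidr := new(bart.Table[struct{}])
	ip := netip.MustParseAddr("10.1.2.3")
	remoteCidr.Insert(netip.PrefixFrom(ip, ip.BitLen()), struct{}{})
	remoteCidr.Insert(netip.MustParsePrefix("192.168.100.0/24"), struct{}{})

	// Longest-prefix match answers "is this source address covered by the peer's cert?"
	_, ok := remoteCidr.Lookup(netip.MustParseAddr("192.168.100.7"))
	fmt.Println(ok) // true: inside the certificate subnet
	_, ok = remoteCidr.Lookup(netip.MustParseAddr("10.9.9.9"))
	fmt.Println(ok) // false: not an issued IP or subnet
}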
@@ -805,9 +747,9 @@ func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry {
 // Utility functions
-func localIps(l *logrus.Logger, allowList *LocalAllowList) *[]net.IP {
+func localIps(l *logrus.Logger, allowList *LocalAllowList) []netip.Addr {
 	//FIXME: This function is pretty garbage
-	var ips []net.IP
+	var ips []netip.Addr
 	ifaces, _ := net.Interfaces()
 	for _, i := range ifaces {
 		allow := allowList.AllowName(i.Name)
@@ -829,20 +771,29 @@ func localIps(l *logrus.Logger, allowList *LocalAllowList) *[]net.IP {
 				ip = v.IP
 			}
+			nip, ok := netip.AddrFromSlice(ip)
+			if !ok {
+				if l.Level >= logrus.DebugLevel {
+					l.WithField("localIp", ip).Debug("ip was invalid for netip")
+				}
+				continue
+			}
+			nip = nip.Unmap()
 			//TODO: Filtering out link local for now, this is probably the most correct thing
 			//TODO: Would be nice to filter out SLAAC MAC based ips as well
-			if ip.IsLoopback() == false && !ip.IsLinkLocalUnicast() {
-				allow := allowList.Allow(ip)
+			if nip.IsLoopback() == false && nip.IsLinkLocalUnicast() == false {
+				allow := allowList.Allow(nip)
 				if l.Level >= logrus.TraceLevel {
-					l.WithField("localIp", ip).WithField("allow", allow).Trace("localAllowList.Allow")
+					l.WithField("localIp", nip).WithField("allow", allow).Trace("localAllowList.Allow")
 				}
 				if !allow {
 					continue
 				}
-				ips = append(ips, ip)
+				ips = append(ips, nip)
 			}
 		}
 	}
-	return &ips
+	return ips
 }
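The localIps rewrite converts each net.IP to a netip.Addr before filtering; the Unmap call matters because a 16-byte net.IP holding an IPv4 address otherwise becomes an IPv4-mapped IPv6 netip.Addr, which would not be contained by plain IPv4 prefixes. A short stdlib-only illustration:

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	// net.ParseIP stores IPv4 addresses in 16-byte form.
	legacy := net.ParseIP("192.168.1.10")

	mapped, _ := netip.AddrFromSlice(legacy)
	fmt.Println(mapped)       // ::ffff:192.168.1.10 (IPv4-mapped IPv6)
	fmt.Println(mapped.Is4()) // false

	addr := mapped.Unmap()
	fmt.Println(addr) // 192.168.1.10
	fmt.Println(netip.MustParsePrefix("192.168.1.0/24").Contains(addr)) // true
}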

Some files were not shown because too many files have changed in this diff.