Compare commits

...

72 Commits

Author SHA1 Message Date
Wade Simmons
e5945a60aa v1.8.1 (#1049)
Update CHANGELOG for Nebula v1.8.1
2023-12-19 15:11:25 -05:00
Nate Brown
072edd56b3 Fix re-entrant GetOrHandshake issues (#1044) 2023-12-19 11:58:31 -06:00
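For context on the deadlock fixed here: Go's sync.Mutex is not re-entrant, so a code path that takes a lock and then re-enters a function that takes the same lock blocks on itself forever. A toy reproduction of that shape (illustrative only, not the actual GetOrHandshake code):

package main

import "sync"

type hostMap struct{ mu sync.Mutex }

func (h *hostMap) getOrHandshake(reenter bool) {
	h.mu.Lock()
	defer h.mu.Unlock()
	if reenter {
		// Re-entrant call: Lock() below blocks on the mutex we already hold.
		h.getOrHandshake(false)
	}
}

func main() {
	(&hostMap{}).getOrHandshake(true) // fatal error: all goroutines are asleep - deadlock!
}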
dependabot[bot]
beb5f6bddc Bump golang.org/x/crypto from 0.16.0 to 0.17.0 (#1048)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.16.0 to 0.17.0.
- [Commits](https://github.com/golang/crypto/compare/v0.16.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-19 10:57:09 -05:00
dependabot[bot]
8be9792059 Bump actions/setup-go from 4 to 5 (#1039)
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 4 to 5.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](https://github.com/actions/setup-go/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-13 22:45:09 -06:00
John Maguire
af2fc48378 Fix mobile builds (#1035) 2023-12-06 16:18:21 -05:00
Wade Simmons
1d2f95e718 v1.8.0 (#1017)
Update CHANGELOG for Nebula v1.8.0
2023-12-06 14:38:58 -05:00
Lars Lehtonen
3a8743d511 cmd/nebula-cert: fix clobbered error (#1032)
* cmd/nebula-cert: fix clobbered error

Signed-off-by: Lars Lehtonen <lars.lehtonen@gmail.com>

* apply suggestions from Nate

This makes it much clearer what is happening in the code

---------

Signed-off-by: Lars Lehtonen <lars.lehtonen@gmail.com>
Co-authored-by: Wade Simmons <wsimmons@slack-corp.com>
2023-12-06 13:20:49 -05:00
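A "clobbered" error is one that gets overwritten before anyone checks it. A minimal sketch of the bug shape and the usual fix, with invented step functions (this is not the actual nebula-cert code):

package main

import (
	"errors"
	"fmt"
)

func stepA() error { return errors.New("stepA failed") }
func stepB() error { return nil }

func clobbered() error {
	err := stepA()
	err = stepB() // bug: stepA's error is overwritten before being checked
	return err    // stepA's failure is silently lost
}

func fixed() error {
	if err := stepA(); err != nil {
		return fmt.Errorf("stepA: %w", err)
	}
	if err := stepB(); err != nil {
		return fmt.Errorf("stepB: %w", err)
	}
	return nil
}

func main() {
	fmt.Println(clobbered()) // <nil>
	fmt.Println(fixed())     // stepA: stepA failed
}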
Dave Russell
0209402942 SIGHUP is only useful when config was loaded from a file (#1030)
Have (*config.C).CatchHUP() return early when there is no file
path available from which to reload.
This will allow wrapping services to manage their own signal
trapping (which is particularly important if they've used
config from a string).
2023-12-06 10:13:38 -05:00
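A minimal sketch of the guard described above, assuming a config type that remembers the file path it was loaded from; field and function names here are illustrative, not nebula's actual internals:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// conf stands in for config.C; path is empty when the config came from a
// string rather than a file (hypothetical field name).
type conf struct {
	path string
}

// catchHUP returns early when there is nothing to reload from, leaving
// SIGHUP free for a wrapping service to trap itself.
func (c *conf) catchHUP() {
	if c.path == "" {
		return
	}
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGHUP)
	go func() {
		for range ch {
			fmt.Println("reloading config from", c.path)
		}
	}()
}

func main() {
	(&conf{path: ""}).catchHUP() // no-op: config was supplied as a string
}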
dependabot[bot]
fb55f5b762 Bump the golang-x-dependencies group with 3 updates (#1028)
Bumps the golang-x-dependencies group with 3 updates: [golang.org/x/crypto](https://github.com/golang/crypto), [golang.org/x/net](https://github.com/golang/net) and [golang.org/x/sync](https://github.com/golang/sync).


Updates `golang.org/x/crypto` from 0.14.0 to 0.16.0
- [Commits](https://github.com/golang/crypto/compare/v0.14.0...v0.16.0)

Updates `golang.org/x/net` from 0.17.0 to 0.19.0
- [Commits](https://github.com/golang/net/compare/v0.17.0...v0.19.0)

Updates `golang.org/x/sync` from 0.3.0 to 0.5.0
- [Commits](https://github.com/golang/sync/compare/v0.3.0...v0.5.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
- dependency-name: golang.org/x/sync
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-12-04 11:12:52 -05:00
Ben Ritcey
01cddb8013 Added firewall.rules.hash metric (#1010)
* Added firewall.rules.hash metric

Added an FNV-1 hash of the firewall rules as a Prometheus value.

* Switch FNV hash to int64, include both hashes in log messages

* Use a uint32 for the FNV hash

Let go-metrics cast the uint32 to an int64, so it won't be lossy
when it eventually emits a float64 Prometheus metric.
2023-11-28 11:56:47 -05:00
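A sketch of the idea: hash a stable serialization of the rules with 32-bit FNV-1 and publish the sum through go-metrics, whose gauges hold int64 values, so the uint32 survives the eventual float64 Prometheus conversion losslessly (every int64 up to 2^53 is exactly representable). The rule serialization below is invented for illustration:

package main

import (
	"fmt"
	"hash/fnv"

	metrics "github.com/rcrowley/go-metrics"
)

func rulesHash(rules []string) uint32 {
	h := fnv.New32() // FNV-1, 32 bits
	for _, r := range rules {
		h.Write([]byte(r))
		h.Write([]byte{0}) // separator so rule boundaries affect the hash
	}
	return h.Sum32()
}

func main() {
	rules := []string{"inbound icmp any", "outbound any any"}
	sum := rulesHash(rules)
	metrics.GetOrRegisterGauge("firewall.rules.hash", nil).Update(int64(sum))
	fmt.Printf("firewall.rules.hash=%d\n", sum)
}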
Tristan Rice
1083279a45 add gvisor based service library (#965)
* add service/ library
2023-11-21 11:50:18 -05:00
Wade Simmons
fe16ea566d firewall reject packets: cleanup error cases (#957) 2023-11-13 12:43:51 -06:00
Nate Brown
3356e03d85 Default pki.disconnect_invalid to true and make it reloadable (#859) 2023-11-13 12:39:38 -06:00
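What the new default means mechanically: with the option on, a peer whose certificate has become invalid (e.g. expired) is disconnected rather than allowed to coast on an established tunnel. A toy version of the decision, with invented names:

package main

import (
	"fmt"
	"time"
)

type peerCert struct{ notAfter time.Time }

func (p peerCert) expired(at time.Time) bool { return at.After(p.notAfter) }

// shouldDisconnect mirrors pki.disconnect_invalid: when enabled (now the
// default), an expired certificate tears the tunnel down.
func shouldDisconnect(c peerCert, disconnectInvalid bool) bool {
	return disconnectInvalid && c.expired(time.Now())
}

func main() {
	stale := peerCert{notAfter: time.Now().Add(-time.Hour)}
	fmt.Println(shouldDisconnect(stale, true))  // true: disconnect
	fmt.Println(shouldDisconnect(stale, false)) // false: the old behavior
}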
dependabot[bot]
f41db52560 Bump the golang-x-dependencies group with 1 update (#1006)
Bumps the golang-x-dependencies group with 1 update: [golang.org/x/sys](https://github.com/golang/sys).

- [Commits](https://github.com/golang/sys/compare/v0.13.0...v0.14.0)

---
updated-dependencies:
- dependency-name: golang.org/x/sys
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: golang-x-dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-11-13 07:58:45 -08:00
Nate Brown
5181cb0474 Use generics for CIDRTrees to avoid casting issues (#1004) 2023-11-02 17:05:08 -05:00
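The casting issue in miniature: an interface{}-valued container forces a type assertion at every lookup, which can panic or be forgotten, while a generic container returns the right type directly. A toy map stands in for the CIDR tree here; the real change (visible in the cidr package diff further down) applies the same idea to Tree4/Tree6:

package main

import "fmt"

// Before: values are interface{}, so callers must type-assert.
type anyTree struct{ m map[string]interface{} }

// After: the value type is a parameter, so lookups are already typed.
type tree[T any] struct{ m map[string]T }

func (t *tree[T]) get(k string) (T, bool) {
	v, ok := t.m[k]
	return v, ok
}

func main() {
	old := anyTree{m: map[string]interface{}{"10.0.0.0/8": true}}
	allowed := old.m["10.0.0.0/8"].(bool) // panics if the stored type is wrong

	nw := tree[bool]{m: map[string]bool{"10.0.0.0/8": true}}
	typed, _ := nw.get("10.0.0.0/8") // no assertion needed

	fmt.Println(allowed, typed)
}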
Nate Brown
a44e1b8b05 Clean up a hostinfo to reduce memory usage (#955) 2023-11-02 16:53:59 -05:00
guangwu
276978377a chore: remove refs to deprecated io/ioutil (#987)
Signed-off-by: guoguangwu <guoguangwu@magic-shield.com>
2023-10-31 10:35:13 -04:00
dependabot[bot]
777eb96aea Bump github.com/prometheus/client_golang from 1.16.0 to 1.17.0 (#984)
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.16.0 to 1.17.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.16.0...v1.17.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-31 10:33:04 -04:00
Wade Simmons
0912ef14f4 github actions smoke-test: run with data race detector (#988)
Run the GitHub Actions smoke tests with the data race detector enabled, so
we can detect if a PR introduces a simple data race.
2023-10-31 10:32:39 -04:00
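For reference, the race detector instruments memory accesses at build time (which is why the smoke tests now build with CGO_ENABLED=1 and -race) and reports unsynchronized concurrent access at runtime. A minimal program it flags when run with `go run -race`:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup
	counter := 0
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			counter++ // unsynchronized write from two goroutines: reported as a data race
		}()
	}
	wg.Wait()
	fmt.Println(counter)
}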
Lars Lehtonen
77a8ce1712 main: fix dropped error (#1002)
This isn't an actual issue because the current implementation of NewSSHServer never returns an error (https://github.com/slackhq/nebula/blob/v1.7.2/sshd/server.go#L56), but it's still good to fix so no surprises happen in the future.
2023-10-31 10:32:08 -04:00
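For contrast with the clobbered-error fix above, a "dropped" error is one discarded outright at the call site. A self-contained sketch with a stand-in for NewSSHServer (which today always returns nil, which is why the bug was harmless):

package main

import "fmt"

// newServer stands in for sshd.NewSSHServer: it currently always returns a
// nil error, which is how the dropped error went unnoticed.
func newServer() (string, error) { return "srv", nil }

func main() {
	srv, _ := newServer() // before: error silently dropped
	fmt.Println(srv)

	srv2, err := newServer() // after: checked, so a future failure surfaces
	if err != nil {
		panic(err)
	}
	fmt.Println(srv2)
}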
John Maguire
87b628ba24 Fix truncated comment in config.yml (#999) 2023-10-27 08:39:34 -04:00
Nate Brown
50d6a1e8ca QueryServer needs to be done outside of the lock (#996) 2023-10-17 15:43:51 -05:00
dependabot[bot]
e78fe0b9ef Bump golang.org/x/net from 0.15.0 to 0.17.0 (#990)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.15.0 to 0.17.0.
- [Commits](https://github.com/golang/net/compare/v0.15.0...v0.17.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-10-16 13:28:59 -04:00
Nate Brown
5fccbb8676 Retry wintun creation (#985) 2023-10-16 10:06:43 -05:00
dependabot[bot]
c289c7a7ca Bump github.com/miekg/dns from 1.1.55 to 1.1.56 (#979)
Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.55 to 1.1.56.
- [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release)
- [Commits](https://github.com/miekg/dns/compare/v1.1.55...v1.1.56)

---
updated-dependencies:
- dependency-name: github.com/miekg/dns
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-22 09:48:26 -04:00
dependabot[bot]
e3fbfbfd4d Bump golang.org/x/net from 0.14.0 to 0.15.0 (#977)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.14.0 to 0.15.0.
- [Commits](https://github.com/golang/net/compare/v0.14.0...v0.15.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-22 09:47:45 -04:00
dependabot[bot]
282ca4368e Bump golang.org/x/crypto from 0.12.0 to 0.13.0 (#976)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.12.0 to 0.13.0.
- [Commits](https://github.com/golang/crypto/compare/v0.12.0...v0.13.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-22 09:47:00 -04:00
Wade Simmons
280fa026ea smoke-test: don't assume docker needs sudo (#958)
Let the host deal with this detail if necessary
2023-09-07 13:57:41 -04:00
Lars Lehtonen
dbdb48f182 cert: fix dropped errors (#961) 2023-09-07 13:54:01 -04:00
Nate Brown
f7e392995a Fix rebind to not put the socket in blocking mode (#972) 2023-09-07 11:56:09 -05:00
dependabot[bot]
d271df8da8 Bump golang.org/x/term from 0.11.0 to 0.12.0 (#967)
Bumps [golang.org/x/term](https://github.com/golang/term) from 0.11.0 to 0.12.0.
- [Commits](https://github.com/golang/term/compare/v0.11.0...v0.12.0)

---
updated-dependencies:
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 12:47:55 -04:00
dependabot[bot]
eea5e6a5df Bump actions/checkout from 3 to 4 (#969)
Bumps [actions/checkout](https://github.com/actions/checkout) from 3 to 4.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v3...v4)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 11:43:56 -04:00
dependabot[bot]
790268a176 Bump golang.org/x/sys from 0.11.0 to 0.12.0 (#968)
Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.11.0 to 0.12.0.
- [Commits](https://github.com/golang/sys/compare/v0.11.0...v0.12.0)

---
updated-dependencies:
- dependency-name: golang.org/x/sys
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-09-05 11:42:08 -04:00
brad-defined
06b480e177 Fix relay migration (#964)
* Fix a relay migration issue on rehandshake. On rehandshake, the relay tunnel doesn't migrate to the new hostinfo object correctly, due to an incorrect Nebula IP sent in the CreateRelayRequest message.
* Add a test for this case

---------

Co-authored-by: Nate Brown <nbrown.us@gmail.com>
2023-09-05 09:29:27 -04:00
Nate Brown
076ebc6c6e Simplify getting a hostinfo or starting a handshake with one (#954) 2023-08-21 18:51:45 -05:00
Nate Brown
7edcf620c0 We only need the certificate in ConnectionState (#953) 2023-08-21 14:11:06 -05:00
Nate Brown
5a131b2975 Combine ca, cert, and key handling (#952) 2023-08-14 21:32:40 -05:00
Nate Brown
223cc6e660 Limit how often a busy tunnel can requery the lighthouse (#940)
Co-authored-by: Wade Simmons <wadey@slack-corp.com>
2023-08-08 13:26:41 -05:00
Wade Simmons
5671c6607c dependabot: group together common deps (#950)
Group together deps that are often updated together.

- https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file#groups
2023-08-08 13:15:42 -04:00
dependabot[bot]
7ecafbe61d Bump golang.org/x/net from 0.13.0 to 0.14.0 (#947)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.13.0 to 0.14.0.
- [Commits](https://github.com/golang/net/compare/v0.13.0...v0.14.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-08 10:04:46 -05:00
dependabot[bot]
546eb3bfbc Bump golang.org/x/crypto from 0.11.0 to 0.12.0 (#949)
Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.11.0 to 0.12.0.
- [Commits](https://github.com/golang/crypto/compare/v0.11.0...v0.12.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-07 21:28:06 -05:00
dependabot[bot]
7364d99e34 Bump golang.org/x/term from 0.10.0 to 0.11.0 (#946)
Bumps [golang.org/x/term](https://github.com/golang/term) from 0.10.0 to 0.11.0.
- [Commits](https://github.com/golang/term/compare/v0.10.0...v0.11.0)

---
updated-dependencies:
- dependency-name: golang.org/x/term
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-07 21:07:30 -05:00
dependabot[bot]
83b6dc7b16 Bump golang.org/x/net from 0.12.0 to 0.13.0 (#943)
Bumps [golang.org/x/net](https://github.com/golang/net) from 0.12.0 to 0.13.0.
- [Commits](https://github.com/golang/net/compare/v0.12.0...v0.13.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-08-02 14:28:32 -04:00
Wade Simmons
3d0da7c859 update mergo to 1.0.0 (#941)
The mergo package has moved to a vanity URL. This causes fun issues with
dependabot. Update to the new release:

- https://github.com/darccio/mergo/releases/tag/v1.0.0
- https://github.com/darccio/mergo/compare/v0.3.15...v1.0.0
2023-08-02 14:00:20 -04:00
Caleb Jasik
ed00f5d530 Remove unused config code (last edited 4yrs ago) (#938) 2023-07-31 15:59:20 -05:00
dependabot[bot]
38e56a4858 Bump golang.org/x/net from 0.9.0 to 0.12.0 (#931) 2023-07-27 15:43:16 -05:00
dependabot[bot]
fce93ccb54 Bump google.golang.org/protobuf from 1.30.0 to 1.31.0 (#930) 2023-07-27 15:42:33 -05:00
dependabot[bot]
0d715effbc Bump Apple-Actions/import-codesign-certs from 1 to 2 (#923) 2023-07-27 15:31:36 -05:00
dependabot[bot]
0c003b64f1 Bump golang.org/x/term from 0.8.0 to 0.10.0 (#928) 2023-07-27 14:38:36 -05:00
Nate Brown
14d0106716 Send the lh update worker into its own routine instead of taking over the reload routine (#935) 2023-07-27 14:38:10 -05:00
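The shape of this change, roughly: give the periodic lighthouse-update work a dedicated goroutine with its own ticker instead of piggybacking on the goroutine that services config reloads, so neither can starve the other. A generic sketch, not nebula's actual code:

package main

import (
	"context"
	"fmt"
	"time"
)

// startUpdateWorker runs periodic work on its own goroutine; the reload
// loop never blocks on it, and vice versa.
func startUpdateWorker(ctx context.Context, interval time.Duration, update func()) {
	go func() {
		t := time.NewTicker(interval)
		defer t.Stop()
		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				update()
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	startUpdateWorker(ctx, 100*time.Millisecond, func() { fmt.Println("lighthouse update") })
	<-ctx.Done()
}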
dependabot[bot]
959b015b3b Bump github.com/sirupsen/logrus from 1.9.0 to 1.9.3 (#933) 2023-07-27 14:36:36 -05:00
Nate Brown
0bffa76b5e Build for openbsd (#812) 2023-07-27 14:27:35 -05:00
c0repwn3r
03e70210a5 Add support for NetBSD (#916) 2023-07-27 13:44:47 -05:00
Nate Brown
9c6592b159 Guard e2e udp and tun channels when closed (#934) 2023-07-26 12:52:14 -05:00
dependabot[bot]
e5af94e27a Bump github.com/prometheus/client_golang from 1.15.1 to 1.16.0 (#927)
Bumps [github.com/prometheus/client_golang](https://github.com/prometheus/client_golang) from 1.15.1 to 1.16.0.
- [Release notes](https://github.com/prometheus/client_golang/releases)
- [Changelog](https://github.com/prometheus/client_golang/blob/main/CHANGELOG.md)
- [Commits](https://github.com/prometheus/client_golang/compare/v1.15.1...v1.16.0)

---
updated-dependencies:
- dependency-name: github.com/prometheus/client_golang
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-24 13:56:09 -04:00
dependabot[bot]
96f51f78ea Bump golang.org/x/sys from 0.8.0 to 0.10.0 (#926)
Bumps [golang.org/x/sys](https://github.com/golang/sys) from 0.8.0 to 0.10.0.
- [Commits](https://github.com/golang/sys/compare/v0.8.0...v0.10.0)

---
updated-dependencies:
- dependency-name: golang.org/x/sys
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-24 13:53:39 -04:00
Nate Brown
a10baeee92 Pull hostmap and pending hostmap apart, remove unused functions (#843) 2023-07-24 12:37:52 -05:00
dependabot[bot]
52c9e360e7 Bump github.com/miekg/dns from 1.1.54 to 1.1.55 (#925)
Bumps [github.com/miekg/dns](https://github.com/miekg/dns) from 1.1.54 to 1.1.55.
- [Changelog](https://github.com/miekg/dns/blob/master/Makefile.release)
- [Commits](https://github.com/miekg/dns/compare/v1.1.54...v1.1.55)

---
updated-dependencies:
- dependency-name: github.com/miekg/dns
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-24 12:52:29 -04:00
dependabot[bot]
8caaff7109 Bump github.com/stretchr/testify from 1.8.2 to 1.8.4 (#924)
Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.8.2 to 1.8.4.
- [Release notes](https://github.com/stretchr/testify/releases)
- [Commits](https://github.com/stretchr/testify/compare/v1.8.2...v1.8.4)

---
updated-dependencies:
- dependency-name: github.com/stretchr/testify
  dependency-type: direct:production
  update-type: version-update:semver-patch
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2023-07-24 12:51:31 -04:00
Nate Brown
1e3c155896 Attempt to notify systemd of service readiness on linux (#929) 2023-07-24 11:30:18 -05:00
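Under the hood, readiness notification is the sd_notify protocol: write READY=1 to the unix datagram socket named by $NOTIFY_SOCKET, which systemd sets for services with Type=notify. A minimal sketch of the wire protocol (not necessarily how nebula implements it):

package main

import (
	"net"
	"os"
)

// notifyReady sends the sd_notify READY=1 message; when not running under
// systemd, NOTIFY_SOCKET is unset and this is a no-op.
func notifyReady() error {
	name := os.Getenv("NOTIFY_SOCKET")
	if name == "" {
		return nil
	}
	conn, err := net.DialUnix("unixgram", nil, &net.UnixAddr{Name: name, Net: "unixgram"})
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = conn.Write([]byte("READY=1"))
	return err
}

func main() {
	if err := notifyReady(); err != nil {
		os.Exit(1)
	}
}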
Wade Simmons
f5db03c834 add dependabot config (#922)
This should give us PRs weekly with dependency updates, and also let us
manually check for updates when needed.

- https://docs.github.com/en/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file
2023-07-21 17:21:58 -04:00
Nate Brown
c5ce945852 Update README to include a link to go install docs (#919) 2023-07-20 21:30:38 -05:00
John Maguire
7e380bde7e Document new DNS config options (#879) 2023-07-10 15:19:05 -04:00
Nate Brown
a3e59a38ef Use registered io on Windows when possible (#905) 2023-07-10 12:43:48 -05:00
John Maguire
8ba5d64dbc Add support for naming FreeBSD tun devices (#903) 2023-06-22 12:13:31 -04:00
Nate Brown
3bbf5f4e67 Use an interface for udp conns (#901) 2023-06-14 10:48:52 -05:00
Wade Simmons
928731acfe fix up the release workflow (#891)
actions/create-release is deprecated, so just switch to using the `gh` CLI.
This is actually much easier anyway!
2023-06-14 11:45:01 -04:00
Nate Brown
57eb80e9fb v1.7.2 (#887)
Update CHANGELOG for Nebula v1.7.2
2023-06-01 11:05:07 -04:00
brad-defined
96f4dcaab8 Fix reconfig freeze attempting to send to an unbuffered, unread channel (#886)
* Fixes a reconfig freeze where the reconfig attempts to send to an unbuffered channel with no readers.
Only create the stop channel when a DNS goroutine is created, and only send when the channel exists.
Buffer to size 1 so that the stop message can be sent immediately even if the goroutine is busy doing DNS lookups.
2023-05-31 16:05:46 -04:00
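The essence of the fix: a send on an unbuffered channel blocks forever when nothing is receiving, which is what froze the reload; a one-slot buffer lets the stop message be posted without a rendezvous. A self-contained sketch of the pattern:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Buffer of 1: the sender can post "stop" even while the worker is
	// busy (e.g. mid DNS lookup) instead of blocking the reload path.
	stop := make(chan struct{}, 1)

	go func() { // stands in for the DNS goroutine
		for {
			select {
			case <-stop:
				fmt.Println("dns worker stopped")
				return
			default:
				time.Sleep(50 * time.Millisecond) // "busy doing lookups"
			}
		}
	}()

	stop <- struct{}{} // returns immediately thanks to the buffer
	time.Sleep(200 * time.Millisecond)
}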
Wade Simmons
6d8c5f437c GitHub actions update setup-go (#881)
This does caching for us, so we can remove our manual caching of modules
2023-05-23 13:24:33 -04:00
John Maguire
165b671e70 v1.7.1 (#878)
Update CHANGELOG for Nebula v1.7.1
2023-05-18 15:39:24 -04:00
brad-defined
6be0bad68a Fix static_host_map DNS lookup Linux issue - put v4 addr into v6 slice (#877) 2023-05-18 14:13:32 -04:00
108 changed files with 4136 additions and 2117 deletions

22
.github/dependabot.yml vendored Normal file

@@ -0,0 +1,22 @@
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "weekly"
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "weekly"
groups:
golang-x-dependencies:
patterns:
- "golang.org/x/*"
zx2c4-dependencies:
patterns:
- "golang.zx2c4.com/*"
protobuf-dependencies:
patterns:
- "github.com/golang/protobuf"
- "google.golang.org/protobuf"


@@ -14,21 +14,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.20
uses: actions/setup-go@v2
with:
go-version: "1.20"
id: go
- uses: actions/checkout@v4
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- uses: actions/cache@v2
- uses: actions/setup-go@v5
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-gofmt1.20-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-gofmt1.20-
go-version-file: 'go.mod'
check-latest: true
- name: Install goimports
run: |


@@ -7,25 +7,24 @@ name: Create release and upload binaries
jobs:
build-linux:
name: Build Linux All
name: Build Linux/BSD All
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.20
uses: actions/setup-go@v2
with:
go-version: "1.20"
- uses: actions/checkout@v4
- name: Checkout code
uses: actions/checkout@v2
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
check-latest: true
- name: Build
run: |
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd
make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd release-openbsd release-netbsd
mkdir release
mv build/*.tar.gz release
- name: Upload artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: linux-latest
path: release
@@ -34,13 +33,12 @@ jobs:
name: Build Windows
runs-on: windows-latest
steps:
- name: Set up Go 1.20
uses: actions/setup-go@v2
with:
go-version: "1.20"
- uses: actions/checkout@v4
- name: Checkout code
uses: actions/checkout@v2
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
check-latest: true
- name: Build
run: |
@@ -57,7 +55,7 @@ jobs:
mv dist\windows\wintun build\dist\windows\
- name: Upload artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: windows-latest
path: build
@@ -68,17 +66,16 @@ jobs:
HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }}
runs-on: macos-11
steps:
- name: Set up Go 1.20
uses: actions/setup-go@v2
with:
go-version: "1.20"
- uses: actions/checkout@v4
- name: Checkout code
uses: actions/checkout@v2
- uses: actions/setup-go@v5
with:
go-version-file: 'go.mod'
check-latest: true
- name: Import certificates
if: env.HAS_SIGNING_CREDS == 'true'
uses: Apple-Actions/import-codesign-certs@v1
uses: Apple-Actions/import-codesign-certs@v2
with:
p12-file-base64: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_P12_BASE64 }}
p12-password: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_PASSWORD }}
@@ -107,7 +104,7 @@ jobs:
fi
- name: Upload artifacts
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v3
with:
name: darwin-latest
path: ./release/*
@@ -117,12 +114,16 @@ jobs:
needs: [build-linux, build-darwin, build-windows]
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- name: Download artifacts
uses: actions/download-artifact@v2
uses: actions/download-artifact@v3
with:
path: artifacts
- name: Zip Windows
run: |
cd windows-latest
cd artifacts/windows-latest
cp windows-amd64/* .
zip -r nebula-windows-amd64.zip nebula.exe nebula-cert.exe dist
cp windows-arm64/* .
@@ -130,6 +131,7 @@ jobs:
- name: Create sha256sum
run: |
cd artifacts
for dir in linux-latest darwin-latest windows-latest
do
(
@@ -159,195 +161,12 @@ jobs:
- name: Create Release
id: create_release
uses: actions/create-release@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
tag_name: ${{ github.ref }}
release_name: Release ${{ github.ref }}
draft: false
prerelease: false
##
## Upload assets (I wish we could just upload the whole folder at once...
##
- name: Upload SHASUM256.txt
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./SHASUM256.txt
asset_name: SHASUM256.txt
asset_content_type: text/plain
- name: Upload darwin zip
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./darwin-latest/nebula-darwin.zip
asset_name: nebula-darwin.zip
asset_content_type: application/zip
- name: Upload windows-amd64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./windows-latest/nebula-windows-amd64.zip
asset_name: nebula-windows-amd64.zip
asset_content_type: application/zip
- name: Upload windows-arm64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./windows-latest/nebula-windows-arm64.zip
asset_name: nebula-windows-arm64.zip
asset_content_type: application/zip
- name: Upload linux-amd64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-amd64.tar.gz
asset_name: nebula-linux-amd64.tar.gz
asset_content_type: application/gzip
- name: Upload linux-386
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-386.tar.gz
asset_name: nebula-linux-386.tar.gz
asset_content_type: application/gzip
- name: Upload linux-ppc64le
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-ppc64le.tar.gz
asset_name: nebula-linux-ppc64le.tar.gz
asset_content_type: application/gzip
- name: Upload linux-arm-5
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-arm-5.tar.gz
asset_name: nebula-linux-arm-5.tar.gz
asset_content_type: application/gzip
- name: Upload linux-arm-6
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-arm-6.tar.gz
asset_name: nebula-linux-arm-6.tar.gz
asset_content_type: application/gzip
- name: Upload linux-arm-7
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-arm-7.tar.gz
asset_name: nebula-linux-arm-7.tar.gz
asset_content_type: application/gzip
- name: Upload linux-arm64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-arm64.tar.gz
asset_name: nebula-linux-arm64.tar.gz
asset_content_type: application/gzip
- name: Upload linux-mips
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-mips.tar.gz
asset_name: nebula-linux-mips.tar.gz
asset_content_type: application/gzip
- name: Upload linux-mipsle
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-mipsle.tar.gz
asset_name: nebula-linux-mipsle.tar.gz
asset_content_type: application/gzip
- name: Upload linux-mips64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-mips64.tar.gz
asset_name: nebula-linux-mips64.tar.gz
asset_content_type: application/gzip
- name: Upload linux-mips64le
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-mips64le.tar.gz
asset_name: nebula-linux-mips64le.tar.gz
asset_content_type: application/gzip
- name: Upload linux-mips-softfloat
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-mips-softfloat.tar.gz
asset_name: nebula-linux-mips-softfloat.tar.gz
asset_content_type: application/gzip
- name: Upload linux-riscv64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-linux-riscv64.tar.gz
asset_name: nebula-linux-riscv64.tar.gz
asset_content_type: application/gzip
- name: Upload freebsd-amd64
uses: actions/upload-release-asset@v1.0.1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ steps.create_release.outputs.upload_url }}
asset_path: ./linux-latest/nebula-freebsd-amd64.tar.gz
asset_name: nebula-freebsd-amd64.tar.gz
asset_content_type: application/gzip
run: |
cd artifacts
gh release create \
--verify-tag \
--title "Release ${{ github.ref_name }}" \
"${{ github.ref_name }}" \
SHASUM256.txt *-latest/*.zip *-latest/*.tar.gz


@@ -18,24 +18,15 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.20
uses: actions/setup-go@v2
with:
go-version: "1.20"
id: go
- uses: actions/checkout@v4
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- uses: actions/cache@v2
- uses: actions/setup-go@v5
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go1.20-
go-version-file: 'go.mod'
check-latest: true
- name: build
run: make bin-docker
run: make bin-docker CGO_ENABLED=1 BUILD_ARGS=-race
- name: setup docker image
working-directory: ./.github/workflows/smoke


@@ -41,4 +41,4 @@ EOF
../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
)
sudo docker build -t nebula:smoke-relay .
docker build -t nebula:smoke-relay .


@@ -36,4 +36,4 @@ mkdir ./build
../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
)
sudo docker build -t "nebula:${NAME:-smoke}" .
docker build -t "nebula:${NAME:-smoke}" .


@@ -14,24 +14,24 @@ cleanup() {
set +e
if [ "$(jobs -r)" ]
then
sudo docker kill lighthouse1 host2 host3 host4
docker kill lighthouse1 host2 host3 host4
fi
}
trap cleanup EXIT
sudo docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test
sudo docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test
sudo docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test
sudo docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test
docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test
docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test
docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test
docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
sleep 1
sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
sleep 1
sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
sleep 1
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
sleep 1
set +x
@@ -39,43 +39,43 @@ echo
echo " *** Testing ping from lighthouse1"
echo
set -x
sudo docker exec lighthouse1 ping -c1 192.168.100.2
sudo docker exec lighthouse1 ping -c1 192.168.100.3
sudo docker exec lighthouse1 ping -c1 192.168.100.4
docker exec lighthouse1 ping -c1 192.168.100.2
docker exec lighthouse1 ping -c1 192.168.100.3
docker exec lighthouse1 ping -c1 192.168.100.4
set +x
echo
echo " *** Testing ping from host2"
echo
set -x
sudo docker exec host2 ping -c1 192.168.100.1
docker exec host2 ping -c1 192.168.100.1
# Should fail because no relay configured in this direction
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
! sudo docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1
! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
! docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1
set +x
echo
echo " *** Testing ping from host3"
echo
set -x
sudo docker exec host3 ping -c1 192.168.100.1
sudo docker exec host3 ping -c1 192.168.100.2
sudo docker exec host3 ping -c1 192.168.100.4
docker exec host3 ping -c1 192.168.100.1
docker exec host3 ping -c1 192.168.100.2
docker exec host3 ping -c1 192.168.100.4
set +x
echo
echo " *** Testing ping from host4"
echo
set -x
sudo docker exec host4 ping -c1 192.168.100.1
docker exec host4 ping -c1 192.168.100.1
# Should fail because relays not allowed
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
sudo docker exec host4 ping -c1 192.168.100.3
! docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
docker exec host4 ping -c1 192.168.100.3
sudo docker exec host4 sh -c 'kill 1'
sudo docker exec host3 sh -c 'kill 1'
sudo docker exec host2 sh -c 'kill 1'
sudo docker exec lighthouse1 sh -c 'kill 1'
docker exec host4 sh -c 'kill 1'
docker exec host3 sh -c 'kill 1'
docker exec host2 sh -c 'kill 1'
docker exec lighthouse1 sh -c 'kill 1'
sleep 1
if [ "$(jobs -r)" ]


@@ -14,7 +14,7 @@ cleanup() {
set +e
if [ "$(jobs -r)" ]
then
sudo docker kill lighthouse1 host2 host3 host4
docker kill lighthouse1 host2 host3 host4
fi
}
@@ -22,51 +22,51 @@ trap cleanup EXIT
CONTAINER="nebula:${NAME:-smoke}"
sudo docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
sudo docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
sudo docker run --name host3 --rm "$CONTAINER" -config host3.yml -test
sudo docker run --name host4 --rm "$CONTAINER" -config host4.yml -test
docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
docker run --name host3 --rm "$CONTAINER" -config host3.yml -test
docker run --name host4 --rm "$CONTAINER" -config host4.yml -test
sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
sleep 1
sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
sleep 1
sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
sleep 1
sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
sleep 1
# grab tcpdump pcaps for debugging
sudo docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
sudo docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
sudo docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
sudo docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
sudo docker exec host3 tcpdump -i nebula1 -q -w - -U 2>logs/host3.inside.log >logs/host3.inside.pcap &
sudo docker exec host3 tcpdump -i eth0 -q -w - -U 2>logs/host3.outside.log >logs/host3.outside.pcap &
sudo docker exec host4 tcpdump -i nebula1 -q -w - -U 2>logs/host4.inside.log >logs/host4.inside.pcap &
sudo docker exec host4 tcpdump -i eth0 -q -w - -U 2>logs/host4.outside.log >logs/host4.outside.pcap &
docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
docker exec host3 tcpdump -i nebula1 -q -w - -U 2>logs/host3.inside.log >logs/host3.inside.pcap &
docker exec host3 tcpdump -i eth0 -q -w - -U 2>logs/host3.outside.log >logs/host3.outside.pcap &
docker exec host4 tcpdump -i nebula1 -q -w - -U 2>logs/host4.inside.log >logs/host4.inside.pcap &
docker exec host4 tcpdump -i eth0 -q -w - -U 2>logs/host4.outside.log >logs/host4.outside.pcap &
sudo docker exec host2 ncat -nklv 0.0.0.0 2000 &
sudo docker exec host3 ncat -nklv 0.0.0.0 2000 &
sudo docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
sudo docker exec host3 ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000 &
docker exec host2 ncat -nklv 0.0.0.0 2000 &
docker exec host3 ncat -nklv 0.0.0.0 2000 &
docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
docker exec host3 ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000 &
set +x
echo
echo " *** Testing ping from lighthouse1"
echo
set -x
sudo docker exec lighthouse1 ping -c1 192.168.100.2
sudo docker exec lighthouse1 ping -c1 192.168.100.3
docker exec lighthouse1 ping -c1 192.168.100.2
docker exec lighthouse1 ping -c1 192.168.100.3
set +x
echo
echo " *** Testing ping from host2"
echo
set -x
sudo docker exec host2 ping -c1 192.168.100.1
docker exec host2 ping -c1 192.168.100.1
# Should fail because not allowed by host3 inbound firewall
! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
set +x
echo
@@ -74,34 +74,34 @@ echo " *** Testing ncat from host2"
echo
set -x
# Should fail because not allowed by host3 inbound firewall
! sudo docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
! sudo docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
! docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
! docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
set +x
echo
echo " *** Testing ping from host3"
echo
set -x
sudo docker exec host3 ping -c1 192.168.100.1
sudo docker exec host3 ping -c1 192.168.100.2
docker exec host3 ping -c1 192.168.100.1
docker exec host3 ping -c1 192.168.100.2
set +x
echo
echo " *** Testing ncat from host3"
echo
set -x
sudo docker exec host3 ncat -nzv -w5 192.168.100.2 2000
sudo docker exec host3 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2
docker exec host3 ncat -nzv -w5 192.168.100.2 2000
docker exec host3 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2
set +x
echo
echo " *** Testing ping from host4"
echo
set -x
sudo docker exec host4 ping -c1 192.168.100.1
docker exec host4 ping -c1 192.168.100.1
# Should fail because not allowed by host4 outbound firewall
! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
! sudo docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
! docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
! docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
set +x
echo
@@ -109,10 +109,10 @@ echo " *** Testing ncat from host4"
echo
set -x
# Should fail because not allowed by host4 outbound firewall
! sudo docker exec host4 ncat -nzv -w5 192.168.100.2 2000 || exit 1
! sudo docker exec host4 ncat -nzv -w5 192.168.100.3 2000 || exit 1
! sudo docker exec host4 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 || exit 1
! sudo docker exec host4 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
! docker exec host4 ncat -nzv -w5 192.168.100.2 2000 || exit 1
! docker exec host4 ncat -nzv -w5 192.168.100.3 2000 || exit 1
! docker exec host4 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 || exit 1
! docker exec host4 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
set +x
echo
@@ -120,15 +120,15 @@ echo " *** Testing conntrack"
echo
set -x
# host2 can ping host3 now that host3 pinged it first
sudo docker exec host2 ping -c1 192.168.100.3
docker exec host2 ping -c1 192.168.100.3
# host4 can ping host2 once conntrack established
sudo docker exec host2 ping -c1 192.168.100.4
sudo docker exec host4 ping -c1 192.168.100.2
docker exec host2 ping -c1 192.168.100.4
docker exec host4 ping -c1 192.168.100.2
sudo docker exec host4 sh -c 'kill 1'
sudo docker exec host3 sh -c 'kill 1'
sudo docker exec host2 sh -c 'kill 1'
sudo docker exec lighthouse1 sh -c 'kill 1'
docker exec host4 sh -c 'kill 1'
docker exec host3 sh -c 'kill 1'
docker exec host2 sh -c 'kill 1'
docker exec lighthouse1 sh -c 'kill 1'
sleep 1
if [ "$(jobs -r)" ]


@@ -18,21 +18,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.20
uses: actions/setup-go@v2
with:
go-version: "1.20"
id: go
- uses: actions/checkout@v4
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- uses: actions/cache@v2
- uses: actions/setup-go@v5
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go1.20-
go-version-file: 'go.mod'
check-latest: true
- name: Build
run: make all
@@ -57,21 +48,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Set up Go 1.20
uses: actions/setup-go@v2
with:
go-version: "1.20"
id: go
- uses: actions/checkout@v4
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- uses: actions/cache@v2
- uses: actions/setup-go@v5
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go1.20-
go-version-file: 'go.mod'
check-latest: true
- name: Build
run: make bin-boringcrypto
@@ -90,21 +72,12 @@ jobs:
os: [windows-latest, macos-11]
steps:
- name: Set up Go 1.20
uses: actions/setup-go@v2
with:
go-version: "1.20"
id: go
- uses: actions/checkout@v4
- name: Check out code into the Go module directory
uses: actions/checkout@v2
- uses: actions/cache@v2
- uses: actions/setup-go@v5
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go1.20-
go-version-file: 'go.mod'
check-latest: true
- name: Build nebula
run: go build ./cmd/nebula


@@ -7,6 +7,78 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
## [Unreleased]
## [1.8.1] - 2023-12-19
### Security
- Update `golang.org/x/crypto`, which includes a fix for CVE-2023-48795. (#1048)
### Fixed
- Fix a deadlock introduced in v1.8.0 that could occur during handshakes. (#1044)
- Fix mobile builds. (#1035)
## [1.8.0] - 2023-12-06
### Deprecated
- The next minor release of Nebula, 1.9.0, will require at least Windows 10 or
Windows Server 2016. This is because support for earlier versions was removed
in Go 1.21. See https://go.dev/doc/go1.21#windows
### Added
- Linux: Notify systemd of service readiness. This should resolve timing issues
with services that depend on Nebula being active. For an example of how to
enable this, see: `examples/service_scripts/nebula.service`. (#929)
- Windows: Use Registered IO (RIO) when possible. Testing on a Windows 11
machine shows ~50x improvement in throughput. (#905)
- NetBSD, OpenBSD: Added rudimentary support. (#916, #812)
- FreeBSD: Add support for naming tun devices. (#903)
### Changed
- `pki.disconnect_invalid` will now default to true. This means that once a
certificate expires, the tunnel will be disconnected. If you use SIGHUP to
reload certificates without restarting Nebula, you should ensure all of your
clients are on 1.7.0 or newer before you enable this feature. (#859)
- Limit how often a busy tunnel can requery the lighthouse. The new config
option `timers.requery_wait_duration` defaults to `60s`. (#940)
- The internal structures for hostmaps were refactored to reduce memory usage
and the potential for subtle bugs. (#843, #938, #953, #954, #955)
- Lots of dependency updates.
### Fixed
- Windows: Retry wintun device creation if it fails the first time. (#985)
- Fix issues with firewall reject packets that could cause panics. (#957)
- Fix relay migration during re-handshakes. (#964)
- Various other refactors and fixes. (#935, #952, #972, #961, #996, #1002,
#987, #1004, #1030, #1032, ...)
## [1.7.2] - 2023-06-01
### Fixed
- Fix a freeze during config reload if the `static_host_map` config was changed. (#886)
## [1.7.1] - 2023-05-18
### Fixed
- Fix IPv4 addresses returned by `static_host_map` DNS lookup queries being
treated as IPv6 addresses. (#877)
## [1.7.0] - 2023-05-17
### Added
@@ -475,7 +547,11 @@ created.)
- Initial public release.
[Unreleased]: https://github.com/slackhq/nebula/compare/v1.7.0...HEAD
[Unreleased]: https://github.com/slackhq/nebula/compare/v1.8.1...HEAD
[1.8.1]: https://github.com/slackhq/nebula/releases/tag/v1.8.1
[1.8.0]: https://github.com/slackhq/nebula/releases/tag/v1.8.0
[1.7.2]: https://github.com/slackhq/nebula/releases/tag/v1.7.2
[1.7.1]: https://github.com/slackhq/nebula/releases/tag/v1.7.1
[1.7.0]: https://github.com/slackhq/nebula/releases/tag/v1.7.0
[1.6.1]: https://github.com/slackhq/nebula/releases/tag/v1.6.1
[1.6.0]: https://github.com/slackhq/nebula/releases/tag/v1.6.0


@@ -12,6 +12,8 @@ ifeq ($(OS),Windows_NT)
GOISMIN := $(shell IF "$(GOVERSION)" GEQ "$(GOMINVERSION)" ECHO 1)
NEBULA_CMD_SUFFIX = .exe
NULL_FILE = nul
# RIO on windows does pointer stuff that makes go vet angry
VET_FLAGS = -unsafeptr=false
else
GOVERSION := $(shell go version | awk '{print substr($$3, 3)}')
GOISMIN := $(shell expr "$(GOVERSION)" ">=" "$(GOMINVERSION)")
@@ -44,10 +46,21 @@ ALL_LINUX = linux-amd64 \
linux-mips-softfloat \
linux-riscv64
ALL_FREEBSD = freebsd-amd64 \
freebsd-arm64
ALL_OPENBSD = openbsd-amd64 \
openbsd-arm64
ALL_NETBSD = netbsd-amd64 \
netbsd-arm64
ALL = $(ALL_LINUX) \
$(ALL_FREEBSD) \
$(ALL_OPENBSD) \
$(ALL_NETBSD) \
darwin-amd64 \
darwin-arm64 \
freebsd-amd64 \
windows-amd64 \
windows-arm64
@@ -75,7 +88,11 @@ release: $(ALL:%=build/nebula-%.tar.gz)
release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz)
release-freebsd: build/nebula-freebsd-amd64.tar.gz
release-freebsd: $(ALL_FREEBSD:%=build/nebula-%.tar.gz)
release-openbsd: $(ALL_OPENBSD:%=build/nebula-%.tar.gz)
release-netbsd: $(ALL_NETBSD:%=build/nebula-%.tar.gz)
release-boringcrypto: build/nebula-linux-$(shell go env GOARCH)-boringcrypto.tar.gz
@@ -93,6 +110,9 @@ bin-darwin: build/darwin-amd64/nebula build/darwin-amd64/nebula-cert
bin-freebsd: build/freebsd-amd64/nebula build/freebsd-amd64/nebula-cert
mv $? .
bin-freebsd-arm64: build/freebsd-arm64/nebula build/freebsd-arm64/nebula-cert
mv $? .
bin-boringcrypto: build/linux-$(shell go env GOARCH)-boringcrypto/nebula build/linux-$(shell go env GOARCH)-boringcrypto/nebula-cert
mv $? .
@@ -137,7 +157,7 @@ build/nebula-%.zip: build/%/nebula.exe build/%/nebula-cert.exe
cd build/$* && zip ../nebula-$*.zip nebula.exe nebula-cert.exe
vet:
go vet -v ./...
go vet $(VET_FLAGS) -v ./...
test:
go test -v ./...
@@ -190,6 +210,7 @@ smoke-relay-docker: bin-docker
cd .github/workflows/smoke/ && ./smoke-relay.sh
smoke-docker-race: BUILD_ARGS = -race
smoke-docker-race: CGO_ENABLED = 1
smoke-docker-race: smoke-docker
.FORCE:


@@ -108,7 +108,7 @@ For each host, copy the nebula binary to the host, along with `config.yml` from
## Building Nebula from source
Download go and clone this repo. Change to the nebula directory.
Make sure you have [go](https://go.dev/doc/install) installed and clone this repo. Change to the nebula directory.
To build nebula for all platforms:
`make all`


@@ -12,7 +12,7 @@ import (
type AllowList struct {
// The values of this cidrTree are `bool`, signifying allow/deny
cidrTree *cidr.Tree6
cidrTree *cidr.Tree6[bool]
}
type RemoteAllowList struct {
@@ -20,7 +20,7 @@ type RemoteAllowList struct {
// Inside Range Specific, keys of this tree are inside CIDRs and values
// are *AllowList
insideAllowLists *cidr.Tree6
insideAllowLists *cidr.Tree6[*AllowList]
}
type LocalAllowList struct {
@@ -88,7 +88,7 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw)
}
tree := cidr.NewTree6()
tree := cidr.NewTree6[bool]()
// Keep track of the rules we have added for both ipv4 and ipv6
type allowListRules struct {
@@ -218,13 +218,13 @@ func getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error
return nameRules, nil
}
func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6, error) {
func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6[*AllowList], error) {
value := c.Get(k)
if value == nil {
return nil, nil
}
remoteAllowRanges := cidr.NewTree6()
remoteAllowRanges := cidr.NewTree6[*AllowList]()
rawMap, ok := value.(map[interface{}]interface{})
if !ok {
@@ -257,13 +257,8 @@ func (al *AllowList) Allow(ip net.IP) bool {
return true
}
result := al.cidrTree.MostSpecificContains(ip)
switch v := result.(type) {
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
_, result := al.cidrTree.MostSpecificContains(ip)
return result
}
func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool {
@@ -271,13 +266,8 @@ func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool {
return true
}
result := al.cidrTree.MostSpecificContainsIpV4(ip)
switch v := result.(type) {
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
_, result := al.cidrTree.MostSpecificContainsIpV4(ip)
return result
}
func (al *AllowList) AllowIpV6(hi, lo uint64) bool {
@@ -285,13 +275,8 @@ func (al *AllowList) AllowIpV6(hi, lo uint64) bool {
return true
}
result := al.cidrTree.MostSpecificContainsIpV6(hi, lo)
switch v := result.(type) {
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
_, result := al.cidrTree.MostSpecificContainsIpV6(hi, lo)
return result
}
func (al *LocalAllowList) Allow(ip net.IP) bool {
@@ -352,9 +337,9 @@ func (al *RemoteAllowList) AllowIpV6(vpnIp iputil.VpnIp, hi, lo uint64) bool {
func (al *RemoteAllowList) getInsideAllowList(vpnIp iputil.VpnIp) *AllowList {
if al.insideAllowLists != nil {
inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp)
if inside != nil {
return inside.(*AllowList)
ok, inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp)
if ok {
return inside
}
}
return nil


@@ -100,7 +100,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
func TestAllowList_Allow(t *testing.T) {
assert.Equal(t, true, ((*AllowList)(nil)).Allow(net.ParseIP("1.1.1.1")))
tree := cidr.NewTree6()
tree := cidr.NewTree6[bool]()
tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true)
tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false)
tree.AddCIDR(cidr.Parse("10.42.42.42/32"), true)


@@ -51,13 +51,13 @@ func (c *calculatedRemote) Apply(ip iputil.VpnIp) *Ip4AndPort {
return &Ip4AndPort{Ip: uint32(masked), Port: c.port}
}
func NewCalculatedRemotesFromConfig(c *config.C, k string) (*cidr.Tree4, error) {
func NewCalculatedRemotesFromConfig(c *config.C, k string) (*cidr.Tree4[[]*calculatedRemote], error) {
value := c.Get(k)
if value == nil {
return nil, nil
}
calculatedRemotes := cidr.NewTree4()
calculatedRemotes := cidr.NewTree4[[]*calculatedRemote]()
rawMap, ok := value.(map[any]any)
if !ok {

163
cert.go

@@ -1,163 +0,0 @@
package nebula
import (
"errors"
"fmt"
"io/ioutil"
"strings"
"time"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
)
type CertState struct {
certificate *cert.NebulaCertificate
rawCertificate []byte
rawCertificateNoKey []byte
publicKey []byte
privateKey []byte
}
func NewCertState(certificate *cert.NebulaCertificate, privateKey []byte) (*CertState, error) {
// Marshal the certificate to ensure it is valid
rawCertificate, err := certificate.Marshal()
if err != nil {
return nil, fmt.Errorf("invalid nebula certificate on interface: %s", err)
}
publicKey := certificate.Details.PublicKey
cs := &CertState{
rawCertificate: rawCertificate,
certificate: certificate, // PublicKey has been set to nil above
privateKey: privateKey,
publicKey: publicKey,
}
cs.certificate.Details.PublicKey = nil
rawCertNoKey, err := cs.certificate.Marshal()
if err != nil {
return nil, fmt.Errorf("error marshalling certificate no key: %s", err)
}
cs.rawCertificateNoKey = rawCertNoKey
// put public key back
cs.certificate.Details.PublicKey = cs.publicKey
return cs, nil
}
func NewCertStateFromConfig(c *config.C) (*CertState, error) {
var pemPrivateKey []byte
var err error
privPathOrPEM := c.GetString("pki.key", "")
if privPathOrPEM == "" {
return nil, errors.New("no pki.key path or PEM data provided")
}
if strings.Contains(privPathOrPEM, "-----BEGIN") {
pemPrivateKey = []byte(privPathOrPEM)
privPathOrPEM = "<inline>"
} else {
pemPrivateKey, err = ioutil.ReadFile(privPathOrPEM)
if err != nil {
return nil, fmt.Errorf("unable to read pki.key file %s: %s", privPathOrPEM, err)
}
}
rawKey, _, curve, err := cert.UnmarshalPrivateKey(pemPrivateKey)
if err != nil {
return nil, fmt.Errorf("error while unmarshaling pki.key %s: %s", privPathOrPEM, err)
}
var rawCert []byte
pubPathOrPEM := c.GetString("pki.cert", "")
if pubPathOrPEM == "" {
return nil, errors.New("no pki.cert path or PEM data provided")
}
if strings.Contains(pubPathOrPEM, "-----BEGIN") {
rawCert = []byte(pubPathOrPEM)
pubPathOrPEM = "<inline>"
} else {
rawCert, err = ioutil.ReadFile(pubPathOrPEM)
if err != nil {
return nil, fmt.Errorf("unable to read pki.cert file %s: %s", pubPathOrPEM, err)
}
}
nebulaCert, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCert)
if err != nil {
return nil, fmt.Errorf("error while unmarshaling pki.cert %s: %s", pubPathOrPEM, err)
}
if nebulaCert.Expired(time.Now()) {
return nil, fmt.Errorf("nebula certificate for this host is expired")
}
if len(nebulaCert.Details.Ips) == 0 {
return nil, fmt.Errorf("no IPs encoded in certificate")
}
if err = nebulaCert.VerifyPrivateKey(curve, rawKey); err != nil {
return nil, fmt.Errorf("private key is not a pair with public key in nebula cert")
}
return NewCertState(nebulaCert, rawKey)
}
func loadCAFromConfig(l *logrus.Logger, c *config.C) (*cert.NebulaCAPool, error) {
var rawCA []byte
var err error
caPathOrPEM := c.GetString("pki.ca", "")
if caPathOrPEM == "" {
return nil, errors.New("no pki.ca path or PEM data provided")
}
if strings.Contains(caPathOrPEM, "-----BEGIN") {
rawCA = []byte(caPathOrPEM)
} else {
rawCA, err = ioutil.ReadFile(caPathOrPEM)
if err != nil {
return nil, fmt.Errorf("unable to read pki.ca file %s: %s", caPathOrPEM, err)
}
}
CAs, err := cert.NewCAPoolFromBytes(rawCA)
if errors.Is(err, cert.ErrExpired) {
var expired int
for _, cert := range CAs.CAs {
if cert.Expired(time.Now()) {
expired++
l.WithField("cert", cert).Warn("expired certificate present in CA pool")
}
}
if expired >= len(CAs.CAs) {
return nil, errors.New("no valid CA certificates present")
}
} else if err != nil {
return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err)
}
for _, fp := range c.GetStringSlice("pki.blocklist", []string{}) {
l.WithField("fingerprint", fp).Info("Blocklisting cert")
CAs.BlocklistFingerprint(fp)
}
// Support deprecated config for at least one minor release to allow for migrations
//TODO: remove in 2022 or later
for _, fp := range c.GetStringSlice("pki.blacklist", []string{}) {
l.WithField("fingerprint", fp).Info("Blocklisting cert")
l.Warn("pki.blacklist is deprecated and will not be supported in a future release. Please migrate your config to use pki.blocklist")
CAs.BlocklistFingerprint(fp)
}
return CAs, nil
}
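
The inline-PEM-versus-path handling in the removed file above is a simple substring probe. Isolated as a sketch (loadPEM is a hypothetical helper name, not part of the repository):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// loadPEM mirrors the heuristic above: if the config value looks like
// PEM data, use it directly; otherwise treat it as a file path.
func loadPEM(pathOrPEM string) ([]byte, string, error) {
	if strings.Contains(pathOrPEM, "-----BEGIN") {
		return []byte(pathOrPEM), "<inline>", nil
	}
	b, err := os.ReadFile(pathOrPEM)
	if err != nil {
		return nil, pathOrPEM, fmt.Errorf("unable to read %s: %w", pathOrPEM, err)
	}
	return b, pathOrPEM, nil
}

func main() {
	b, src, err := loadPEM("-----BEGIN NEBULA CERTIFICATE-----\n...")
	fmt.Println(len(b), src, err) // inline data is used verbatim
}
```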

View File

@@ -272,6 +272,9 @@ func EncryptAndMarshalSigningPrivateKey(curve Curve, b []byte, passphrase []byte
},
Ciphertext: ciphertext,
})
if err != nil {
return nil, err
}
switch curve {
case Curve_CURVE25519:

View File

@@ -77,6 +77,9 @@ func aes256Decrypt(passphrase []byte, kdfParams *Argon2Parameters, data []byte)
}
gcm, err := cipher.NewGCM(block)
if err != nil {
return nil, err
}
nonce, ciphertext, err := splitNonceCiphertext(data, gcm.NonceSize())
if err != nil {

View File

@@ -6,35 +6,36 @@ import (
"github.com/slackhq/nebula/iputil"
)
type Node struct {
left *Node
right *Node
parent *Node
value interface{}
type Node[T any] struct {
left *Node[T]
right *Node[T]
parent *Node[T]
hasValue bool
value T
}
type entry struct {
type entry[T any] struct {
CIDR *net.IPNet
Value *interface{}
Value T
}
type Tree4 struct {
root *Node
list []entry
type Tree4[T any] struct {
root *Node[T]
list []entry[T]
}
const (
startbit = iputil.VpnIp(0x80000000)
)
func NewTree4() *Tree4 {
tree := new(Tree4)
tree.root = &Node{}
tree.list = []entry{}
func NewTree4[T any]() *Tree4[T] {
tree := new(Tree4[T])
tree.root = &Node[T]{}
tree.list = []entry[T]{}
return tree
}
func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
func (tree *Tree4[T]) AddCIDR(cidr *net.IPNet, val T) {
bit := startbit
node := tree.root
next := tree.root
@@ -68,14 +69,15 @@ func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
}
}
tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
tree.list = append(tree.list, entry[T]{CIDR: cidr, Value: val})
node.value = val
node.hasValue = true
return
}
// Build up the rest of the tree we don't already have
for bit&mask != 0 {
next = &Node{}
next = &Node[T]{}
next.parent = node
if ip&bit != 0 {
@@ -90,17 +92,18 @@ func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
// Final node marks our cidr, set the value
node.value = val
tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
node.hasValue = true
tree.list = append(tree.list, entry[T]{CIDR: cidr, Value: val})
}
// Contains finds the first match, which may be the least specific
func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {
func (tree *Tree4[T]) Contains(ip iputil.VpnIp) (ok bool, value T) {
bit := startbit
node := tree.root
for node != nil {
if node.value != nil {
return node.value
if node.hasValue {
return true, node.value
}
if ip&bit != 0 {
@@ -113,17 +116,18 @@ func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {
}
return value
return false, value
}
// MostSpecificContains finds the most specific match
func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
func (tree *Tree4[T]) MostSpecificContains(ip iputil.VpnIp) (ok bool, value T) {
bit := startbit
node := tree.root
for node != nil {
if node.value != nil {
if node.hasValue {
value = node.value
ok = true
}
if ip&bit != 0 {
@@ -135,11 +139,12 @@ func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
bit >>= 1
}
return value
return ok, value
}
// Match finds the most specific match
func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
// TODO this is exact match
func (tree *Tree4[T]) Match(ip iputil.VpnIp) (ok bool, value T) {
bit := startbit
node := tree.root
lastNode := node
@@ -157,11 +162,12 @@ func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
if bit == 0 && lastNode != nil {
value = lastNode.value
ok = true
}
return value
return ok, value
}
// List will return all CIDRs and their current values. Do not modify the contents!
func (tree *Tree4) List() []entry {
func (tree *Tree4[T]) List() []entry[T] {
return tree.list
}
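
A short usage sketch of the generic Tree4 API as defined above; call sites swap the old nil check on interface{} for the explicit ok bool:

```go
package main

import (
	"fmt"
	"net"

	"github.com/slackhq/nebula/cidr"
	"github.com/slackhq/nebula/iputil"
)

func main() {
	tree := cidr.NewTree4[string]()
	tree.AddCIDR(cidr.Parse("10.0.0.0/8"), "lan")
	tree.AddCIDR(cidr.Parse("10.1.0.0/16"), "dmz")

	ip := iputil.Ip2VpnIp(net.ParseIP("10.1.2.3"))

	// Contains stops at the first node with a value (least specific)...
	ok, v := tree.Contains(ip)
	fmt.Println(ok, v) // true lan

	// ...while MostSpecificContains keeps walking to the longest prefix.
	ok, v = tree.MostSpecificContains(ip)
	fmt.Println(ok, v) // true dmz
}
```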

View File

@@ -9,7 +9,7 @@ import (
)
func TestCIDRTree_List(t *testing.T) {
tree := NewTree4()
tree := NewTree4[string]()
tree.AddCIDR(Parse("1.0.0.0/16"), "1")
tree.AddCIDR(Parse("1.0.0.0/8"), "2")
tree.AddCIDR(Parse("1.0.0.0/16"), "3")
@@ -17,13 +17,13 @@ func TestCIDRTree_List(t *testing.T) {
list := tree.List()
assert.Len(t, list, 2)
assert.Equal(t, "1.0.0.0/8", list[0].CIDR.String())
assert.Equal(t, "2", *list[0].Value)
assert.Equal(t, "2", list[0].Value)
assert.Equal(t, "1.0.0.0/16", list[1].CIDR.String())
assert.Equal(t, "4", *list[1].Value)
assert.Equal(t, "4", list[1].Value)
}
func TestCIDRTree_Contains(t *testing.T) {
tree := NewTree4()
tree := NewTree4[string]()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
@@ -33,35 +33,43 @@ func TestCIDRTree_Contains(t *testing.T) {
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
tests := []struct {
Found bool
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4a", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
{true, "1", "1.0.0.0"},
{true, "1", "1.255.255.255"},
{true, "2", "2.1.0.0"},
{true, "2", "2.1.255.255"},
{true, "3", "3.1.1.0"},
{true, "3", "3.1.1.255"},
{true, "4a", "4.1.1.255"},
{true, "4a", "4.1.1.1"},
{true, "5", "240.0.0.0"},
{true, "5", "255.255.255.255"},
{false, "", "239.0.0.0"},
{false, "", "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
ok, r := tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP)))
assert.Equal(t, tt.Found, ok)
assert.Equal(t, tt.Result, r)
}
tree = NewTree4()
tree = NewTree4[string]()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
ok, r := tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0")))
assert.True(t, ok)
assert.Equal(t, "cool", r)
ok, r = tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255")))
assert.True(t, ok)
assert.Equal(t, "cool", r)
}
func TestCIDRTree_MostSpecificContains(t *testing.T) {
tree := NewTree4()
tree := NewTree4[string]()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
@@ -71,59 +79,75 @@ func TestCIDRTree_MostSpecificContains(t *testing.T) {
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
tests := []struct {
Found bool
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4b", "4.1.1.2"},
{"4c", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
{true, "1", "1.0.0.0"},
{true, "1", "1.255.255.255"},
{true, "2", "2.1.0.0"},
{true, "2", "2.1.255.255"},
{true, "3", "3.1.1.0"},
{true, "3", "3.1.1.255"},
{true, "4a", "4.1.1.255"},
{true, "4b", "4.1.1.2"},
{true, "4c", "4.1.1.1"},
{true, "5", "240.0.0.0"},
{true, "5", "255.255.255.255"},
{false, "", "239.0.0.0"},
{false, "", "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
ok, r := tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP)))
assert.Equal(t, tt.Found, ok)
assert.Equal(t, tt.Result, r)
}
tree = NewTree4()
tree = NewTree4[string]()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
ok, r := tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0")))
assert.True(t, ok)
assert.Equal(t, "cool", r)
ok, r = tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255")))
assert.True(t, ok)
assert.Equal(t, "cool", r)
}
func TestCIDRTree_Match(t *testing.T) {
tree := NewTree4()
tree := NewTree4[string]()
tree.AddCIDR(Parse("4.1.1.0/32"), "1a")
tree.AddCIDR(Parse("4.1.1.1/32"), "1b")
tests := []struct {
Found bool
Result interface{}
IP string
}{
{"1a", "4.1.1.0"},
{"1b", "4.1.1.1"},
{true, "1a", "4.1.1.0"},
{true, "1b", "4.1.1.1"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Match(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
ok, r := tree.Match(iputil.Ip2VpnIp(net.ParseIP(tt.IP)))
assert.Equal(t, tt.Found, ok)
assert.Equal(t, tt.Result, r)
}
tree = NewTree4()
tree = NewTree4[string]()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
ok, r := tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0")))
assert.True(t, ok)
assert.Equal(t, "cool", r)
ok, r = tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255")))
assert.True(t, ok)
assert.Equal(t, "cool", r)
}
func BenchmarkCIDRTree_Contains(b *testing.B) {
tree := NewTree4()
tree := NewTree4[string]()
tree.AddCIDR(Parse("1.1.0.0/16"), "1")
tree.AddCIDR(Parse("1.2.1.1/32"), "1")
tree.AddCIDR(Parse("192.2.1.1/32"), "1")
@@ -145,7 +169,7 @@ func BenchmarkCIDRTree_Contains(b *testing.B) {
}
func BenchmarkCIDRTree_Match(b *testing.B) {
tree := NewTree4()
tree := NewTree4[string]()
tree.AddCIDR(Parse("1.1.0.0/16"), "1")
tree.AddCIDR(Parse("1.2.1.1/32"), "1")
tree.AddCIDR(Parse("192.2.1.1/32"), "1")

View File

@@ -8,20 +8,20 @@ import (
const startbit6 = uint64(1 << 63)
type Tree6 struct {
root4 *Node
root6 *Node
type Tree6[T any] struct {
root4 *Node[T]
root6 *Node[T]
}
func NewTree6() *Tree6 {
tree := new(Tree6)
tree.root4 = &Node{}
tree.root6 = &Node{}
func NewTree6[T any]() *Tree6[T] {
tree := new(Tree6[T])
tree.root4 = &Node[T]{}
tree.root6 = &Node[T]{}
return tree
}
func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {
var node, next *Node
func (tree *Tree6[T]) AddCIDR(cidr *net.IPNet, val T) {
var node, next *Node[T]
cidrIP, ipv4 := isIPV4(cidr.IP)
if ipv4 {
@@ -56,7 +56,7 @@ func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {
// Build up the rest of the tree we don't already have
for bit&mask != 0 {
next = &Node{}
next = &Node[T]{}
next.parent = node
if ip&bit != 0 {
@@ -72,11 +72,12 @@ func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {
// Final node marks our cidr, set the value
node.value = val
node.hasValue = true
}
// Finds the most specific match
func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
var node *Node
func (tree *Tree6[T]) MostSpecificContains(ip net.IP) (ok bool, value T) {
var node *Node[T]
wholeIP, ipv4 := isIPV4(ip)
if ipv4 {
@@ -90,8 +91,9 @@ func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
bit := startbit
for node != nil {
if node.value != nil {
if node.hasValue {
value = node.value
ok = true
}
if bit == 0 {
@@ -108,16 +110,17 @@ func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
}
}
return value
return ok, value
}
func (tree *Tree6) MostSpecificContainsIpV4(ip iputil.VpnIp) (value interface{}) {
func (tree *Tree6[T]) MostSpecificContainsIpV4(ip iputil.VpnIp) (ok bool, value T) {
bit := startbit
node := tree.root4
for node != nil {
if node.value != nil {
if node.hasValue {
value = node.value
ok = true
}
if ip&bit != 0 {
@@ -129,10 +132,10 @@ func (tree *Tree6) MostSpecificContainsIpV4(ip iputil.VpnIp) (value interface{})
bit >>= 1
}
return value
return ok, value
}
func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
func (tree *Tree6[T]) MostSpecificContainsIpV6(hi, lo uint64) (ok bool, value T) {
ip := hi
node := tree.root6
@@ -140,8 +143,9 @@ func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
bit := startbit6
for node != nil {
if node.value != nil {
if node.hasValue {
value = node.value
ok = true
}
if bit == 0 {
@@ -160,7 +164,7 @@ func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
ip = lo
}
return value
return ok, value
}
func isIPV4(ip net.IP) (net.IP, bool) {
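
The IPv6 lookups above walk the address as two 64-bit words. A sketch of driving MostSpecificContainsIpV6 directly, mirroring the test below and assuming cidr.Parse:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"

	"github.com/slackhq/nebula/cidr"
)

func main() {
	tree := cidr.NewTree6[string]()
	tree.AddCIDR(cidr.Parse("1:2:0:4:5:0:0:0/64"), "6a")

	ip := net.ParseIP("1:2:0:4:5:0:0:1") // 16-byte representation
	hi := binary.BigEndian.Uint64(ip[:8]) // high 64 bits
	lo := binary.BigEndian.Uint64(ip[8:]) // low 64 bits

	ok, v := tree.MostSpecificContainsIpV6(hi, lo)
	fmt.Println(ok, v) // true 6a
}
```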

View File

@@ -9,7 +9,7 @@ import (
)
func TestCIDR6Tree_MostSpecificContains(t *testing.T) {
tree := NewTree6()
tree := NewTree6[string]()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
@@ -22,53 +22,68 @@ func TestCIDR6Tree_MostSpecificContains(t *testing.T) {
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")
tests := []struct {
Found bool
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4b", "4.1.1.2"},
{"4c", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{"6a", "1:2:0:4:1:1:1:1"},
{"6b", "1:2:0:4:5:1:1:1"},
{"6c", "1:2:0:4:5:0:0:0"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
{true, "1", "1.0.0.0"},
{true, "1", "1.255.255.255"},
{true, "2", "2.1.0.0"},
{true, "2", "2.1.255.255"},
{true, "3", "3.1.1.0"},
{true, "3", "3.1.1.255"},
{true, "4a", "4.1.1.255"},
{true, "4b", "4.1.1.2"},
{true, "4c", "4.1.1.1"},
{true, "5", "240.0.0.0"},
{true, "5", "255.255.255.255"},
{true, "6a", "1:2:0:4:1:1:1:1"},
{true, "6b", "1:2:0:4:5:1:1:1"},
{true, "6c", "1:2:0:4:5:0:0:0"},
{false, "", "239.0.0.0"},
{false, "", "4.1.2.2"},
}
for _, tt := range tests {
assert.Equal(t, tt.Result, tree.MostSpecificContains(net.ParseIP(tt.IP)))
ok, r := tree.MostSpecificContains(net.ParseIP(tt.IP))
assert.Equal(t, tt.Found, ok)
assert.Equal(t, tt.Result, r)
}
tree = NewTree6()
tree = NewTree6[string]()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
tree.AddCIDR(Parse("::/0"), "cool6")
assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("0.0.0.0")))
assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("255.255.255.255")))
assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("::")))
assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8")))
ok, r := tree.MostSpecificContains(net.ParseIP("0.0.0.0"))
assert.True(t, ok)
assert.Equal(t, "cool", r)
ok, r = tree.MostSpecificContains(net.ParseIP("255.255.255.255"))
assert.True(t, ok)
assert.Equal(t, "cool", r)
ok, r = tree.MostSpecificContains(net.ParseIP("::"))
assert.True(t, ok)
assert.Equal(t, "cool6", r)
ok, r = tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8"))
assert.True(t, ok)
assert.Equal(t, "cool6", r)
}
func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) {
tree := NewTree6()
tree := NewTree6[string]()
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")
tests := []struct {
Found bool
Result interface{}
IP string
}{
{"6a", "1:2:0:4:1:1:1:1"},
{"6b", "1:2:0:4:5:1:1:1"},
{"6c", "1:2:0:4:5:0:0:0"},
{true, "6a", "1:2:0:4:1:1:1:1"},
{true, "6b", "1:2:0:4:5:1:1:1"},
{true, "6c", "1:2:0:4:5:0:0:0"},
}
for _, tt := range tests {
@@ -76,6 +91,8 @@ func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) {
hi := binary.BigEndian.Uint64(ip[:8])
lo := binary.BigEndian.Uint64(ip[8:])
assert.Equal(t, tt.Result, tree.MostSpecificContainsIpV6(hi, lo))
ok, r := tree.MostSpecificContainsIpV6(hi, lo)
assert.Equal(t, tt.Found, ok)
assert.Equal(t, tt.Result, r)
}
}

View File

@@ -7,7 +7,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"math"
"net"
"os"
@@ -213,27 +212,27 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
return fmt.Errorf("error while signing: %s", err)
}
var b []byte
if *cf.encryption {
b, err := cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
b, err = cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
if err != nil {
return fmt.Errorf("error while encrypting out-key: %s", err)
}
err = ioutil.WriteFile(*cf.outKeyPath, b, 0600)
} else {
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalSigningPrivateKey(curve, rawPriv), 0600)
b = cert.MarshalSigningPrivateKey(curve, rawPriv)
}
err = os.WriteFile(*cf.outKeyPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-key: %s", err)
}
b, err := nc.MarshalToPEM()
b, err = nc.MarshalToPEM()
if err != nil {
return fmt.Errorf("error while marshalling certificate: %s", err)
}
err = ioutil.WriteFile(*cf.outCertPath, b, 0600)
err = os.WriteFile(*cf.outCertPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-crt: %s", err)
}
@@ -244,7 +243,7 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
return fmt.Errorf("error while generating qr code: %s", err)
}
err = ioutil.WriteFile(*cf.outQRPath, b, 0600)
err = os.WriteFile(*cf.outQRPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-qr: %s", err)
}
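
The out-key change above fixes a classic Go shadowing bug: `:=` inside the if block declared a fresh b, so the outer b used later was never assigned. A standalone reproduction:

```go
package main

import "fmt"

func value() ([]byte, error) { return []byte("key"), nil }

func main() {
	var b []byte
	encrypt := true

	if encrypt {
		// BUG: ':=' declares a new b scoped to this block,
		// shadowing the outer b, which stays nil.
		b, err := value()
		_, _ = b, err
	}
	fmt.Println(b == nil) // true: the outer b was never written

	if encrypt {
		var err error
		b, err = value() // '=' assigns to the outer b, as in the fix
		_ = err
	}
	fmt.Println(string(b)) // key
}
```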

View File

@@ -7,7 +7,6 @@ import (
"bytes"
"encoding/pem"
"errors"
"io/ioutil"
"os"
"strings"
"testing"
@@ -107,7 +106,7 @@ func Test_ca(t *testing.T) {
assert.Equal(t, "", eb.String())
// create temp key file
keyF, err := ioutil.TempFile("", "test.key")
keyF, err := os.CreateTemp("", "test.key")
assert.Nil(t, err)
os.Remove(keyF.Name())
@@ -120,7 +119,7 @@ func Test_ca(t *testing.T) {
assert.Equal(t, "", eb.String())
// create temp cert file
crtF, err := ioutil.TempFile("", "test.crt")
crtF, err := os.CreateTemp("", "test.crt")
assert.Nil(t, err)
os.Remove(crtF.Name())
os.Remove(keyF.Name())
@@ -134,13 +133,13 @@ func Test_ca(t *testing.T) {
assert.Equal(t, "", eb.String())
// read cert and key files
rb, _ := ioutil.ReadFile(keyF.Name())
rb, _ := os.ReadFile(keyF.Name())
lKey, b, err := cert.UnmarshalEd25519PrivateKey(rb)
assert.Len(t, b, 0)
assert.Nil(t, err)
assert.Len(t, lKey, 64)
rb, _ = ioutil.ReadFile(crtF.Name())
rb, _ = os.ReadFile(crtF.Name())
lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
assert.Len(t, b, 0)
assert.Nil(t, err)
@@ -166,7 +165,7 @@ func Test_ca(t *testing.T) {
assert.Equal(t, "", eb.String())
// read encrypted key file and verify default params
rb, _ = ioutil.ReadFile(keyF.Name())
rb, _ = os.ReadFile(keyF.Name())
k, _ := pem.Decode(rb)
ned, err := cert.UnmarshalNebulaEncryptedData(k.Bytes)
assert.Nil(t, err)

View File

@@ -4,7 +4,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"github.com/slackhq/nebula/cert"
@@ -54,12 +53,12 @@ func keygen(args []string, out io.Writer, errOut io.Writer) error {
return fmt.Errorf("invalid curve: %s", *cf.curve)
}
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
err = os.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
if err != nil {
return fmt.Errorf("error while writing out-key: %s", err)
}
err = ioutil.WriteFile(*cf.outPubPath, cert.MarshalPublicKey(curve, pub), 0600)
err = os.WriteFile(*cf.outPubPath, cert.MarshalPublicKey(curve, pub), 0600)
if err != nil {
return fmt.Errorf("error while writing out-pub: %s", err)
}
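
The io/ioutil removals across these files follow the one-to-one replacements Go 1.16 introduced when it deprecated the package; for reference:

```go
package main

import (
	"fmt"
	"os"
)

func main() {
	// ioutil.WriteFile -> os.WriteFile (identical signature)
	if err := os.WriteFile("demo.key", []byte("secret"), 0600); err != nil {
		fmt.Println("write:", err)
		return
	}

	// ioutil.ReadFile -> os.ReadFile
	b, err := os.ReadFile("demo.key")
	fmt.Println(string(b), err)

	// ioutil.TempFile -> os.CreateTemp; ioutil.TempDir -> os.MkdirTemp
	f, err := os.CreateTemp("", "demo-*.key")
	if err == nil {
		fmt.Println(f.Name())
		f.Close()
		os.Remove(f.Name())
	}
	os.Remove("demo.key")
}
```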

View File

@@ -2,7 +2,6 @@ package main
import (
"bytes"
"io/ioutil"
"os"
"testing"
@@ -54,7 +53,7 @@ func Test_keygen(t *testing.T) {
assert.Equal(t, "", eb.String())
// create temp key file
keyF, err := ioutil.TempFile("", "test.key")
keyF, err := os.CreateTemp("", "test.key")
assert.Nil(t, err)
defer os.Remove(keyF.Name())
@@ -67,7 +66,7 @@ func Test_keygen(t *testing.T) {
assert.Equal(t, "", eb.String())
// create temp pub file
pubF, err := ioutil.TempFile("", "test.pub")
pubF, err := os.CreateTemp("", "test.pub")
assert.Nil(t, err)
defer os.Remove(pubF.Name())
@@ -80,13 +79,13 @@ func Test_keygen(t *testing.T) {
assert.Equal(t, "", eb.String())
// read cert and key files
rb, _ := ioutil.ReadFile(keyF.Name())
rb, _ := os.ReadFile(keyF.Name())
lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
assert.Len(t, b, 0)
assert.Nil(t, err)
assert.Len(t, lKey, 32)
rb, _ = ioutil.ReadFile(pubF.Name())
rb, _ = os.ReadFile(pubF.Name())
lPub, b, err := cert.UnmarshalX25519PublicKey(rb)
assert.Len(t, b, 0)
assert.Nil(t, err)

View File

@@ -5,7 +5,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
@@ -41,7 +40,7 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
return err
}
rawCert, err := ioutil.ReadFile(*pf.path)
rawCert, err := os.ReadFile(*pf.path)
if err != nil {
return fmt.Errorf("unable to read cert; %s", err)
}
@@ -87,7 +86,7 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
return fmt.Errorf("error while generating qr code: %s", err)
}
err = ioutil.WriteFile(*pf.outQRPath, b, 0600)
err = os.WriteFile(*pf.outQRPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-qr: %s", err)
}

View File

@@ -2,7 +2,6 @@ package main
import (
"bytes"
"io/ioutil"
"os"
"testing"
"time"
@@ -54,7 +53,7 @@ func Test_printCert(t *testing.T) {
// invalid cert at path
ob.Reset()
eb.Reset()
tf, err := ioutil.TempFile("", "print-cert")
tf, err := os.CreateTemp("", "print-cert")
assert.Nil(t, err)
defer os.Remove(tf.Name())

View File

@@ -6,7 +6,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"strings"
@@ -73,7 +72,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return newHelpErrorf("cannot set both -in-pub and -out-key")
}
rawCAKey, err := ioutil.ReadFile(*sf.caKeyPath)
rawCAKey, err := os.ReadFile(*sf.caKeyPath)
if err != nil {
return fmt.Errorf("error while reading ca-key: %s", err)
}
@@ -112,7 +111,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("error while parsing ca-key: %s", err)
}
rawCACert, err := ioutil.ReadFile(*sf.caCertPath)
rawCACert, err := os.ReadFile(*sf.caCertPath)
if err != nil {
return fmt.Errorf("error while reading ca-crt: %s", err)
}
@@ -178,7 +177,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
var pub, rawPriv []byte
if *sf.inPubPath != "" {
rawPub, err := ioutil.ReadFile(*sf.inPubPath)
rawPub, err := os.ReadFile(*sf.inPubPath)
if err != nil {
return fmt.Errorf("error while reading in-pub: %s", err)
}
@@ -235,7 +234,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("refusing to overwrite existing key: %s", *sf.outKeyPath)
}
err = ioutil.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
err = os.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
if err != nil {
return fmt.Errorf("error while writing out-key: %s", err)
}
@@ -246,7 +245,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("error while marshalling certificate: %s", err)
}
err = ioutil.WriteFile(*sf.outCertPath, b, 0600)
err = os.WriteFile(*sf.outCertPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-crt: %s", err)
}
@@ -257,7 +256,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
return fmt.Errorf("error while generating qr code: %s", err)
}
err = ioutil.WriteFile(*sf.outQRPath, b, 0600)
err = os.WriteFile(*sf.outQRPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-qr: %s", err)
}

View File

@@ -7,7 +7,6 @@ import (
"bytes"
"crypto/rand"
"errors"
"io/ioutil"
"os"
"testing"
"time"
@@ -104,7 +103,7 @@ func Test_signCert(t *testing.T) {
// failed to unmarshal key
ob.Reset()
eb.Reset()
caKeyF, err := ioutil.TempFile("", "sign-cert.key")
caKeyF, err := os.CreateTemp("", "sign-cert.key")
assert.Nil(t, err)
defer os.Remove(caKeyF.Name())
@@ -128,7 +127,7 @@ func Test_signCert(t *testing.T) {
// failed to unmarshal cert
ob.Reset()
eb.Reset()
caCrtF, err := ioutil.TempFile("", "sign-cert.crt")
caCrtF, err := os.CreateTemp("", "sign-cert.crt")
assert.Nil(t, err)
defer os.Remove(caCrtF.Name())
@@ -159,7 +158,7 @@ func Test_signCert(t *testing.T) {
// failed to unmarshal pub
ob.Reset()
eb.Reset()
inPubF, err := ioutil.TempFile("", "in.pub")
inPubF, err := os.CreateTemp("", "in.pub")
assert.Nil(t, err)
defer os.Remove(inPubF.Name())
@@ -206,7 +205,7 @@ func Test_signCert(t *testing.T) {
// mismatched ca key
_, caPriv2, _ := ed25519.GenerateKey(rand.Reader)
caKeyF2, err := ioutil.TempFile("", "sign-cert-2.key")
caKeyF2, err := os.CreateTemp("", "sign-cert-2.key")
assert.Nil(t, err)
defer os.Remove(caKeyF2.Name())
caKeyF2.Write(cert.MarshalEd25519PrivateKey(caPriv2))
@@ -227,7 +226,7 @@ func Test_signCert(t *testing.T) {
assert.Empty(t, eb.String())
// create temp key file
keyF, err := ioutil.TempFile("", "test.key")
keyF, err := os.CreateTemp("", "test.key")
assert.Nil(t, err)
os.Remove(keyF.Name())
@@ -241,7 +240,7 @@ func Test_signCert(t *testing.T) {
os.Remove(keyF.Name())
// create temp cert file
crtF, err := ioutil.TempFile("", "test.crt")
crtF, err := os.CreateTemp("", "test.crt")
assert.Nil(t, err)
os.Remove(crtF.Name())
@@ -254,13 +253,13 @@ func Test_signCert(t *testing.T) {
assert.Empty(t, eb.String())
// read cert and key files
rb, _ := ioutil.ReadFile(keyF.Name())
rb, _ := os.ReadFile(keyF.Name())
lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
assert.Len(t, b, 0)
assert.Nil(t, err)
assert.Len(t, lKey, 32)
rb, _ = ioutil.ReadFile(crtF.Name())
rb, _ = os.ReadFile(crtF.Name())
lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
assert.Len(t, b, 0)
assert.Nil(t, err)
@@ -296,7 +295,7 @@ func Test_signCert(t *testing.T) {
assert.Empty(t, eb.String())
// read cert file and check pub key matches in-pub
rb, _ = ioutil.ReadFile(crtF.Name())
rb, _ = os.ReadFile(crtF.Name())
lCrt, b, err = cert.UnmarshalNebulaCertificateFromPEM(rb)
assert.Len(t, b, 0)
assert.Nil(t, err)
@@ -348,11 +347,11 @@ func Test_signCert(t *testing.T) {
ob.Reset()
eb.Reset()
caKeyF, err = ioutil.TempFile("", "sign-cert.key")
caKeyF, err = os.CreateTemp("", "sign-cert.key")
assert.Nil(t, err)
defer os.Remove(caKeyF.Name())
caCrtF, err = ioutil.TempFile("", "sign-cert.crt")
caCrtF, err = os.CreateTemp("", "sign-cert.crt")
assert.Nil(t, err)
defer os.Remove(caCrtF.Name())

View File

@@ -4,7 +4,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"time"
@@ -40,7 +39,7 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {
return err
}
rawCACert, err := ioutil.ReadFile(*vf.caPath)
rawCACert, err := os.ReadFile(*vf.caPath)
if err != nil {
return fmt.Errorf("error while reading ca: %s", err)
}
@@ -57,7 +56,7 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {
}
}
rawCert, err := ioutil.ReadFile(*vf.certPath)
rawCert, err := os.ReadFile(*vf.certPath)
if err != nil {
return fmt.Errorf("unable to read crt; %s", err)
}

View File

@@ -3,7 +3,6 @@ package main
import (
"bytes"
"crypto/rand"
"io/ioutil"
"os"
"testing"
"time"
@@ -56,7 +55,7 @@ func Test_verify(t *testing.T) {
// invalid ca at path
ob.Reset()
eb.Reset()
caFile, err := ioutil.TempFile("", "verify-ca")
caFile, err := os.CreateTemp("", "verify-ca")
assert.Nil(t, err)
defer os.Remove(caFile.Name())
@@ -92,7 +91,7 @@ func Test_verify(t *testing.T) {
// invalid crt at path
ob.Reset()
eb.Reset()
certFile, err := ioutil.TempFile("", "verify-cert")
certFile, err := os.CreateTemp("", "verify-cert")
assert.Nil(t, err)
defer os.Remove(certFile.Name())

View File

@@ -59,13 +59,8 @@ func main() {
}
ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
switch v := err.(type) {
case util.ContextualError:
v.Log(l)
os.Exit(1)
case error:
l.WithError(err).Error("Failed to start")
if err != nil {
util.LogWithContextIfNeeded("Failed to start", err, l)
os.Exit(1)
}

View File

@@ -53,18 +53,14 @@ func main() {
}
ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
switch v := err.(type) {
case util.ContextualError:
v.Log(l)
os.Exit(1)
case error:
l.WithError(err).Error("Failed to start")
if err != nil {
util.LogWithContextIfNeeded("Failed to start", err, l)
os.Exit(1)
}
if !*configTest {
ctrl.Start()
notifyReady(l)
ctrl.ShutdownBlock()
}
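
Both entry points now delegate the error type switch to util.LogWithContextIfNeeded. As a sketch only, one plausible shape for such a helper, assuming the contextual error knows how to log its own fields; in this repository util.ContextualError is a concrete type, so the interface below is an illustrative stand-in:

```go
package util

import "github.com/sirupsen/logrus"

// contextualError is a stand-in for errors that carry structured context.
type contextualError interface {
	error
	Log(l *logrus.Logger)
}

// LogWithContextIfNeeded logs a contextual error through its own Log
// method, and anything else with the generic message.
func LogWithContextIfNeeded(msg string, err error, l *logrus.Logger) {
	if ce, ok := err.(contextualError); ok {
		ce.Log(l)
		return
	}
	l.WithError(err).Error(msg)
}
```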

View File

@@ -0,0 +1,42 @@
package main
import (
"net"
"os"
"time"
"github.com/sirupsen/logrus"
)
// SdNotifyReady tells systemd the service is ready and dependent services can now be started
// https://www.freedesktop.org/software/systemd/man/sd_notify.html
// https://www.freedesktop.org/software/systemd/man/systemd.service.html
const SdNotifyReady = "READY=1"
func notifyReady(l *logrus.Logger) {
sockName := os.Getenv("NOTIFY_SOCKET")
if sockName == "" {
l.Debugln("NOTIFY_SOCKET systemd env var not set, not sending ready signal")
return
}
conn, err := net.DialTimeout("unixgram", sockName, time.Second)
if err != nil {
l.WithError(err).Error("failed to connect to systemd notification socket")
return
}
defer conn.Close()
err = conn.SetWriteDeadline(time.Now().Add(time.Second))
if err != nil {
l.WithError(err).Error("failed to set the write deadline for the systemd notification socket")
return
}
if _, err = conn.Write([]byte(SdNotifyReady)); err != nil {
l.WithError(err).Error("failed to signal the systemd notification socket")
return
}
l.Debugln("notified systemd the service is ready")
}
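
The READY=1 datagram can be observed without systemd by pointing NOTIFY_SOCKET at a local unixgram listener, the same contract sd_notify uses; a test-style sketch (the inline writer goroutine stands in for notifyReady above):

```go
package main

import (
	"fmt"
	"net"
	"os"
	"path/filepath"
)

func main() {
	sock := filepath.Join(os.TempDir(), "notify-test.sock")
	defer os.Remove(sock)

	// Listen the way systemd would on the notification socket.
	conn, err := net.ListenUnixgram("unixgram", &net.UnixAddr{Name: sock, Net: "unixgram"})
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// notifyReady reads this env var to find the socket.
	os.Setenv("NOTIFY_SOCKET", sock)

	go func() {
		// Stand-in for calling notifyReady(l) in the service.
		c, err := net.Dial("unixgram", sock)
		if err != nil {
			return
		}
		defer c.Close()
		c.Write([]byte("READY=1"))
	}()

	buf := make([]byte, 64)
	n, _, err := conn.ReadFromUnix(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf[:n])) // READY=1
}
```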

View File

@@ -0,0 +1,10 @@
//go:build !linux
// +build !linux
package main
import "github.com/sirupsen/logrus"
func notifyReady(_ *logrus.Logger) {
// No init service to notify
}

View File

@@ -4,7 +4,7 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"math"
"os"
"os/signal"
"path/filepath"
@@ -15,7 +15,7 @@ import (
"syscall"
"time"
"github.com/imdario/mergo"
"dario.cat/mergo"
"github.com/sirupsen/logrus"
"gopkg.in/yaml.v2"
)
@@ -121,6 +121,10 @@ func (c *C) HasChanged(k string) bool {
// CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the
// original path provided to Load. The old settings are shallow copied for change detection after the reload.
func (c *C) CatchHUP(ctx context.Context) {
if c.path == "" {
return
}
ch := make(chan os.Signal, 1)
signal.Notify(ch, syscall.SIGHUP)
@@ -236,6 +240,15 @@ func (c *C) GetInt(k string, d int) int {
return v
}
// GetUint32 will get the uint32 for k or return the default d if not found or invalid
func (c *C) GetUint32(k string, d uint32) uint32 {
r := c.GetInt(k, int(d))
if uint64(r) > uint64(math.MaxUint32) {
return d
}
return uint32(r)
}
// GetBool will get the bool for k or return the default d if not found or invalid
func (c *C) GetBool(k string, d bool) bool {
r := strings.ToLower(c.GetString(k, fmt.Sprintf("%v", d)))
@@ -348,7 +361,7 @@ func (c *C) parse() error {
var m map[interface{}]interface{}
for _, path := range c.files {
b, err := ioutil.ReadFile(path)
b, err := os.ReadFile(path)
if err != nil {
return err
}
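
The new GetUint32 leans on GetInt and falls back to the default whenever the parsed value does not fit in a uint32; note that negative values wrap through the uint64 conversion and are rejected as well. The clamping in isolation:

```go
package main

import (
	"fmt"
	"math"
)

// getUint32 mirrors the semantics of the GetUint32 helper added above:
// read the value as an int, then fall back to the default d when it
// does not fit in a uint32.
func getUint32(r int, d uint32) uint32 {
	if uint64(r) > uint64(math.MaxUint32) {
		return d
	}
	return uint32(r)
}

func main() {
	fmt.Println(getUint32(500, 9))           // 500: fits
	fmt.Println(getUint32(-1, 9))            // 9: -1 wraps to 2^64-1 and is rejected
	fmt.Println(getUint32(math.MaxInt64, 9)) // 9: too large (64-bit int assumed)
}
```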

View File

@@ -1,13 +1,12 @@
package config
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
"github.com/imdario/mergo"
"dario.cat/mergo"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -16,10 +15,10 @@ import (
func TestConfig_Load(t *testing.T) {
l := test.NewLogger()
dir, err := ioutil.TempDir("", "config-test")
dir, err := os.MkdirTemp("", "config-test")
// invalid yaml
c := NewC(l)
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
os.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}")
// simple multi config merge
@@ -29,8 +28,8 @@ func TestConfig_Load(t *testing.T) {
assert.Nil(t, err)
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
ioutil.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644)
os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
os.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644)
assert.Nil(t, c.Load(dir))
expected := map[interface{}]interface{}{
"outer": map[interface{}]interface{}{
@@ -120,9 +119,9 @@ func TestConfig_HasChanged(t *testing.T) {
func TestConfig_ReloadConfig(t *testing.T) {
l := test.NewLogger()
done := make(chan bool, 1)
dir, err := ioutil.TempDir("", "config-test")
dir, err := os.MkdirTemp("", "config-test")
assert.Nil(t, err)
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
c := NewC(l)
assert.Nil(t, c.Load(dir))
@@ -131,7 +130,7 @@ func TestConfig_ReloadConfig(t *testing.T) {
assert.False(t, c.HasChanged("outer"))
assert.False(t, c.HasChanged(""))
ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
c.RegisterReloadCallback(func(c *C) {
done <- true

View File

@@ -23,6 +23,7 @@ const (
swapPrimary trafficDecision = 3
migrateRelays trafficDecision = 4
tryRehandshake trafficDecision = 5
sendTestPacket trafficDecision = 6
)
type connectionManager struct {
@@ -176,7 +177,7 @@ func (n *connectionManager) Run(ctx context.Context) {
}
func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
decision, hostinfo, primary := n.makeTrafficDecision(localIndex, p, nb, out, now)
decision, hostinfo, primary := n.makeTrafficDecision(localIndex, now)
switch decision {
case deleteTunnel:
@@ -197,6 +198,9 @@ func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte,
case tryRehandshake:
n.tryRehandshake(hostinfo)
case sendTestPacket:
n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
}
n.resetRelayTrafficCheck(hostinfo)
@@ -231,7 +235,7 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
index = existing.LocalIndex
switch r.Type {
case TerminalType:
relayFrom = newhostinfo.vpnIp
relayFrom = n.intf.myVpnIp
relayTo = existing.PeerIp
case ForwardingType:
relayFrom = existing.PeerIp
@@ -256,7 +260,7 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
}
switch r.Type {
case TerminalType:
relayFrom = newhostinfo.vpnIp
relayFrom = n.intf.myVpnIp
relayTo = r.PeerIp
case ForwardingType:
relayFrom = r.PeerIp
@@ -289,7 +293,7 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
}
}
func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []byte, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
n.hostMap.RLock()
defer n.hostMap.RUnlock()
@@ -356,6 +360,7 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []
return deleteTunnel, hostinfo, nil
}
decision := doNothing
if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
if !outTraffic {
// If we aren't sending or receiving traffic then it's an unused tunnel and we don't need to test the tunnel.
@@ -380,7 +385,7 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []
}
// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
decision = sendTestPacket
} else {
if n.l.Level >= logrus.DebugLevel {
@@ -390,7 +395,7 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []
n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
return doNothing, nil, nil
return decision, hostinfo, nil
}
func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
@@ -405,8 +410,8 @@ func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
return false
}
certState := n.intf.certState.Load()
return bytes.Equal(current.ConnectionState.certState.certificate.Signature, certState.certificate.Signature)
certState := n.intf.pki.GetCertState()
return bytes.Equal(current.ConnectionState.myCert.Signature, certState.Certificate.Signature)
}
func (n *connectionManager) swapPrimary(current, primary *HostInfo) {
@@ -427,12 +432,12 @@ func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostIn
return false
}
valid, err := remoteCert.VerifyWithCache(now, n.intf.caPool)
valid, err := remoteCert.VerifyWithCache(now, n.intf.pki.GetCAPool())
if valid {
return false
}
if !n.intf.disconnectInvalid && err != cert.ErrBlockListed {
if !n.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
// Block listed certificates should always be disconnected
return false
}
@@ -464,8 +469,8 @@ func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
}
func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
certState := n.intf.certState.Load()
if bytes.Equal(hostinfo.ConnectionState.certState.certificate.Signature, certState.certificate.Signature) {
certState := n.intf.pki.GetCertState()
if bytes.Equal(hostinfo.ConnectionState.myCert.Signature, certState.Certificate.Signature) {
return
}
@@ -473,18 +478,5 @@ func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
WithField("reason", "local certificate is not current").
Info("Re-handshaking with remote")
//TODO: this is copied from getOrHandshake to keep the extra checks out of the hot path, figure it out
newHostinfo := n.intf.handshakeManager.AddVpnIp(hostinfo.vpnIp, n.intf.initHostInfo)
if !newHostinfo.HandshakeReady {
ixHandshakeStage0(n.intf, newHostinfo.vpnIp, newHostinfo)
}
//If this is a static host, we don't need to wait for the HostQueryReply
//We can trigger the handshake right now
if _, ok := n.intf.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
select {
case n.intf.handshakeManager.trigger <- hostinfo.vpnIp:
default:
}
}
n.intf.handshakeManager.StartHandshake(hostinfo.vpnIp, nil)
}
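
makeTrafficDecision now returns sendTestPacket instead of sending inline, so the packet buffers (p, nb, out) stay out of the decision path and the decision logic can be exercised on its own. A generic sketch of that decide/dispatch split (names are illustrative, not the repository's):

```go
package main

import "fmt"

type trafficDecision int

const (
	doNothing trafficDecision = iota
	deleteTunnel
	sendTestPacket
)

// decide is pure: it inspects state and returns a decision, mirroring
// how makeTrafficDecision no longer touches packet buffers.
func decide(sawTraffic, pendingDeletion bool) trafficDecision {
	switch {
	case !sawTraffic && pendingDeletion:
		return deleteTunnel
	case !sawTraffic:
		return sendTestPacket
	default:
		return doNothing
	}
}

func main() {
	// The caller owns the side effects, as doTrafficCheck does above.
	switch d := decide(false, false); d {
	case sendTestPacket:
		fmt.Println("send header.Test probe")
	case deleteTunnel:
		fmt.Println("tear down tunnel")
	default:
		fmt.Println("no-op")
	}
}
```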

View File

@@ -21,8 +21,9 @@ var vpnIp iputil.VpnIp
func newTestLighthouse() *LightHouse {
lh := &LightHouse{
l: test.NewLogger(),
addrMap: map[iputil.VpnIp]*RemoteList{},
l: test.NewLogger(),
addrMap: map[iputil.VpnIp]*RemoteList{},
queryChan: make(chan iputil.VpnIp, 10),
}
lighthouses := map[iputil.VpnIp]struct{}{}
staticList := map[iputil.VpnIp]struct{}{}
@@ -42,25 +43,26 @@ func Test_NewConnectionManagerTest(t *testing.T) {
preferredRanges := []*net.IPNet{localrange}
// Very incomplete mock objects
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
hostMap := NewHostMap(l, vpncidr, preferredRanges)
cs := &CertState{
rawCertificate: []byte{},
privateKey: []byte{},
certificate: &cert.NebulaCertificate{},
rawCertificateNoKey: []byte{},
RawCertificate: []byte{},
PrivateKey: []byte{},
Certificate: &cert.NebulaCertificate{},
RawCertificateNoKey: []byte{},
}
lh := newTestLighthouse()
ifce := &Interface{
hostMap: hostMap,
inside: &test.NoopTun{},
outside: &udp.Conn{},
outside: &udp.NoopConn{},
firewall: &Firewall{},
lightHouse: lh,
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
pki: &PKI{},
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
l: l,
}
ifce.certState.Store(cs)
ifce.pki.cs.Store(cs)
// Create manager
ctx, cancel := context.WithCancel(context.Background())
@@ -78,8 +80,8 @@ func Test_NewConnectionManagerTest(t *testing.T) {
remoteIndexId: 9901,
}
hostinfo.ConnectionState = &ConnectionState{
certState: cs,
H: &noise.HandshakeState{},
myCert: &cert.NebulaCertificate{},
H: &noise.HandshakeState{},
}
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
@@ -121,25 +123,26 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
preferredRanges := []*net.IPNet{localrange}
// Very incomplete mock objects
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
hostMap := NewHostMap(l, vpncidr, preferredRanges)
cs := &CertState{
rawCertificate: []byte{},
privateKey: []byte{},
certificate: &cert.NebulaCertificate{},
rawCertificateNoKey: []byte{},
RawCertificate: []byte{},
PrivateKey: []byte{},
Certificate: &cert.NebulaCertificate{},
RawCertificateNoKey: []byte{},
}
lh := newTestLighthouse()
ifce := &Interface{
hostMap: hostMap,
inside: &test.NoopTun{},
outside: &udp.Conn{},
outside: &udp.NoopConn{},
firewall: &Firewall{},
lightHouse: lh,
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
pki: &PKI{},
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
l: l,
}
ifce.certState.Store(cs)
ifce.pki.cs.Store(cs)
// Create manager
ctx, cancel := context.WithCancel(context.Background())
@@ -157,8 +160,8 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
remoteIndexId: 9901,
}
hostinfo.ConnectionState = &ConnectionState{
certState: cs,
H: &noise.HandshakeState{},
myCert: &cert.NebulaCertificate{},
H: &noise.HandshakeState{},
}
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
@@ -207,7 +210,7 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
preferredRanges := []*net.IPNet{localrange}
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
hostMap := NewHostMap(l, vpncidr, preferredRanges)
// Generate keys for CA and peer's cert.
pubCA, privCA, _ := ed25519.GenerateKey(rand.Reader)
@@ -220,7 +223,8 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
PublicKey: pubCA,
},
}
caCert.Sign(cert.Curve_CURVE25519, privCA)
assert.NoError(t, caCert.Sign(cert.Curve_CURVE25519, privCA))
ncp := &cert.NebulaCAPool{
CAs: cert.NewCAPool().CAs,
}
@@ -239,28 +243,29 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
Issuer: "ca",
},
}
peerCert.Sign(cert.Curve_CURVE25519, privCA)
assert.NoError(t, peerCert.Sign(cert.Curve_CURVE25519, privCA))
cs := &CertState{
rawCertificate: []byte{},
privateKey: []byte{},
certificate: &cert.NebulaCertificate{},
rawCertificateNoKey: []byte{},
RawCertificate: []byte{},
PrivateKey: []byte{},
Certificate: &cert.NebulaCertificate{},
RawCertificateNoKey: []byte{},
}
lh := newTestLighthouse()
ifce := &Interface{
hostMap: hostMap,
inside: &test.NoopTun{},
outside: &udp.Conn{},
firewall: &Firewall{},
lightHouse: lh,
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
l: l,
disconnectInvalid: true,
caPool: ncp,
hostMap: hostMap,
inside: &test.NoopTun{},
outside: &udp.NoopConn{},
firewall: &Firewall{},
lightHouse: lh,
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
l: l,
pki: &PKI{},
}
ifce.certState.Store(cs)
ifce.pki.cs.Store(cs)
ifce.pki.caPool.Store(ncp)
ifce.disconnectInvalid.Store(true)
// Create manager
ctx, cancel := context.WithCancel(context.Background())
@@ -268,12 +273,16 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
punchy := NewPunchyFromConfig(l, config.NewC(l))
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
ifce.connectionManager = nc
hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
hostinfo.ConnectionState = &ConnectionState{
certState: cs,
peerCert: &peerCert,
H: &noise.HandshakeState{},
hostinfo := &HostInfo{
vpnIp: vpnIp,
ConnectionState: &ConnectionState{
myCert: &cert.NebulaCertificate{},
peerCert: &peerCert,
H: &noise.HandshakeState{},
},
}
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
// Move ahead 45s.
// Check if to disconnect with invalid certificate.

View File

@@ -18,35 +18,34 @@ type ConnectionState struct {
eKey *NebulaCipherState
dKey *NebulaCipherState
H *noise.HandshakeState
certState *CertState
myCert *cert.NebulaCertificate
peerCert *cert.NebulaCertificate
initiator bool
messageCounter atomic.Uint64
window *Bits
queueLock sync.Mutex
writeLock sync.Mutex
ready bool
}
func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
func NewConnectionState(l *logrus.Logger, cipher string, certState *CertState, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
var dhFunc noise.DHFunc
curCertState := f.certState.Load()
switch curCertState.certificate.Details.Curve {
switch certState.Certificate.Details.Curve {
case cert.Curve_CURVE25519:
dhFunc = noise.DH25519
case cert.Curve_P256:
dhFunc = noiseutil.DHP256
default:
l.Errorf("invalid curve: %s", curCertState.certificate.Details.Curve)
l.Errorf("invalid curve: %s", certState.Certificate.Details.Curve)
return nil
}
cs := noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
if f.cipher == "chachapoly" {
var cs noise.CipherSuite
if cipher == "chachapoly" {
cs = noise.NewCipherSuite(dhFunc, noise.CipherChaChaPoly, noise.HashSHA256)
} else {
cs = noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
}
static := noise.DHKey{Private: curCertState.privateKey, Public: curCertState.publicKey}
static := noise.DHKey{Private: certState.PrivateKey, Public: certState.PublicKey}
b := NewBits(ReplayWindow)
// Clear out bit 0, we never transmit it and we don't want it showing as packet loss
@@ -71,8 +70,7 @@ func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern
H: hs,
initiator: initiator,
window: b,
ready: false,
certState: curCertState,
myCert: certState.Certificate,
}
return ci
@@ -83,6 +81,5 @@ func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
"certificate": cs.peerCert,
"initiator": cs.initiator,
"message_counter": cs.messageCounter.Load(),
"ready": cs.ready,
})
}
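
The cipher-suite selection is now driven entirely by the explicit cipher and CertState parameters rather than Interface state. In isolation, assuming the flynn/noise package (nebula substitutes noiseutil.DHP256 and noiseutil.CipherAESGCM for its P-256 and FIPS-friendly AES variants):

```go
package main

import (
	"fmt"

	"github.com/flynn/noise"
)

// pickSuite mirrors the selection above: chachapoly is opt-in,
// AES-GCM is the default.
func pickSuite(cipher string, dh noise.DHFunc) noise.CipherSuite {
	if cipher == "chachapoly" {
		return noise.NewCipherSuite(dh, noise.CipherChaChaPoly, noise.HashSHA256)
	}
	return noise.NewCipherSuite(dh, noise.CipherAESGCM, noise.HashSHA256)
}

func main() {
	cs := pickSuite("chachapoly", noise.DH25519)
	fmt.Printf("%s\n", cs.Name())
}
```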

View File

@@ -11,19 +11,31 @@ import (
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/overlay"
"github.com/slackhq/nebula/udp"
)
// Every interaction here needs to take extra care to copy memory and not return or use arguments "as is" when touching
// core. This means copying IP objects, slices, de-referencing pointers and taking the actual value, etc
type controlEach func(h *HostInfo)
type controlHostLister interface {
QueryVpnIp(vpnIp iputil.VpnIp) *HostInfo
ForEachIndex(each controlEach)
ForEachVpnIp(each controlEach)
GetPreferredRanges() []*net.IPNet
}
type Control struct {
f *Interface
l *logrus.Logger
cancel context.CancelFunc
sshStart func()
statsStart func()
dnsStart func()
f *Interface
l *logrus.Logger
ctx context.Context
cancel context.CancelFunc
sshStart func()
statsStart func()
dnsStart func()
lighthouseStart func()
}
type ControlHostInfo struct {
@@ -31,7 +43,6 @@ type ControlHostInfo struct {
LocalIndex uint32 `json:"localIndex"`
RemoteIndex uint32 `json:"remoteIndex"`
RemoteAddrs []*udp.Addr `json:"remoteAddrs"`
CachedPackets int `json:"cachedPackets"`
Cert *cert.NebulaCertificate `json:"cert"`
MessageCounter uint64 `json:"messageCounter"`
CurrentRemote *udp.Addr `json:"currentRemote"`
@@ -54,12 +65,19 @@ func (c *Control) Start() {
if c.dnsStart != nil {
go c.dnsStart()
}
if c.lighthouseStart != nil {
c.lighthouseStart()
}
// Start reading packets.
c.f.run()
}
// Stop signals nebula to shutdown, returns after the shutdown is complete
func (c *Control) Context() context.Context {
return c.ctx
}
// Stop signals nebula to shut down and close all tunnels; returns after the shutdown is complete
func (c *Control) Stop() {
// Stop the handshakeManager (and other services), to prevent new tunnels from
// being created while we're shutting them all down.
@@ -89,7 +107,7 @@ func (c *Control) RebindUDPServer() {
_ = c.f.outside.Rebind()
// Trigger a lighthouse update, useful for mobile clients that should have an update interval of 0
c.f.lightHouse.SendUpdate(c.f)
c.f.lightHouse.SendUpdate()
// Let the main interface know that we rebound so that underlying tunnels know to trigger punches from their remotes
c.f.rebindCount++
@@ -98,7 +116,7 @@ func (c *Control) RebindUDPServer() {
// ListHostmapHosts returns details about the actual or pending (handshaking) hostmap by vpn ip
func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
if pendingMap {
return listHostMapHosts(c.f.handshakeManager.pendingHostMap)
return listHostMapHosts(c.f.handshakeManager)
} else {
return listHostMapHosts(c.f.hostMap)
}
@@ -107,7 +125,7 @@ func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
// ListHostmapIndexes returns details about the actual or pending (handshaking) hostmap by local index id
func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
if pendingMap {
return listHostMapIndexes(c.f.handshakeManager.pendingHostMap)
return listHostMapIndexes(c.f.handshakeManager)
} else {
return listHostMapIndexes(c.f.hostMap)
}
@@ -115,15 +133,15 @@ func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
// GetHostInfoByVpnIp returns a single tunnels hostInfo, or nil if not found
func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlHostInfo {
var hm *HostMap
var hl controlHostLister
if pending {
hm = c.f.handshakeManager.pendingHostMap
hl = c.f.handshakeManager
} else {
hm = c.f.hostMap
hl = c.f.hostMap
}
h, err := hm.QueryVpnIp(vpnIp)
if err != nil {
h := hl.QueryVpnIp(vpnIp)
if h == nil {
return nil
}
@@ -133,8 +151,8 @@ func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlH
// SetRemoteForTunnel forces a tunnel to use a specific remote
func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr udp.Addr) *ControlHostInfo {
hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
if err != nil {
hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
if hostInfo == nil {
return nil
}
@@ -145,8 +163,8 @@ func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr udp.Addr) *Control
// CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well.
func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool {
hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
if err != nil {
hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
if hostInfo == nil {
return false
}
@@ -214,6 +232,10 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
return
}
func (c *Control) Device() overlay.Device {
return c.f.inside
}
func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
chi := ControlHostInfo{
@@ -221,7 +243,6 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
LocalIndex: h.localIndexId,
RemoteIndex: h.remoteIndexId,
RemoteAddrs: h.remotes.CopyAddrs(preferredRanges),
CachedPackets: len(h.packetStore),
CurrentRelaysToMe: h.relayState.CopyRelayIps(),
CurrentRelaysThroughMe: h.relayState.CopyRelayForIps(),
}
@@ -241,28 +262,20 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
return chi
}
func listHostMapHosts(hm *HostMap) []ControlHostInfo {
hm.RLock()
hosts := make([]ControlHostInfo, len(hm.Hosts))
i := 0
for _, v := range hm.Hosts {
hosts[i] = copyHostInfo(v, hm.preferredRanges)
i++
}
hm.RUnlock()
func listHostMapHosts(hl controlHostLister) []ControlHostInfo {
hosts := make([]ControlHostInfo, 0)
pr := hl.GetPreferredRanges()
hl.ForEachVpnIp(func(hostinfo *HostInfo) {
hosts = append(hosts, copyHostInfo(hostinfo, pr))
})
return hosts
}
func listHostMapIndexes(hm *HostMap) []ControlHostInfo {
hm.RLock()
hosts := make([]ControlHostInfo, len(hm.Indexes))
i := 0
for _, v := range hm.Indexes {
hosts[i] = copyHostInfo(v, hm.preferredRanges)
i++
}
hm.RUnlock()
func listHostMapIndexes(hl controlHostLister) []ControlHostInfo {
hosts := make([]ControlHostInfo, 0)
pr := hl.GetPreferredRanges()
hl.ForEachIndex(func(hostinfo *HostInfo) {
hosts = append(hosts, copyHostInfo(hostinfo, pr))
})
return hosts
}
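
The controlHostLister interface lets Control iterate both the live HostMap and the handshake manager's pending set through one code path; callback-style iteration keeps the locking inside the implementation instead of at every call site. A cut-down sketch of the pattern (types here are stand-ins):

```go
package main

import "fmt"

type HostInfo struct{ vpnIp string }

// lister is a reduced controlHostLister: the implementation owns its
// own locking and hands out each entry through a callback.
type lister interface {
	ForEachVpnIp(func(*HostInfo))
}

type mapLister struct{ hosts map[string]*HostInfo }

func (m *mapLister) ForEachVpnIp(each func(*HostInfo)) {
	// a real HostMap would RLock()/RUnlock() around this loop
	for _, h := range m.hosts {
		each(h)
	}
}

func list(hl lister) []string {
	out := make([]string, 0)
	hl.ForEachVpnIp(func(h *HostInfo) {
		out = append(out, h.vpnIp) // copy fields out; never retain h
	})
	return out
}

func main() {
	m := &mapLister{hosts: map[string]*HostInfo{"a": {vpnIp: "10.0.0.1"}}}
	fmt.Println(list(m))
}
```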

View File

@@ -18,7 +18,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
l := test.NewLogger()
// Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object
// To properly ensure we are not exposing core memory to the caller
hm := NewHostMap(l, "test", &net.IPNet{}, make([]*net.IPNet, 0))
hm := NewHostMap(l, &net.IPNet{}, make([]*net.IPNet, 0))
remote1 := udp.NewAddr(net.ParseIP("0.0.0.100"), 4444)
remote2 := udp.NewAddr(net.ParseIP("1:2:3:4:5:6:7:8"), 4444)
ipNet := net.IPNet{
@@ -50,7 +50,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
remotes := NewRemoteList(nil)
remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port)))
remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port)))
hm.Add(iputil.Ip2VpnIp(ipNet.IP), &HostInfo{
hm.unlockedAddHostInfo(&HostInfo{
remote: remote1,
remotes: remotes,
ConnectionState: &ConnectionState{
@@ -64,9 +64,9 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
})
}, &Interface{})
hm.Add(iputil.Ip2VpnIp(ipNet2.IP), &HostInfo{
hm.unlockedAddHostInfo(&HostInfo{
remote: remote1,
remotes: remotes,
ConnectionState: &ConnectionState{
@@ -80,7 +80,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
})
}, &Interface{})
c := Control{
f: &Interface{
@@ -96,7 +96,6 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
LocalIndex: 201,
RemoteIndex: 200,
RemoteAddrs: []*udp.Addr{remote2, remote1},
CachedPackets: 0,
Cert: crt.Copy(),
MessageCounter: 0,
CurrentRemote: udp.NewAddr(net.ParseIP("0.0.0.100"), 4444),
@@ -105,7 +104,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
}
// Make sure we don't have any unexpected fields
assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "CachedPackets", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
test.AssertDeepCopyEqual(t, &expectedInfo, thi)
// Make sure we don't panic if the host info doesn't have a cert yet

View File

@@ -21,7 +21,7 @@ import (
func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
h := &header.H{}
for {
p := c.f.outside.Get(true)
p := c.f.outside.(*udp.TesterConn).Get(true)
if err := h.Parse(p.Data); err != nil {
panic(err)
}
@@ -37,7 +37,7 @@ func (c *Control) WaitForType(msgType header.MessageType, subType header.Message
func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
h := &header.H{}
for {
p := c.f.outside.Get(true)
p := c.f.outside.(*udp.TesterConn).Get(true)
if err := h.Parse(p.Data); err != nil {
panic(err)
}
@@ -90,11 +90,11 @@ func (c *Control) GetFromTun(block bool) []byte {
// GetFromUDP will pull a udp packet off the udp side of nebula
func (c *Control) GetFromUDP(block bool) *udp.Packet {
return c.f.outside.Get(block)
return c.f.outside.(*udp.TesterConn).Get(block)
}
func (c *Control) GetUDPTxChan() <-chan *udp.Packet {
return c.f.outside.TxPackets
return c.f.outside.(*udp.TesterConn).TxPackets
}
func (c *Control) GetTunTxChan() <-chan []byte {
@@ -103,7 +103,7 @@ func (c *Control) GetTunTxChan() <-chan []byte {
// InjectUDPPacket will inject a packet into the udp side of nebula
func (c *Control) InjectUDPPacket(p *udp.Packet) {
c.f.outside.Send(p)
c.f.outside.(*udp.TesterConn).Send(p)
}
// InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol
@@ -143,16 +143,16 @@ func (c *Control) GetVpnIp() iputil.VpnIp {
}
func (c *Control) GetUDPAddr() string {
return c.f.outside.Addr.String()
return c.f.outside.(*udp.TesterConn).Addr.String()
}
func (c *Control) KillPendingTunnel(vpnIp net.IP) bool {
hostinfo, ok := c.f.handshakeManager.pendingHostMap.Hosts[iputil.Ip2VpnIp(vpnIp)]
if !ok {
hostinfo := c.f.handshakeManager.QueryVpnIp(iputil.Ip2VpnIp(vpnIp))
if hostinfo == nil {
return false
}
c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
c.f.handshakeManager.DeleteHostInfo(hostinfo)
return true
}
@@ -161,19 +161,9 @@ func (c *Control) GetHostmap() *HostMap {
}
func (c *Control) GetCert() *cert.NebulaCertificate {
return c.f.certState.Load().certificate
return c.f.pki.GetCertState().Certificate
}
func (c *Control) ReHandshake(vpnIp iputil.VpnIp) {
hostinfo := c.f.handshakeManager.AddVpnIp(vpnIp, c.f.initHostInfo)
ixHandshakeStage0(c.f, vpnIp, hostinfo)
// If this is a static host, we don't need to wait for the HostQueryReply
// We can trigger the handshake right now
if _, ok := c.f.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
select {
case c.f.handshakeManager.trigger <- hostinfo.vpnIp:
default:
}
}
c.f.handshakeManager.StartHandshake(vpnIp, nil)
}

View File

@@ -4,6 +4,8 @@ Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target
[Service]
Type=notify
NotifyAccess=main
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml

View File

@@ -5,6 +5,8 @@ After=basic.target network.target network-online.target
Before=sshd.service
[Service]
Type=notify
NotifyAccess=main
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
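Both unit files now pair Type=notify with NotifyAccess=main, meaning systemd accepts the readiness notification only from the main nebula process. For context, a minimal sketch of what such a process sends over the notify socket, assuming only the Go standard library ("net", "os"); nebula's actual notify code may differ:
// notifyReady sends READY=1 on the datagram socket systemd names in
// NOTIFY_SOCKET. It is a no-op when not running under systemd.
func notifyReady() error {
	path := os.Getenv("NOTIFY_SOCKET")
	if path == "" {
		return nil
	}
	if path[0] == '@' { // abstract socket namespace on Linux
		path = "\x00" + path[1:]
	}
	conn, err := net.Dial("unixgram", path)
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = conn.Write([]byte("READY=1"))
	return err
}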

View File

@@ -47,8 +47,8 @@ func (d *dnsRecords) QueryCert(data string) string {
return ""
}
iip := iputil.Ip2VpnIp(ip)
hostinfo, err := d.hostMap.QueryVpnIp(iip)
if err != nil {
hostinfo := d.hostMap.QueryVpnIp(iip)
if hostinfo == nil {
return ""
}
q := hostinfo.GetCert()

View File

@@ -20,7 +20,7 @@ import (
)
func BenchmarkHotPath(b *testing.B) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, _, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
@@ -44,7 +44,7 @@ func BenchmarkHotPath(b *testing.B) {
}
func TestGoodHandshake(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
@@ -95,7 +95,7 @@ func TestGoodHandshake(t *testing.T) {
}
func TestWrongResponderHandshake(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
// The IPs here are chosen on purpose:
// The current remote handling will sort by preference, public, and then lexically.
@@ -164,7 +164,7 @@ func TestStage1Race(t *testing.T) {
// This test ensures that two hosts handshaking with each other at the same time will allow traffic to flow
// But will eventually collapse down to a single tunnel
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
@@ -241,7 +241,7 @@ func TestStage1Race(t *testing.T) {
}
func TestUncleanShutdownRaceLoser(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
@@ -290,7 +290,7 @@ func TestUncleanShutdownRaceLoser(t *testing.T) {
}
func TestUncleanShutdownRaceWinner(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
@@ -341,7 +341,7 @@ func TestUncleanShutdownRaceWinner(t *testing.T) {
}
func TestRelays(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
@@ -372,7 +372,7 @@ func TestRelays(t *testing.T) {
func TestStage1RaceRelays(t *testing.T) {
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
@@ -410,6 +410,8 @@ func TestStage1RaceRelays(t *testing.T) {
p := r.RouteForAllUntilTxTun(myControl)
_ = p
r.FlushAll()
myControl.Stop()
theirControl.Stop()
relayControl.Stop()
@@ -419,7 +421,7 @@ func TestStage1RaceRelays(t *testing.T) {
func TestStage1RaceRelays2(t *testing.T) {
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
@@ -506,7 +508,7 @@ func TestStage1RaceRelays2(t *testing.T) {
////TODO: assert hostmaps
}
func TestRehandshakingRelays(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
@@ -536,7 +538,111 @@ func TestRehandshakingRelays(t *testing.T) {
// When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
// and the main host infos will not have any relay state to handle the me<->relay<->them tunnel.
r.Log("Renew relay certificate and spin until me and them sees it")
_, _, myNextPrivKey, myNextPEM := newTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})
_, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})
caB, err := ca.MarshalToPEM()
if err != nil {
panic(err)
}
relayConfig.Settings["pki"] = m{
"ca": string(caB),
"cert": string(myNextPEM),
"key": string(myNextPrivKey),
}
rc, err := yaml.Marshal(relayConfig.Settings)
assert.NoError(t, err)
relayConfig.ReloadConfigString(string(rc))
for {
r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet")
assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
c := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now
r.Log("Certificate between my and relay is updated!")
break
}
time.Sleep(time.Second)
}
for {
r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet")
assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now
r.Log("Certificate between their and relay is updated!")
break
}
time.Sleep(time.Second)
}
r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
// We should have two hostinfos on all sides
for len(myControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
t.Logf("myControl hostinfos got cleaned up!")
for len(theirControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
t.Logf("theirControl hostinfos got cleaned up!")
for len(relayControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
t.Logf("relayControl hostinfos got cleaned up!")
}
func TestRehandshakingRelaysPrimary(t *testing.T) {
// This test is the same as TestRehandshakingRelays but one of the terminal types is a primary swap winner
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 128}, m{"relay": m{"use_relays": true}})
relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 1}, m{"relay": m{"am_relay": true}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
// Teach my how to get to the relay and that their can be reached via the relay
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
// Build a router so we don't have to reason about who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl)
defer r.RenderFlow()
// Start the servers
myControl.Start()
relayControl.Start()
theirControl.Start()
t.Log("Trigger a handshake from me to them via the relay")
myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl)
r.Log("Assert the tunnel works")
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
// When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
// and the main host infos will not have any relay state to handle the me<->relay<->them tunnel.
r.Log("Renew relay certificate and spin until me and them sees it")
_, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})
caB, err := ca.MarshalToPEM()
if err != nil {
@@ -609,7 +715,7 @@ func TestRehandshakingRelays(t *testing.T) {
}
func TestRehandshaking(t *testing.T) {
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)
@@ -631,7 +737,7 @@ func TestRehandshaking(t *testing.T) {
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
r.Log("Renew my certificate and spin until their sees it")
_, _, myNextPrivKey, myNextPEM := newTestCert(ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), myVpnIpNet, nil, []string{"new group"})
_, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), myVpnIpNet, nil, []string{"new group"})
caB, err := ca.MarshalToPEM()
if err != nil {
@@ -705,7 +811,7 @@ func TestRehandshaking(t *testing.T) {
func TestRehandshakingLoser(t *testing.T) {
// The purpose of this test is that the race loser renews their certificate and rehandshakes. The final tunnel
// Should be the one with the new certificate
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)
@@ -731,7 +837,7 @@ func TestRehandshakingLoser(t *testing.T) {
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
r.Log("Renew their certificate and spin until mine sees it")
_, _, theirNextPrivKey, theirNextPEM := newTestCert(ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), theirVpnIpNet, nil, []string{"their new group"})
_, _, theirNextPrivKey, theirNextPEM := NewTestCert(ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), theirVpnIpNet, nil, []string{"their new group"})
caB, err := ca.MarshalToPEM()
if err != nil {
@@ -806,7 +912,7 @@ func TestRaceRegression(t *testing.T) {
// This test forces stage 1, stage 2, stage 1 to be received by me from them
// We had a bug where we were not finding the duplicate handshake and responding to the final stage 1 which
// caused a cross-linked hostinfo
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)

e2e/helpers.go
View File

@@ -0,0 +1,118 @@
package e2e
import (
"crypto/rand"
"io"
"net"
"time"
"github.com/slackhq/nebula/cert"
"golang.org/x/crypto/curve25519"
"golang.org/x/crypto/ed25519"
)
// NewTestCaCert will generate a CA cert
func NewTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
pub, priv, err := ed25519.GenerateKey(rand.Reader)
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
}
if after.IsZero() {
after = time.Now().Add(time.Second * 60).Round(time.Second)
}
nc := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "test ca",
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub,
IsCA: true,
InvertedGroups: make(map[string]struct{}),
},
}
if len(ips) > 0 {
nc.Details.Ips = ips
}
if len(subnets) > 0 {
nc.Details.Subnets = subnets
}
if len(groups) > 0 {
nc.Details.Groups = groups
}
err = nc.Sign(cert.Curve_CURVE25519, priv)
if err != nil {
panic(err)
}
pem, err := nc.MarshalToPEM()
if err != nil {
panic(err)
}
return nc, pub, priv, pem
}
// NewTestCert will generate a signed certificate with the provided details.
// Expiry times are defaulted if you do not pass them in
func NewTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
issuer, err := ca.Sha256Sum()
if err != nil {
panic(err)
}
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
}
if after.IsZero() {
after = time.Now().Add(time.Second * 60).Round(time.Second)
}
pub, rawPriv := x25519Keypair()
nc := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: name,
Ips: []*net.IPNet{ip},
Subnets: subnets,
Groups: groups,
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub,
IsCA: false,
Issuer: issuer,
InvertedGroups: make(map[string]struct{}),
},
}
err = nc.Sign(ca.Details.Curve, key)
if err != nil {
panic(err)
}
pem, err := nc.MarshalToPEM()
if err != nil {
panic(err)
}
return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
}
func x25519Keypair() ([]byte, []byte) {
privkey := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
panic(err)
}
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
if err != nil {
panic(err)
}
return pubkey, privkey
}
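With the helpers exported, other packages can build certificate fixtures directly. A minimal sketch of chaining them, where zero times fall back to the defaulted one-minute validity window (the name, address, and group are illustrative):
ca, _, caKey, _ := NewTestCaCert(time.Time{}, time.Time{}, nil, nil, nil)
crt, _, key, pem := NewTestCert(ca, caKey, "host1", time.Time{}, time.Time{},
	&net.IPNet{IP: net.IP{10, 0, 0, 1}, Mask: net.CIDRMask(24, 32)}, nil, []string{"servers"})
_, _, _ = crt, key, pem // feed into a node config, as newSimpleServer does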

View File

@@ -4,7 +4,6 @@
package e2e
import (
"crypto/rand"
"fmt"
"io"
"net"
@@ -12,9 +11,9 @@ import (
"testing"
"time"
"dario.cat/mergo"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/cert"
@@ -22,8 +21,6 @@ import (
"github.com/slackhq/nebula/e2e/router"
"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert"
"golang.org/x/crypto/curve25519"
"golang.org/x/crypto/ed25519"
"gopkg.in/yaml.v2"
)
@@ -40,7 +37,7 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
IP: udpIp,
Port: 4242,
}
_, _, myPrivKey, myPEM := newTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})
_, _, myPrivKey, myPEM := NewTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})
caB, err := caCrt.MarshalToPEM()
if err != nil {
@@ -108,112 +105,6 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
return control, vpnIpNet, &udpAddr, c
}
// newTestCaCert will generate a CA cert
func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
pub, priv, err := ed25519.GenerateKey(rand.Reader)
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
}
if after.IsZero() {
after = time.Now().Add(time.Second * 60).Round(time.Second)
}
nc := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: "test ca",
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub,
IsCA: true,
InvertedGroups: make(map[string]struct{}),
},
}
if len(ips) > 0 {
nc.Details.Ips = ips
}
if len(subnets) > 0 {
nc.Details.Subnets = subnets
}
if len(groups) > 0 {
nc.Details.Groups = groups
}
err = nc.Sign(cert.Curve_CURVE25519, priv)
if err != nil {
panic(err)
}
pem, err := nc.MarshalToPEM()
if err != nil {
panic(err)
}
return nc, pub, priv, pem
}
// newTestCert will generate a signed certificate with the provided details.
// Expiry times are defaulted if you do not pass them in
func newTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
issuer, err := ca.Sha256Sum()
if err != nil {
panic(err)
}
if before.IsZero() {
before = time.Now().Add(time.Second * -60).Round(time.Second)
}
if after.IsZero() {
after = time.Now().Add(time.Second * 60).Round(time.Second)
}
pub, rawPriv := x25519Keypair()
nc := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
Name: name,
Ips: []*net.IPNet{ip},
Subnets: subnets,
Groups: groups,
NotBefore: time.Unix(before.Unix(), 0),
NotAfter: time.Unix(after.Unix(), 0),
PublicKey: pub,
IsCA: false,
Issuer: issuer,
InvertedGroups: make(map[string]struct{}),
},
}
err = nc.Sign(ca.Details.Curve, key)
if err != nil {
panic(err)
}
pem, err := nc.MarshalToPEM()
if err != nil {
panic(err)
}
return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
}
func x25519Keypair() ([]byte, []byte) {
privkey := make([]byte, 32)
if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
panic(err)
}
pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
if err != nil {
panic(err)
}
return pubkey, privkey
}
type doneCb func()
func deadline(t *testing.T, seconds time.Duration) doneCb {

View File

@@ -11,7 +11,7 @@ pki:
#blocklist:
# - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
# disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid.
#disconnect_invalid: false
#disconnect_invalid: true
# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
@@ -21,6 +21,19 @@ pki:
static_host_map:
"192.168.100.1": ["100.64.22.11:4242"]
# The static_map config stanza can be used to configure how the static_host_map behaves.
#static_map:
# cadence determines how frequently DNS is re-queried for updated IP addresses when a static_host_map entry contains
# a DNS name.
#cadence: 30s
# network determines the type of IP addresses to ask the DNS server for. The default is "ip4" because nodes typically
# do not know their public IPv4 address. Connecting to the Lighthouse via IPv4 allows the Lighthouse to detect the
# public address. Other valid options are "ip6" and "ip" (returns both.)
#network: ip4
# lookup_timeout is the DNS query timeout.
#lookup_timeout: 250ms
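# For example, re-querying a DNS-based lighthouse entry every 30s over IPv4
# (the DNS name is hypothetical; the cadence/network/lookup_timeout values are
# the documented defaults):
#static_host_map:
#  "192.168.100.1": ["lighthouse.example.com:4242"]
#static_map:
#  cadence: 30s
#  network: ip4
#  lookup_timeout: 250ms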
lighthouse:
# am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
@@ -158,7 +171,8 @@ punchy:
# and has been deprecated for "preferred_ranges"
#preferred_ranges: ["172.16.0.0/24"]
# sshd can expose informational and administrative functions via ssh this is a
# sshd can expose informational and administrative functions via ssh. This allows manual tweaking of various
# network settings when debugging or testing.
#sshd:
# Toggles the feature
#enabled: true
@@ -194,7 +208,7 @@ tun:
disabled: false
# Name of the device. If not set, a default will be chosen by the OS.
# For macOS: if set, must be in the form `utun[0-9]+`.
# For FreeBSD: Required to be set, must be in the form `tun[0-9]+`.
# For NetBSD: Required to be set, must be in the form `tun[0-9]+`
dev: nebula1
# Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
drop_local_broadcast: false
@@ -275,6 +289,10 @@ logging:
# A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
#try_interval: 100ms
#retries: 20
# query_buffer is the size of the buffer channel for querying lighthouses
#query_buffer: 64
# trigger_buffer is the size of the buffer channel for quickly sending handshakes
# after receiving the response for lighthouse queries
#trigger_buffer: 64

examples/go_service/main.go
View File

@@ -0,0 +1,100 @@
package main
import (
"bufio"
"fmt"
"log"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/service"
)
func main() {
if err := run(); err != nil {
log.Fatalf("%+v", err)
}
}
func run() error {
configStr := `
tun:
user: true
static_host_map:
'192.168.100.1': ['localhost:4242']
listen:
host: 0.0.0.0
port: 4241
lighthouse:
am_lighthouse: false
interval: 60
hosts:
- '192.168.100.1'
firewall:
outbound:
# Allow all outbound traffic from this node
- port: any
proto: any
host: any
inbound:
# Allow icmp between any nebula hosts
- port: any
proto: icmp
host: any
- port: any
proto: any
host: any
pki:
ca: /home/rice/Developer/nebula-config/ca.crt
cert: /home/rice/Developer/nebula-config/app.crt
key: /home/rice/Developer/nebula-config/app.key
`
var config config.C
if err := config.LoadString(configStr); err != nil {
return err
}
service, err := service.New(&config)
if err != nil {
return err
}
ln, err := service.Listen("tcp", ":1234")
if err != nil {
return err
}
for {
conn, err := ln.Accept()
if err != nil {
log.Printf("accept error: %s", err)
break
}
defer conn.Close()
log.Printf("got connection")
conn.Write([]byte("hello world\n"))
scanner := bufio.NewScanner(conn)
for scanner.Scan() {
message := scanner.Text()
fmt.Fprintf(conn, "echo: %q\n", message)
log.Printf("got message %q", message)
}
if err := scanner.Err(); err != nil {
log.Printf("scanner error: %s", err)
break
}
}
service.Close()
if err := service.Wait(); err != nil {
return err
}
return nil
}
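Any other host on the overlay can then reach the listener with a plain TCP dial. A minimal sketch from a peer's perspective, assuming this node's nebula address is 192.168.100.5 (hypothetical; imports "bufio", "fmt", "log", "net"):
conn, err := net.Dial("tcp", "192.168.100.5:1234")
if err != nil {
	log.Fatal(err)
}
defer conn.Close()
r := bufio.NewReader(conn)
greeting, _ := r.ReadString('\n') // "hello world"
fmt.Fprintln(conn, "ping")
echo, _ := r.ReadString('\n') // `echo: "ping"`
log.Print(greeting, echo)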

View File

@@ -5,6 +5,8 @@ After=basic.target network.target network-online.target
Before=sshd.service
[Service]
Type=notify
NotifyAccess=main
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml

View File

@@ -6,6 +6,7 @@ import (
"encoding/hex"
"errors"
"fmt"
"hash/fnv"
"net"
"reflect"
"strconv"
@@ -57,7 +58,7 @@ type Firewall struct {
DefaultTimeout time.Duration //linux: 600s
// Used to ensure we don't emit local packets for ips we don't own
localIps *cidr.Tree4
localIps *cidr.Tree4[struct{}]
rules string
rulesVersion uint16
@@ -110,8 +111,8 @@ type FirewallRule struct {
Any bool
Hosts map[string]struct{}
Groups [][]string
CIDR *cidr.Tree4
LocalCIDR *cidr.Tree4
CIDR *cidr.Tree4[struct{}]
LocalCIDR *cidr.Tree4[struct{}]
}
// Even though ports are uint16, int32 maps are faster for lookup
@@ -137,7 +138,7 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
max = defaultTimeout
}
localIps := cidr.NewTree4()
localIps := cidr.NewTree4[struct{}]()
for _, ip := range c.Details.Ips {
localIps.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
}
@@ -278,6 +279,18 @@ func (f *Firewall) GetRuleHash() string {
return hex.EncodeToString(sum[:])
}
// GetRuleHashFNV returns a uint32 FNV-1a hash representation of the rules, for use as a metric value
func (f *Firewall) GetRuleHashFNV() uint32 {
h := fnv.New32a()
h.Write([]byte(f.rules))
return h.Sum32()
}
// GetRuleHashes returns both the sha256 and FNV-1 hashes, suitable for logging
func (f *Firewall) GetRuleHashes() string {
return "SHA:" + f.GetRuleHash() + ",FNV:" + strconv.FormatUint(uint64(f.GetRuleHashFNV()), 10)
}
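Unlike the sha256 rule hash, the 32-bit FNV-1a value fits directly in a metrics gauge. A standalone sketch of the same construction (the rule string is illustrative):
h := fnv.New32a() // hash/fnv, as imported above
h.Write([]byte("inbound: any proto from group g1"))
fmt.Println(h.Sum32()) // the value exported as the firewall.rules.hash gauge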
func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw FirewallInterface) error {
var table string
if inbound {
@@ -391,7 +404,8 @@ func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *Hos
// Make sure remote address matches nebula certificate
if remoteCidr := h.remoteCidr; remoteCidr != nil {
if remoteCidr.Contains(fp.RemoteIP) == nil {
ok, _ := remoteCidr.Contains(fp.RemoteIP)
if !ok {
f.metrics(incoming).droppedRemoteIP.Inc(1)
return ErrInvalidRemoteIP
}
@@ -404,7 +418,8 @@ func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *Hos
}
// Make sure we are supposed to be handling this local ip address
if f.localIps.Contains(fp.LocalIP) == nil {
ok, _ := f.localIps.Contains(fp.LocalIP)
if !ok {
f.metrics(incoming).droppedLocalIP.Inc(1)
return ErrInvalidLocalIP
}
@@ -447,6 +462,7 @@ func (f *Firewall) EmitStats() {
conntrack.Unlock()
metrics.GetOrRegisterGauge("firewall.conntrack.count", nil).Update(int64(conntrackCount))
metrics.GetOrRegisterGauge("firewall.rules.version", nil).Update(int64(f.rulesVersion))
metrics.GetOrRegisterGauge("firewall.rules.hash", nil).Update(int64(f.GetRuleHashFNV()))
}
func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool {
@@ -657,8 +673,8 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPN
return &FirewallRule{
Hosts: make(map[string]struct{}),
Groups: make([][]string, 0),
CIDR: cidr.NewTree4(),
LocalCIDR: cidr.NewTree4(),
CIDR: cidr.NewTree4[struct{}](),
LocalCIDR: cidr.NewTree4[struct{}](),
}
}
@@ -726,8 +742,8 @@ func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet, loc
// If it's any we need to wipe out any pre-existing rules to save on memory
fr.Groups = make([][]string, 0)
fr.Hosts = make(map[string]struct{})
fr.CIDR = cidr.NewTree4()
fr.LocalCIDR = cidr.NewTree4()
fr.CIDR = cidr.NewTree4[struct{}]()
fr.LocalCIDR = cidr.NewTree4[struct{}]()
} else {
if len(groups) > 0 {
fr.Groups = append(fr.Groups, groups)
@@ -809,12 +825,18 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
}
}
if fr.CIDR != nil && fr.CIDR.Contains(p.RemoteIP) != nil {
return true
if fr.CIDR != nil {
ok, _ := fr.CIDR.Contains(p.RemoteIP)
if ok {
return true
}
}
if fr.LocalCIDR != nil && fr.LocalCIDR.Contains(p.LocalIP) != nil {
return true
if fr.LocalCIDR != nil {
ok, _ := fr.LocalCIDR.Contains(p.LocalIP)
if ok {
return true
}
}
// No host, group, or cidr matched, bye bye
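All of these call-site rewrites follow from cidr.Tree4 becoming generic: lookups now return an explicit (found, value) pair instead of a nilable node. A minimal sketch of the new pattern, using only the signatures visible in this diff:
tree := cidr.NewTree4[struct{}]()
tree.AddCIDR(&net.IPNet{IP: net.IP{10, 0, 0, 0}, Mask: net.CIDRMask(8, 32)}, struct{}{})
ok, _ := tree.Contains(iputil.Ip2VpnIp(net.IP{10, 1, 2, 3}))
if ok {
	// the address falls inside a stored CIDR
}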

View File

@@ -92,14 +92,16 @@ func TestFirewall_AddRule(t *testing.T) {
assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
ok, _ := fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP))
assert.True(t, ok)
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", nil, ti, "", ""))
assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))
ok, _ = fw.OutRules.AnyProto[1].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP))
assert.True(t, ok)
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "ca-name", ""))
@@ -114,8 +116,10 @@ func TestFirewall_AddRule(t *testing.T) {
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, ti, "", ""))
assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0])
assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1")
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))
ok, _ = fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP))
assert.True(t, ok)
ok, _ = fw.OutRules.AnyProto[0].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP))
assert.True(t, ok)
// run twice just to make sure
//TODO: these ANY rules should clear the CA firewall portion

go.mod
View File

@@ -3,32 +3,35 @@ module github.com/slackhq/nebula
go 1.20
require (
dario.cat/mergo v1.0.0
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
github.com/armon/go-radix v1.0.0
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
github.com/flynn/noise v1.0.0
github.com/gogo/protobuf v1.3.2
github.com/google/gopacket v1.1.19
github.com/imdario/mergo v0.3.15
github.com/kardianos/service v1.2.2
github.com/miekg/dns v1.1.54
github.com/miekg/dns v1.1.56
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
github.com/prometheus/client_golang v1.15.1
github.com/prometheus/client_golang v1.17.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/sirupsen/logrus v1.9.0
github.com/sirupsen/logrus v1.9.3
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
github.com/stretchr/testify v1.8.2
github.com/vishvananda/netlink v1.1.0
golang.org/x/crypto v0.8.0
github.com/stretchr/testify v1.8.4
github.com/vishvananda/netlink v1.1.1-0.20211118161826-650dca95af54
golang.org/x/crypto v0.17.0
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53
golang.org/x/net v0.9.0
golang.org/x/sys v0.8.0
golang.org/x/term v0.8.0
golang.org/x/net v0.19.0
golang.org/x/sync v0.5.0
golang.org/x/sys v0.15.0
golang.org/x/term v0.15.0
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b
golang.zx2c4.com/wireguard/windows v0.5.3
google.golang.org/protobuf v1.30.0
google.golang.org/protobuf v1.31.0
gopkg.in/yaml.v2 v2.4.0
gvisor.dev/gvisor v0.0.0-20230504175454-7b0a1988a28f
)
require (
@@ -36,14 +39,15 @@ require (
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/btree v1.0.1 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.11.1 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/tools v0.8.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 // indirect
golang.org/x/tools v0.13.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum
View File

@@ -1,4 +1,6 @@
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -45,6 +47,8 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
@@ -54,8 +58,6 @@ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -78,8 +80,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE=
github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -97,52 +99,47 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16 h1:v7DLqVdK4VrYkVD5diGdl4sxJurKJEMnODWRJlxV9oM=
github.com/prometheus/client_model v0.4.1-0.20230718164431-9a2bf3000d16/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/procfs v0.11.1 h1:xRC8Iq1yyca5ypa9n1EZnWZkt7dwcoRPQwX/5gwaUuI=
github.com/prometheus/procfs v0.11.1/go.mod h1:eesXgaPo1q7lBpVMoMy0ZOFTth9hBn4W/y0/p/ScXhY=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/vishvananda/netlink v1.1.1-0.20211118161826-650dca95af54 h1:8mhqcHPqTMhSPoslhGYihEgSfc77+7La1P6kiB6+9So=
github.com/vishvananda/netlink v1.1.1-0.20211118161826-650dca95af54/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -152,16 +149,16 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k=
golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4=
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o=
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -172,8 +169,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c=
golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -181,44 +178,50 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE=
golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc=
golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4=
golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b h1:J1CaxgLerRR5lgx3wnr6L04cJFbWoceSK9JWBdglINo=
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b/go.mod h1:tqur9LnfstdR9ep2LaJT4lFUl0EjlHtge+gAjmsHUG4=
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -230,8 +233,8 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -247,3 +250,5 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gvisor.dev/gvisor v0.0.0-20230504175454-7b0a1988a28f h1:8GE2MRjGiFmfpon8dekPI08jEuNMQzSffVHgdupcO4E=
gvisor.dev/gvisor v0.0.0-20230504175454-7b0a1988a28f/go.mod h1:pzr6sy8gDLfVmDAg8OYrlKvGEHw5C3PGTiBXBTCx76Q=

View File

@@ -1,31 +0,0 @@
package nebula
import (
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/udp"
)
func HandleIncomingHandshake(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H, hostinfo *HostInfo) {
// First remote allow list check before we know the vpnIp
if addr != nil {
if !f.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
f.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return
}
}
switch h.Subtype {
case header.HandshakeIXPSK0:
switch h.MessageCounter {
case 1:
ixHandshakeStage1(f, addr, via, packet, h)
case 2:
newHostinfo, _ := f.handshakeManager.QueryIndex(h.RemoteIndex)
tearDown := ixHandshakeStage2(f, addr, via, newHostinfo, packet, h)
if tearDown && newHostinfo != nil {
f.handshakeManager.DeleteHostInfo(newHostinfo)
}
}
}
}

View File

@@ -4,6 +4,7 @@ import (
"time"
"github.com/flynn/noise"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
@@ -13,27 +14,22 @@ import (
// This function constructs a handshake packet, but does not actually send it
// Sending is done by the handshake manager
func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
// This queries the lighthouse if we don't know a remote for the host
// We do it here to provoke the lighthouse to preempt our timer wheel and trigger the stage 1 packet to send
// more quickly; the effect is a quicker handshake.
if hostinfo.remote == nil {
f.lightHouse.QueryServer(vpnIp, f)
}
err := f.handshakeManager.AddIndexHostInfo(hostinfo)
func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
err := f.handshakeManager.allocateIndex(hh)
if err != nil {
f.l.WithError(err).WithField("vpnIp", vpnIp).
f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to generate index")
return
return false
}
ci := hostinfo.ConnectionState
certState := f.pki.GetCertState()
ci := NewConnectionState(f.l, f.cipher, certState, true, noise.HandshakeIX, []byte{}, 0)
hh.hostinfo.ConnectionState = ci
hsProto := &NebulaHandshakeDetails{
InitiatorIndex: hostinfo.localIndexId,
InitiatorIndex: hh.hostinfo.localIndexId,
Time: uint64(time.Now().UnixNano()),
Cert: ci.certState.rawCertificateNoKey,
Cert: certState.RawCertificateNoKey,
}
hsBytes := []byte{}
@@ -44,9 +40,9 @@ func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
hsBytes, err = hs.Marshal()
if err != nil {
f.l.WithError(err).WithField("vpnIp", vpnIp).
f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
return
return false
}
h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1)
@@ -54,22 +50,23 @@ func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
msg, _, _, err := ci.H.WriteMessage(h, hsBytes)
if err != nil {
f.l.WithError(err).WithField("vpnIp", vpnIp).
f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
return
return false
}
// We are sending handshake packet 1, so we don't expect to receive
// handshake packet 1 from the responder
ci.window.Update(f.l, 1)
hostinfo.HandshakePacket[0] = msg
hostinfo.HandshakeReady = true
hostinfo.handshakeStart = time.Now()
hh.hostinfo.HandshakePacket[0] = msg
hh.ready = true
return true
}
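ixHandshakeStage0 writes the first message of a Noise IX handshake and stage 2 consumes the reply. A minimal, self-contained IX round trip with flynn/noise, the library imported above; the cipher suite and payload are illustrative, not Nebula's exact configuration:

package main

import (
	"fmt"

	"github.com/flynn/noise"
)

func main() {
	cs := noise.NewCipherSuite(noise.DH25519, noise.CipherAESGCM, noise.HashSHA256)
	ikey, _ := cs.GenerateKeypair(nil) // nil rng falls back to crypto/rand
	rkey, _ := cs.GenerateKeypair(nil)

	initiator, _ := noise.NewHandshakeState(noise.Config{
		CipherSuite: cs, Pattern: noise.HandshakeIX, Initiator: true, StaticKeypair: ikey,
	})
	responder, _ := noise.NewHandshakeState(noise.Config{
		CipherSuite: cs, Pattern: noise.HandshakeIX, StaticKeypair: rkey,
	})

	// Message 1, the analogue of stage 0 above: carries the handshake details.
	msg1, _, _, _ := initiator.WriteMessage(nil, []byte("hs details"))
	payload, _, _, _ := responder.ReadMessage(nil, msg1)
	fmt.Printf("responder received %q\n", payload)

	// Message 2, the analogue of stage 2: after it, both sides hold the
	// transport cipher states (the eKey/dKey pair in the diff).
	msg2, rC1, rC2, _ := responder.WriteMessage(nil, nil)
	_, iC1, iC2, _ := initiator.ReadMessage(nil, msg2)
	fmt.Println("handshake complete:", rC1 != nil && rC2 != nil && iC1 != nil && iC2 != nil)
}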
func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H) {
ci := f.newConnectionState(f.l, false, noise.HandshakeIX, []byte{}, 0)
certState := f.pki.GetCertState()
ci := NewConnectionState(f.l, f.cipher, certState, false, noise.HandshakeIX, []byte{}, 0)
// Mark packet 1 as seen so it doesn't show up as missed
ci.window.Update(f.l, 1)
@@ -91,7 +88,7 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
return
}
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool)
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
if err != nil {
f.l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert).
@@ -143,9 +140,6 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
},
}
hostinfo.Lock()
defer hostinfo.Unlock()
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
@@ -155,7 +149,7 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
Info("Handshake message received")
hs.Details.ResponderIndex = myIndex
hs.Details.Cert = ci.certState.rawCertificateNoKey
hs.Details.Cert = certState.RawCertificateNoKey
// Update the time in case their clock is way off from ours
hs.Details.Time = uint64(time.Now().UnixNano())
@@ -211,19 +205,12 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
if err != nil {
switch err {
case ErrAlreadySeen:
// Update remote if preferred (Note we have to switch to locking
// the existing hostinfo, and then switch back so the defer Unlock
// higher in this function still works)
hostinfo.Unlock()
existing.Lock()
// Update remote if preferred
if existing.SetRemoteIfPreferred(f.hostMap, addr) {
// Send a test packet to ensure the other side has also switched to
// the preferred remote
f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
}
existing.Unlock()
hostinfo.Lock()
msg = existing.HandshakePacket[2]
f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
@@ -310,7 +297,6 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
Info("Handshake message sent")
}
} else {
@@ -326,25 +312,26 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
Info("Handshake message sent")
}
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
hostinfo.ConnectionState.messageCounter.Store(2)
hostinfo.remotes.ResetBlockedRemotes()
return
}
func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *HostInfo, packet []byte, h *header.H) bool {
if hostinfo == nil {
func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hh *HandshakeHostInfo, packet []byte, h *header.H) bool {
if hh == nil {
// Nothing here to tear down, got a bogus stage 2 packet
return true
}
hostinfo.Lock()
defer hostinfo.Unlock()
hh.Lock()
defer hh.Unlock()
hostinfo := hh.hostinfo
if addr != nil {
if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) {
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
@@ -353,22 +340,6 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
}
ci := hostinfo.ConnectionState
if ci.ready {
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
Info("Handshake is already complete")
// Update remote if preferred
if hostinfo.SetRemoteIfPreferred(f.hostMap, addr) {
// Send a test packet to ensure the other side has also switched to
// the preferred remote
f.SendMessageToVpnIp(header.Test, header.TestRequest, hostinfo.vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
}
// We already have a complete tunnel, there is nothing that can be done by processing further stage 1 packets
return false
}
msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[header.Len:])
if err != nil {
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
@@ -399,7 +370,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
return true
}
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool)
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
if err != nil {
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("cert", remoteCert).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
@@ -422,34 +393,30 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
Info("Incorrect host responded to handshake")
// Release our old handshake from pending, it should not continue
f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
f.handshakeManager.DeleteHostInfo(hostinfo)
// Create a new hostinfo/handshake for the intended vpn ip
//TODO: this adds it to the timer wheel in a way that aggressively retries
newHostInfo := f.getOrHandshake(hostinfo.vpnIp)
newHostInfo.Lock()
f.handshakeManager.StartHandshake(hostinfo.vpnIp, func(newHH *HandshakeHostInfo) {
//TODO: this doesn't know if it's being added or is being used for caching a packet
// Block the current used address
newHH.hostinfo.remotes = hostinfo.remotes
newHH.hostinfo.remotes.BlockRemote(addr)
// Block the current used address
newHostInfo.remotes = hostinfo.remotes
newHostInfo.remotes.BlockRemote(addr)
// Get the correct remote list for the host we did handshake with
hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
// Get the correct remote list for the host we did handshake with
hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
f.l.WithField("blockedUdpAddrs", newHH.hostinfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
WithField("remotes", newHH.hostinfo.remotes.CopyAddrs(f.hostMap.preferredRanges)).
Info("Blocked addresses for handshakes")
f.l.WithField("blockedUdpAddrs", newHostInfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
WithField("remotes", newHostInfo.remotes.CopyAddrs(f.hostMap.preferredRanges)).
Info("Blocked addresses for handshakes")
// Swap the packet store to benefit the original intended recipient
newHH.packetStore = hh.packetStore
hh.packetStore = []*cachedPacket{}
// Swap the packet store to benefit the original intended recipient
hostinfo.ConnectionState.queueLock.Lock()
newHostInfo.packetStore = hostinfo.packetStore
hostinfo.packetStore = []*cachedPacket{}
hostinfo.ConnectionState.queueLock.Unlock()
// Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
hostinfo.vpnIp = vpnIp
f.sendCloseTunnel(hostinfo)
newHostInfo.Unlock()
// Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
hostinfo.vpnIp = vpnIp
f.sendCloseTunnel(hostinfo)
})
return true
}
@@ -457,7 +424,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
// Mark packet 2 as seen so it doesn't show up as missed
ci.window.Update(f.l, 2)
duration := time.Since(hostinfo.handshakeStart).Nanoseconds()
duration := time.Since(hh.startTime).Nanoseconds()
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
@@ -465,7 +432,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("durationNs", duration).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
WithField("sentCachedPackets", len(hh.packetStore)).
Info("Handshake message received")
hostinfo.remoteIndexId = hs.Details.ResponderIndex
@@ -489,7 +456,23 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
f.handshakeManager.Complete(hostinfo, f)
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
hostinfo.ConnectionState.messageCounter.Store(2)
if f.l.Level >= logrus.DebugLevel {
hostinfo.logger(f.l).Debugf("Sending %d stored packets", len(hh.packetStore))
}
if len(hh.packetStore) > 0 {
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
for _, cp := range hh.packetStore {
cp.callback(cp.messageType, cp.messageSubType, hostinfo, cp.packet, nb, out)
}
f.cachedPacketMetrics.sent.Inc(int64(len(hh.packetStore)))
}
hostinfo.remotes.ResetBlockedRemotes()
f.metricHandshakes.Update(duration)
return false

View File

@@ -7,6 +7,7 @@ import (
"encoding/binary"
"errors"
"net"
"sync"
"time"
"github.com/rcrowley/go-metrics"
@@ -42,24 +43,68 @@ type HandshakeConfig struct {
}
type HandshakeManager struct {
pendingHostMap *HostMap
// Mutex for interacting with the vpnIps and indexes maps
sync.RWMutex
vpnIps map[iputil.VpnIp]*HandshakeHostInfo
indexes map[uint32]*HandshakeHostInfo
mainHostMap *HostMap
lightHouse *LightHouse
outside *udp.Conn
outside udp.Conn
config HandshakeConfig
OutboundHandshakeTimer *LockingTimerWheel[iputil.VpnIp]
messageMetrics *MessageMetrics
metricInitiated metrics.Counter
metricTimedOut metrics.Counter
f *Interface
l *logrus.Logger
// can be used to trigger an outbound handshake for the given vpnIp
trigger chan iputil.VpnIp
}
func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges []*net.IPNet, mainHostMap *HostMap, lightHouse *LightHouse, outside *udp.Conn, config HandshakeConfig) *HandshakeManager {
type HandshakeHostInfo struct {
sync.Mutex
startTime time.Time // Time that we first started trying with this handshake
ready bool // Is the handshake ready
counter int // How many attempts have we made so far
lastRemotes []*udp.Addr // Remotes that we sent to during the previous attempt
packetStore []*cachedPacket // A set of packets to be transmitted once the handshake completes
hostinfo *HostInfo
}
func (hh *HandshakeHostInfo) cachePacket(l *logrus.Logger, t header.MessageType, st header.MessageSubType, packet []byte, f packetCallback, m *cachedPacketMetrics) {
if len(hh.packetStore) < 100 {
tempPacket := make([]byte, len(packet))
copy(tempPacket, packet)
hh.packetStore = append(hh.packetStore, &cachedPacket{t, st, f, tempPacket})
if l.Level >= logrus.DebugLevel {
hh.hostinfo.logger(l).
WithField("length", len(hh.packetStore)).
WithField("stored", true).
Debugf("Packet store")
}
} else {
m.dropped.Inc(1)
if l.Level >= logrus.DebugLevel {
hh.hostinfo.logger(l).
WithField("length", len(hh.packetStore)).
WithField("stored", false).
Debugf("Packet store")
}
}
}
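cachePacket bounds the pending queue at 100 packets and copies each payload because the caller reuses its buffer. The bounded-copy pattern in isolation, independent of Nebula's types:

package main

import "fmt"

const maxCached = 100 // same bound as cachePacket above

type store struct{ packets [][]byte }

// cache copies p before storing it; returns false when the bound is hit
// and the packet is dropped (the metrics counter case above).
func (s *store) cache(p []byte) bool {
	if len(s.packets) >= maxCached {
		return false
	}
	cp := make([]byte, len(p))
	copy(cp, p)
	s.packets = append(s.packets, cp)
	return true
}

func main() {
	s := &store{}
	buf := []byte("payload")
	s.cache(buf)
	buf[0] = 'X' // mutating the caller's buffer must not affect the stored copy
	fmt.Println(string(s.packets[0])) // still "payload"
}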
func NewHandshakeManager(l *logrus.Logger, mainHostMap *HostMap, lightHouse *LightHouse, outside udp.Conn, config HandshakeConfig) *HandshakeManager {
return &HandshakeManager{
pendingHostMap: NewHostMap(l, "pending", tunCidr, preferredRanges),
vpnIps: map[iputil.VpnIp]*HandshakeHostInfo{},
indexes: map[uint32]*HandshakeHostInfo{},
mainHostMap: mainHostMap,
lightHouse: lightHouse,
outside: outside,
@@ -73,7 +118,7 @@ func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges [
}
}
func (c *HandshakeManager) Run(ctx context.Context, f EncWriter) {
func (c *HandshakeManager) Run(ctx context.Context) {
clockSource := time.NewTicker(c.config.tryInterval)
defer clockSource.Stop()
@@ -82,58 +127,80 @@ func (c *HandshakeManager) Run(ctx context.Context, f EncWriter) {
case <-ctx.Done():
return
case vpnIP := <-c.trigger:
c.handleOutbound(vpnIP, f, true)
c.handleOutbound(vpnIP, true)
case now := <-clockSource.C:
c.NextOutboundHandshakeTimerTick(now, f)
c.NextOutboundHandshakeTimerTick(now)
}
}
}
func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f EncWriter) {
func (hm *HandshakeManager) HandleIncoming(addr *udp.Addr, via *ViaSender, packet []byte, h *header.H) {
// First remote allow list check before we know the vpnIp
if addr != nil {
if !hm.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
hm.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return
}
}
switch h.Subtype {
case header.HandshakeIXPSK0:
switch h.MessageCounter {
case 1:
ixHandshakeStage1(hm.f, addr, via, packet, h)
case 2:
newHostinfo := hm.queryIndex(h.RemoteIndex)
tearDown := ixHandshakeStage2(hm.f, addr, via, newHostinfo, packet, h)
if tearDown && newHostinfo != nil {
hm.DeleteHostInfo(newHostinfo.hostinfo)
}
}
}
}
func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time) {
c.OutboundHandshakeTimer.Advance(now)
for {
vpnIp, has := c.OutboundHandshakeTimer.Purge()
if !has {
break
}
c.handleOutbound(vpnIp, f, false)
c.handleOutbound(vpnIp, false)
}
}
func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, lighthouseTriggered bool) {
hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp)
if err != nil {
func (hm *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, lighthouseTriggered bool) {
hh := hm.queryVpnIp(vpnIp)
if hh == nil {
return
}
hostinfo.Lock()
defer hostinfo.Unlock()
hh.Lock()
defer hh.Unlock()
// We may have raced to completion but now that we have a lock we should ensure we have not yet completed.
if hostinfo.HandshakeComplete {
// Ensure we don't exist in the pending hostmap anymore since we have completed
c.pendingHostMap.DeleteHostInfo(hostinfo)
hostinfo := hh.hostinfo
// If we are out of time, clean up
if hh.counter >= hm.config.retries {
hh.hostinfo.logger(hm.l).WithField("udpAddrs", hh.hostinfo.remotes.CopyAddrs(hm.mainHostMap.preferredRanges)).
WithField("initiatorIndex", hh.hostinfo.localIndexId).
WithField("remoteIndex", hh.hostinfo.remoteIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithField("durationNs", time.Since(hh.startTime).Nanoseconds()).
Info("Handshake timed out")
hm.metricTimedOut.Inc(1)
hm.DeleteHostInfo(hostinfo)
return
}
// Increment the counter to increase our delay, linear backoff
hh.counter++
// Check if we have a handshake packet to transmit yet
if !hostinfo.HandshakeReady {
// There is currently a slight race in getOrHandshake due to ConnectionState not being part of the HostInfo directly
// Our hostinfo here was added to the pending map and the wheel may have ticked to us before we created ConnectionState
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
return
}
// If we are out of time, clean up
if hostinfo.HandshakeCounter >= c.config.retries {
hostinfo.logger(c.l).WithField("udpAddrs", hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("remoteIndex", hostinfo.remoteIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithField("durationNs", time.Since(hostinfo.handshakeStart).Nanoseconds()).
Info("Handshake timed out")
c.metricTimedOut.Inc(1)
c.pendingHostMap.DeleteHostInfo(hostinfo)
return
if !hh.ready {
if !ixHandshakeStage0(hm.f, hh) {
hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval*time.Duration(hh.counter))
return
}
}
// Get a remotes object if we don't already have one.
@@ -141,11 +208,11 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
// NB ^ This comment doesn't jibe. It's how the thing gets initialized.
// It's the common path. Should it update every time, in case a future LH query/queries give us more info?
if hostinfo.remotes == nil {
hostinfo.remotes = c.lightHouse.QueryCache(vpnIp)
hostinfo.remotes = hm.lightHouse.QueryCache(vpnIp)
}
remotes := hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)
remotesHaveChanged := !udp.AddrSlice(remotes).Equal(hostinfo.HandshakeLastRemotes)
remotes := hostinfo.remotes.CopyAddrs(hm.mainHostMap.preferredRanges)
remotesHaveChanged := !udp.AddrSlice(remotes).Equal(hh.lastRemotes)
// We only care about a lighthouse trigger if we have new remotes to send to.
// This is a very specific optimization for a fast lighthouse reply.
@@ -154,25 +221,25 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
return
}
hostinfo.HandshakeLastRemotes = remotes
hh.lastRemotes = remotes
// TODO: this will generate a load of queries for hosts with only 1 ip
// (such as ones registered to the lighthouse with only a private IP)
// So we only do it one time after attempting 5 handshakes already.
if len(remotes) <= 1 && hostinfo.HandshakeCounter == 5 {
if len(remotes) <= 1 && hh.counter == 5 {
// If we only have 1 remote it is highly likely our query raced with the other host registered within the lighthouse
// Our vpnIp here has a tunnel with a lighthouse but has yet to send a host update packet there so we only know about
// the learned public ip for them. Query again to short circuit the promotion counter
c.lightHouse.QueryServer(vpnIp, f)
hm.lightHouse.QueryServer(vpnIp)
}
// Send the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
var sentTo []*udp.Addr
hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
c.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
err = c.outside.WriteTo(hostinfo.HandshakePacket[0], addr)
hostinfo.remotes.ForEach(hm.mainHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
hm.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
err := hm.outside.WriteTo(hostinfo.HandshakePacket[0], addr)
if err != nil {
hostinfo.logger(c.l).WithField("udpAddr", addr).
hostinfo.logger(hm.l).WithField("udpAddr", addr).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithError(err).Error("Failed to send handshake message")
@@ -185,63 +252,63 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
// Don't be too noisy or confusing if we fail to send a handshake - if we don't get through we'll eventually log a timeout,
// so only log when the list of remotes has changed
if remotesHaveChanged {
hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
hostinfo.logger(hm.l).WithField("udpAddrs", sentTo).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Info("Handshake message sent")
} else if c.l.IsLevelEnabled(logrus.DebugLevel) {
hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
} else if hm.l.IsLevelEnabled(logrus.DebugLevel) {
hostinfo.logger(hm.l).WithField("udpAddrs", sentTo).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Debug("Handshake message sent")
}
if c.config.useRelays && len(hostinfo.remotes.relays) > 0 {
hostinfo.logger(c.l).WithField("relays", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
if hm.config.useRelays && len(hostinfo.remotes.relays) > 0 {
hostinfo.logger(hm.l).WithField("relays", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
// Send a RelayRequest to all known Relay IP's
for _, relay := range hostinfo.remotes.relays {
// Don't relay to myself, and don't relay through the host I'm trying to connect to
if *relay == vpnIp || *relay == c.lightHouse.myVpnIp {
if *relay == vpnIp || *relay == hm.lightHouse.myVpnIp {
continue
}
relayHostInfo, err := c.mainHostMap.QueryVpnIp(*relay)
if err != nil || relayHostInfo.remote == nil {
hostinfo.logger(c.l).WithError(err).WithField("relay", relay.String()).Info("Establish tunnel to relay target")
f.Handshake(*relay)
relayHostInfo := hm.mainHostMap.QueryVpnIp(*relay)
if relayHostInfo == nil || relayHostInfo.remote == nil {
hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Establish tunnel to relay target")
hm.f.Handshake(*relay)
continue
}
// Check the relay HostInfo to see if we already established a relay through it
if existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp); ok {
switch existingRelay.State {
case Established:
hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Send handshake via relay")
f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Send handshake via relay")
hm.f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
case Requested:
hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
// Re-send the CreateRelay request, in case the previous one was lost.
m := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: existingRelay.LocalIndex,
RelayFromIp: uint32(c.lightHouse.myVpnIp),
RelayFromIp: uint32(hm.lightHouse.myVpnIp),
RelayToIp: uint32(vpnIp),
}
msg, err := m.Marshal()
if err != nil {
hostinfo.logger(c.l).
hostinfo.logger(hm.l).
WithError(err).
Error("Failed to marshal Control message to create relay")
} else {
// This must send over the hostinfo, not over hm.Hosts[ip]
f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
c.l.WithFields(logrus.Fields{
"relayFrom": c.lightHouse.myVpnIp,
hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
hm.l.WithFields(logrus.Fields{
"relayFrom": hm.lightHouse.myVpnIp,
"relayTo": vpnIp,
"initiatorRelayIndex": existingRelay.LocalIndex,
"relay": *relay}).
Info("send CreateRelayRequest")
}
default:
hostinfo.logger(c.l).
hostinfo.logger(hm.l).
WithField("vpnIp", vpnIp).
WithField("state", existingRelay.State).
WithField("relay", relayHostInfo.vpnIp).
@@ -250,26 +317,26 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
} else {
// No relays exist or requested yet.
if relayHostInfo.remote != nil {
idx, err := AddRelay(c.l, relayHostInfo, c.mainHostMap, vpnIp, nil, TerminalType, Requested)
idx, err := AddRelay(hm.l, relayHostInfo, hm.mainHostMap, vpnIp, nil, TerminalType, Requested)
if err != nil {
hostinfo.logger(c.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap")
hostinfo.logger(hm.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap")
}
m := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: idx,
RelayFromIp: uint32(c.lightHouse.myVpnIp),
RelayFromIp: uint32(hm.lightHouse.myVpnIp),
RelayToIp: uint32(vpnIp),
}
msg, err := m.Marshal()
if err != nil {
hostinfo.logger(c.l).
hostinfo.logger(hm.l).
WithError(err).
Error("Failed to marshal Control message to create relay")
} else {
f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
c.l.WithFields(logrus.Fields{
"relayFrom": c.lightHouse.myVpnIp,
hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
hm.l.WithFields(logrus.Fields{
"relayFrom": hm.lightHouse.myVpnIp,
"relayTo": vpnIp,
"initiatorRelayIndex": idx,
"relay": *relay}).
@@ -280,23 +347,81 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
}
}
// Increment the counter to increase our delay, linear backoff
hostinfo.HandshakeCounter++
// If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add
if !lighthouseTriggered {
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval*time.Duration(hh.counter))
}
}
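Each failed attempt re-adds the vpn ip to the timer wheel with tryInterval multiplied by the attempt counter, i.e. linear backoff. A worked sketch; the 100ms interval is an illustrative value, not taken from this diff:

package main

import (
	"fmt"
	"time"
)

func main() {
	tryInterval := 100 * time.Millisecond // illustrative, not Nebula's default
	for counter := 1; counter <= 5; counter++ {
		// Same shape as handleOutbound: the delay grows with each attempt.
		delay := tryInterval * time.Duration(counter)
		fmt.Printf("attempt %d -> next tick in %v\n", counter, delay)
	}
}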
func (c *HandshakeManager) AddVpnIp(vpnIp iputil.VpnIp, init func(*HostInfo)) *HostInfo {
hostinfo, created := c.pendingHostMap.AddVpnIp(vpnIp, init)
if created {
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval)
c.metricInitiated.Inc(1)
// GetOrHandshake will try to find a hostinfo with a fully formed tunnel or start a new handshake if one is not present
// The 2nd return value will be true if the hostinfo is ready to transmit traffic
func (hm *HandshakeManager) GetOrHandshake(vpnIp iputil.VpnIp, cacheCb func(*HandshakeHostInfo)) (*HostInfo, bool) {
// Check the main hostmap and maintain a read lock if our host is not there
hm.mainHostMap.RLock()
if h, ok := hm.mainHostMap.Hosts[vpnIp]; ok {
hm.mainHostMap.RUnlock()
// Do not attempt promotion if you are a lighthouse
if !hm.lightHouse.amLighthouse {
h.TryPromoteBest(hm.mainHostMap.preferredRanges, hm.f)
}
return h, true
}
defer hm.mainHostMap.RUnlock()
return hm.StartHandshake(vpnIp, cacheCb), false
}
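GetOrHandshake is the new single entry point: a fast path under the main hostmap's read lock, falling back to StartHandshake for a pending entry. A simplified sketch of that shape; unlike the code above, it releases the read lock before taking the slow path:

package main

import (
	"fmt"
	"sync"
)

type tunnels struct {
	mu    sync.RWMutex
	hosts map[string]string
}

// getOrStart mirrors GetOrHandshake's contract: the bool reports whether
// the returned entry is ready for traffic.
func (t *tunnels) getOrStart(key string, start func(string) string) (string, bool) {
	t.mu.RLock()
	if v, ok := t.hosts[key]; ok {
		t.mu.RUnlock()
		return v, true // established tunnel
	}
	t.mu.RUnlock()
	return start(key), false // handshake pending
}

func main() {
	t := &tunnels{hosts: map[string]string{"10.0.0.1": "tunnel-1"}}
	v, ready := t.getOrStart("10.0.0.2", func(k string) string { return "pending-" + k })
	fmt.Println(v, ready) // pending-10.0.0.2 false
}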
// StartHandshake will ensure a handshake is currently being attempted for the provided vpn ip
func (hm *HandshakeManager) StartHandshake(vpnIp iputil.VpnIp, cacheCb func(*HandshakeHostInfo)) *HostInfo {
hm.Lock()
defer hm.Unlock()
if hh, ok := hm.vpnIps[vpnIp]; ok {
// We are already trying to handshake with this vpn ip
if cacheCb != nil {
cacheCb(hh)
}
return hh.hostinfo
}
hostinfo := &HostInfo{
vpnIp: vpnIp,
HandshakePacket: make(map[uint8][]byte, 0),
relayState: RelayState{
relays: map[iputil.VpnIp]struct{}{},
relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
}
hh := &HandshakeHostInfo{
hostinfo: hostinfo,
startTime: time.Now(),
}
hm.vpnIps[vpnIp] = hh
hm.metricInitiated.Inc(1)
hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval)
if cacheCb != nil {
cacheCb(hh)
}
// If this is a static host, we don't need to wait for the HostQueryReply
// We can trigger the handshake right now
_, doTrigger := hm.lightHouse.GetStaticHostList()[vpnIp]
if !doTrigger {
// Add any calculated remotes, and trigger early handshake if one found
doTrigger = hm.lightHouse.addCalculatedRemotes(vpnIp)
}
if doTrigger {
select {
case hm.trigger <- vpnIp:
default:
}
}
hm.lightHouse.QueryServer(vpnIp)
return hostinfo
}
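StartHandshake nudges the run loop through the trigger channel with a non-blocking send, so a full channel never stalls the caller; a dropped nudge is fine because the timer wheel retries anyway. The pattern in isolation (the channel capacity here is illustrative):

package main

import "fmt"

func main() {
	trigger := make(chan uint32, 1) // stand-in for HandshakeManager.trigger

	notify := func(vpnIp uint32) {
		select {
		case trigger <- vpnIp: // wake the run loop if it can accept
		default: // otherwise drop the nudge; the timer wheel will retry
		}
	}

	notify(42)
	notify(43) // dropped: buffer already full, and that is acceptable
	fmt.Println(<-trigger) // 42
}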
@@ -318,10 +443,10 @@ var (
// ErrLocalIndexCollision if we already have an entry in the main or pending
// hostmap for the hostinfo.localIndexId.
func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, f *Interface) (*HostInfo, error) {
c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock()
c.mainHostMap.Lock()
defer c.mainHostMap.Unlock()
c.Lock()
defer c.Unlock()
// Check if we already have a tunnel with this vpn ip
existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
@@ -350,8 +475,8 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
return existingIndex, ErrLocalIndexCollision
}
existingIndex, found = c.pendingHostMap.Indexes[hostinfo.localIndexId]
if found && existingIndex != hostinfo {
existingPendingIndex, found := c.indexes[hostinfo.localIndexId]
if found && existingPendingIndex.hostinfo != hostinfo {
// We have a collision, but for a different hostinfo
return existingIndex, ErrLocalIndexCollision
}
@@ -372,47 +497,47 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
// Complete is a simpler version of CheckAndComplete when we already know we
// won't have a localIndexId collision because we already have an entry in the
// pendingHostMap. An existing hostinfo is returned if there was one.
func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock()
c.mainHostMap.Lock()
defer c.mainHostMap.Unlock()
func (hm *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
hm.mainHostMap.Lock()
defer hm.mainHostMap.Unlock()
hm.Lock()
defer hm.Unlock()
existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
existingRemoteIndex, found := hm.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
if found && existingRemoteIndex != nil {
// We have a collision, but this can happen since we can't control
// the remote ID. Just log about the situation as a note.
hostinfo.logger(c.l).
hostinfo.logger(hm.l).
WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
Info("New host shadows existing host remoteIndex")
}
// We need to remove from the pending hostmap first to avoid undoing work after we add to the main hostmap.
c.pendingHostMap.unlockedDeleteHostInfo(hostinfo)
c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
hm.unlockedDeleteHostInfo(hostinfo)
hm.mainHostMap.unlockedAddHostInfo(hostinfo, f)
}
// AddIndexHostInfo generates a unique localIndexId for this HostInfo
// allocateIndex generates a unique localIndexId for this HostInfo
// and adds it to the manager's pending index map. Will error if we are unable to generate
// a unique localIndexId
func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error {
c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock()
c.mainHostMap.RLock()
defer c.mainHostMap.RUnlock()
func (hm *HandshakeManager) allocateIndex(hh *HandshakeHostInfo) error {
hm.mainHostMap.RLock()
defer hm.mainHostMap.RUnlock()
hm.Lock()
defer hm.Unlock()
for i := 0; i < 32; i++ {
index, err := generateIndex(c.l)
index, err := generateIndex(hm.l)
if err != nil {
return err
}
_, inPending := c.pendingHostMap.Indexes[index]
_, inMain := c.mainHostMap.Indexes[index]
_, inPending := hm.indexes[index]
_, inMain := hm.mainHostMap.Indexes[index]
if !inMain && !inPending {
h.localIndexId = index
c.pendingHostMap.Indexes[index] = h
hh.hostinfo.localIndexId = index
hm.indexes[index] = hh
return nil
}
}
@@ -420,22 +545,90 @@ func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error {
return errors.New("failed to generate unique localIndexId")
}
func (c *HandshakeManager) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
c.pendingHostMap.addRemoteIndexHostInfo(index, h)
}
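allocateIndex above draws random local indexes and rejects any value already present in either the pending or the main map, giving up after 32 tries. The retry-until-unique shape in miniature:

package main

import (
	"errors"
	"fmt"
	"math/rand"
)

// pickUnique mirrors allocateIndex: bounded retries, reject collisions.
func pickUnique(taken map[uint32]bool) (uint32, error) {
	for i := 0; i < 32; i++ {
		idx := rand.Uint32()
		if !taken[idx] {
			taken[idx] = true
			return idx, nil
		}
	}
	return 0, errors.New("failed to generate unique index")
}

func main() {
	taken := map[uint32]bool{7: true}
	idx, err := pickUnique(taken)
	fmt.Println(idx, err)
}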
func (c *HandshakeManager) DeleteHostInfo(hostinfo *HostInfo) {
//l.Debugln("Deleting pending hostinfo :", hostinfo)
c.pendingHostMap.DeleteHostInfo(hostinfo)
c.Lock()
defer c.Unlock()
c.unlockedDeleteHostInfo(hostinfo)
}
func (c *HandshakeManager) QueryIndex(index uint32) (*HostInfo, error) {
return c.pendingHostMap.QueryIndex(index)
func (c *HandshakeManager) unlockedDeleteHostInfo(hostinfo *HostInfo) {
delete(c.vpnIps, hostinfo.vpnIp)
if len(c.vpnIps) == 0 {
c.vpnIps = map[iputil.VpnIp]*HandshakeHostInfo{}
}
delete(c.indexes, hostinfo.localIndexId)
if len(c.vpnIps) == 0 {
c.indexes = map[uint32]*HandshakeHostInfo{}
}
if c.l.Level >= logrus.DebugLevel {
c.l.WithField("hostMap", m{"mapTotalSize": len(c.vpnIps),
"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
Debug("Pending hostmap hostInfo deleted")
}
}
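When the maps empty out, unlockedDeleteHostInfo swaps in fresh ones. Go maps never shrink their bucket arrays, so re-making an empty map lets the old storage be garbage collected after churn; note both resets above key off len(c.vpnIps). The idiom by itself:

package main

import "fmt"

func main() {
	m := map[uint32]string{1: "a"}
	delete(m, 1)
	if len(m) == 0 {
		// Re-make rather than keep deleting: a Go map's bucket array is
		// never released by delete, only by dropping the whole map.
		m = map[uint32]string{}
	}
	fmt.Println(len(m)) // 0, with the old buckets now collectible
}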
func (hm *HandshakeManager) QueryVpnIp(vpnIp iputil.VpnIp) *HostInfo {
hh := hm.queryVpnIp(vpnIp)
if hh != nil {
return hh.hostinfo
}
return nil
}
func (hm *HandshakeManager) queryVpnIp(vpnIp iputil.VpnIp) *HandshakeHostInfo {
hm.RLock()
defer hm.RUnlock()
return hm.vpnIps[vpnIp]
}
func (hm *HandshakeManager) QueryIndex(index uint32) *HostInfo {
hh := hm.queryIndex(index)
if hh != nil {
return hh.hostinfo
}
return nil
}
func (hm *HandshakeManager) queryIndex(index uint32) *HandshakeHostInfo {
hm.RLock()
defer hm.RUnlock()
return hm.indexes[index]
}
func (c *HandshakeManager) GetPreferredRanges() []*net.IPNet {
return c.mainHostMap.preferredRanges
}
func (c *HandshakeManager) ForEachVpnIp(f controlEach) {
c.RLock()
defer c.RUnlock()
for _, v := range c.vpnIps {
f(v.hostinfo)
}
}
func (c *HandshakeManager) ForEachIndex(f controlEach) {
c.RLock()
defer c.RUnlock()
for _, v := range c.indexes {
f(v.hostinfo)
}
}
func (c *HandshakeManager) EmitStats() {
c.pendingHostMap.EmitStats("pending")
c.mainHostMap.EmitStats("main")
c.RLock()
hostLen := len(c.vpnIps)
indexLen := len(c.indexes)
c.RUnlock()
metrics.GetOrRegisterGauge("hostmap.pending.hosts", nil).Update(int64(hostLen))
metrics.GetOrRegisterGauge("hostmap.pending.indexes", nil).Update(int64(indexLen))
c.mainHostMap.EmitStats()
}
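EmitStats now publishes the pending sizes under fixed hostmap.pending.* gauge names rather than a caller-supplied map name. A standalone sketch using the same go-metrics calls; a nil registry means the library's default registry:

package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// Same calls as EmitStats above: fetch-or-create a gauge, then update it.
	metrics.GetOrRegisterGauge("hostmap.pending.hosts", nil).Update(3)
	metrics.GetOrRegisterGauge("hostmap.pending.indexes", nil).Update(3)

	g := metrics.GetOrRegisterGauge("hostmap.pending.hosts", nil)
	fmt.Println(g.Value()) // 3
}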
// Utility functions below

View File

@@ -5,6 +5,7 @@ import (
"testing"
"time"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test"
@@ -14,56 +15,53 @@ import (
func Test_NewHandshakeManagerVpnIp(t *testing.T) {
l := test.NewLogger()
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
preferredRanges := []*net.IPNet{localrange}
mw := &mockEncWriter{}
mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
mainHM := NewHostMap(l, vpncidr, preferredRanges)
lh := newTestLighthouse()
blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)
now := time.Now()
blah.NextOutboundHandshakeTimerTick(now, mw)
var initCalled bool
initFunc := func(*HostInfo) {
initCalled = true
cs := &CertState{
RawCertificate: []byte{},
PrivateKey: []byte{},
Certificate: &cert.NebulaCertificate{},
RawCertificateNoKey: []byte{},
}
i := blah.AddVpnIp(ip, initFunc)
assert.True(t, initCalled)
blah := NewHandshakeManager(l, mainHM, lh, &udp.NoopConn{}, defaultHandshakeConfig)
blah.f = &Interface{handshakeManager: blah, pki: &PKI{}, l: l}
blah.f.pki.cs.Store(cs)
initCalled = false
i2 := blah.AddVpnIp(ip, initFunc)
assert.False(t, initCalled)
now := time.Now()
blah.NextOutboundHandshakeTimerTick(now)
i := blah.StartHandshake(ip, nil)
i2 := blah.StartHandshake(ip, nil)
assert.Same(t, i, i2)
i.remotes = NewRemoteList(nil)
i.HandshakeReady = true
// Adding something to pending should not affect the main hostmap
assert.Len(t, mainHM.Hosts, 0)
// Confirm they are in the pending index list
assert.Contains(t, blah.pendingHostMap.Hosts, ip)
assert.Contains(t, blah.vpnIps, ip)
// Jump ahead `HandshakeRetries` ticks, offset by one to get the sleep logic right
for i := 1; i <= DefaultHandshakeRetries+1; i++ {
now = now.Add(time.Duration(i) * DefaultHandshakeTryInterval)
blah.NextOutboundHandshakeTimerTick(now, mw)
blah.NextOutboundHandshakeTimerTick(now)
}
// Confirm they are still in the pending index list
assert.Contains(t, blah.pendingHostMap.Hosts, ip)
assert.Contains(t, blah.vpnIps, ip)
// Tick 1 more time, a minute will certainly flush it out
blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute), mw)
blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute))
// Confirm they have been removed
assert.NotContains(t, blah.pendingHostMap.Hosts, ip)
assert.NotContains(t, blah.vpnIps, ip)
}
func testCountTimerWheelEntries(tw *LockingTimerWheel[iputil.VpnIp]) (c int) {

View File

@@ -2,7 +2,6 @@ package nebula
import (
"errors"
"fmt"
"net"
"sync"
"sync/atomic"
@@ -18,9 +17,11 @@ import (
)
// const ProbeLen = 100
const PromoteEvery = 1000
const ReQueryEvery = 5000
const defaultPromoteEvery = 1000 // Count of packets sent before we try moving a tunnel to a preferred underlay ip address
const defaultReQueryEvery = 5000 // Count of packets sent before re-querying a hostinfo to the lighthouse
const defaultReQueryWait = time.Minute // Minimum amount of time to wait before re-querying the lighthouse about a hostinfo. Evaluated every ReQueryEvery packets
const MaxRemotes = 10
const maxRecvError = 4
// MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip
// 5 allows for an initial handshake and each host pair re-handshaking twice
@@ -52,7 +53,6 @@ type Relay struct {
type HostMap struct {
sync.RWMutex //Because we concurrently read and write to our maps
name string
Indexes map[uint32]*HostInfo
Relays map[uint32]*HostInfo // Maps a Relay IDX to a Relay HostInfo object
RemoteIndexes map[uint32]*HostInfo
@@ -197,25 +197,24 @@ func (rs *RelayState) InsertRelay(ip iputil.VpnIp, idx uint32, r *Relay) {
}
type HostInfo struct {
sync.RWMutex
remote *udp.Addr
remotes *RemoteList
promoteCounter atomic.Uint32
ConnectionState *ConnectionState
remoteIndexId uint32
localIndexId uint32
vpnIp iputil.VpnIp
recvError atomic.Uint32
remoteCidr *cidr.Tree4[struct{}]
relayState RelayState
remote *udp.Addr
remotes *RemoteList
promoteCounter atomic.Uint32
ConnectionState *ConnectionState
handshakeStart time.Time //todo: this an entry in the handshake manager
HandshakeReady bool //todo: being in the manager means you are ready
HandshakeCounter int //todo: another handshake manager entry
HandshakeLastRemotes []*udp.Addr //todo: another handshake manager entry, which remotes we sent to last time
HandshakeComplete bool //todo: this should go away in favor of ConnectionState.ready
HandshakePacket map[uint8][]byte //todo: this is other handshake manager entry
packetStore []*cachedPacket //todo: this is other handshake manager entry
remoteIndexId uint32
localIndexId uint32
vpnIp iputil.VpnIp
recvError int
remoteCidr *cidr.Tree4
relayState RelayState
// HandshakePacket records the packets used to create this hostinfo
// We need these to avoid replayed handshake packets creating new hostinfos which causes churn
HandshakePacket map[uint8][]byte
// nextLHQuery is the earliest we can ask the lighthouse for new information.
// This is used to limit lighthouse re-queries in chatty clients
nextLHQuery atomic.Int64
// lastRebindCount is the other side of Interface.rebindCount, if these values don't match then we need to ask LH
// for a punch from the remote end of this tunnel. The goal being to prime their conntrack for our traffic just like
@@ -255,13 +254,12 @@ type cachedPacketMetrics struct {
dropped metrics.Counter
}
func NewHostMap(l *logrus.Logger, name string, vpnCIDR *net.IPNet, preferredRanges []*net.IPNet) *HostMap {
func NewHostMap(l *logrus.Logger, vpnCIDR *net.IPNet, preferredRanges []*net.IPNet) *HostMap {
h := map[iputil.VpnIp]*HostInfo{}
i := map[uint32]*HostInfo{}
r := map[uint32]*HostInfo{}
relays := map[uint32]*HostInfo{}
m := HostMap{
name: name,
Indexes: i,
Relays: relays,
RemoteIndexes: r,
@@ -273,8 +271,8 @@ func NewHostMap(l *logrus.Logger, name string, vpnCIDR *net.IPNet, preferredRang
return &m
}
// UpdateStats takes a name and reports host and index counts to the stats collection system
func (hm *HostMap) EmitStats(name string) {
// EmitStats reports host, index, and relay counts to the stats collection system
func (hm *HostMap) EmitStats() {
hm.RLock()
hostLen := len(hm.Hosts)
indexLen := len(hm.Indexes)
@@ -282,10 +280,10 @@ func (hm *HostMap) EmitStats(name string) {
relaysLen := len(hm.Relays)
hm.RUnlock()
metrics.GetOrRegisterGauge("hostmap."+name+".hosts", nil).Update(int64(hostLen))
metrics.GetOrRegisterGauge("hostmap."+name+".indexes", nil).Update(int64(indexLen))
metrics.GetOrRegisterGauge("hostmap."+name+".remoteIndexes", nil).Update(int64(remoteIndexLen))
metrics.GetOrRegisterGauge("hostmap."+name+".relayIndexes", nil).Update(int64(relaysLen))
metrics.GetOrRegisterGauge("hostmap.main.hosts", nil).Update(int64(hostLen))
metrics.GetOrRegisterGauge("hostmap.main.indexes", nil).Update(int64(indexLen))
metrics.GetOrRegisterGauge("hostmap.main.remoteIndexes", nil).Update(int64(remoteIndexLen))
metrics.GetOrRegisterGauge("hostmap.main.relayIndexes", nil).Update(int64(relaysLen))
}
func (hm *HostMap) RemoveRelay(localIdx uint32) {
@@ -299,88 +297,6 @@ func (hm *HostMap) RemoveRelay(localIdx uint32) {
hm.Unlock()
}
func (hm *HostMap) GetIndexByVpnIp(vpnIp iputil.VpnIp) (uint32, error) {
hm.RLock()
if i, ok := hm.Hosts[vpnIp]; ok {
index := i.localIndexId
hm.RUnlock()
return index, nil
}
hm.RUnlock()
return 0, errors.New("vpn IP not found")
}
func (hm *HostMap) Add(ip iputil.VpnIp, hostinfo *HostInfo) {
hm.Lock()
hm.Hosts[ip] = hostinfo
hm.Unlock()
}
func (hm *HostMap) AddVpnIp(vpnIp iputil.VpnIp, init func(hostinfo *HostInfo)) (hostinfo *HostInfo, created bool) {
hm.RLock()
if h, ok := hm.Hosts[vpnIp]; !ok {
hm.RUnlock()
h = &HostInfo{
vpnIp: vpnIp,
HandshakePacket: make(map[uint8][]byte, 0),
relayState: RelayState{
relays: map[iputil.VpnIp]struct{}{},
relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
}
if init != nil {
init(h)
}
hm.Lock()
hm.Hosts[vpnIp] = h
hm.Unlock()
return h, true
} else {
hm.RUnlock()
return h, false
}
}
// Only used by pendingHostMap when the remote index is not initially known
func (hm *HostMap) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
hm.Lock()
h.remoteIndexId = index
hm.RemoteIndexes[index] = h
hm.Unlock()
if hm.l.Level > logrus.DebugLevel {
hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes),
"hostinfo": m{"existing": true, "localIndexId": h.localIndexId, "hostId": h.vpnIp}}).
Debug("Hostmap remoteIndex added")
}
}
// DeleteReverseIndex is used to clean up on recv_error
// This function should only ever be called on the pending hostmap
func (hm *HostMap) DeleteReverseIndex(index uint32) {
hm.Lock()
hostinfo, ok := hm.RemoteIndexes[index]
if ok {
delete(hm.Indexes, hostinfo.localIndexId)
delete(hm.RemoteIndexes, index)
// Check if we have an entry under hostId that matches the same hostinfo
// instance. Clean it up as well if we do (they might not match in pendingHostmap)
var hostinfo2 *HostInfo
hostinfo2, ok = hm.Hosts[hostinfo.vpnIp]
if ok && hostinfo2 == hostinfo {
delete(hm.Hosts, hostinfo.vpnIp)
}
}
hm.Unlock()
if hm.l.Level >= logrus.DebugLevel {
hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes)}).
Debug("Hostmap remote index deleted")
}
}
// DeleteHostInfo will fully unlink the hostinfo and return true if it was the final hostinfo for this vpn ip
func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
// Delete the host itself, ensuring it's not modified anymore
@@ -393,12 +309,6 @@ func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
return final
}
func (hm *HostMap) DeleteRelayIdx(localIdx uint32) {
hm.Lock()
defer hm.Unlock()
delete(hm.RemoteIndexes, localIdx)
}
func (hm *HostMap) MakePrimary(hostinfo *HostInfo) {
hm.Lock()
defer hm.Unlock()
@@ -476,7 +386,7 @@ func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
}
if hm.l.Level >= logrus.DebugLevel {
hm.l.WithField("hostMap", m{"mapName": hm.name, "mapTotalSize": len(hm.Hosts),
hm.l.WithField("hostMap", m{"mapTotalSize": len(hm.Hosts),
"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
Debug("Hostmap hostInfo deleted")
}
@@ -486,55 +396,40 @@ func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
}
}
func (hm *HostMap) QueryIndex(index uint32) (*HostInfo, error) {
//TODO: we probably just want to return bool instead of error, or at least a static error
func (hm *HostMap) QueryIndex(index uint32) *HostInfo {
hm.RLock()
if h, ok := hm.Indexes[index]; ok {
hm.RUnlock()
return h, nil
return h
} else {
hm.RUnlock()
return nil, errors.New("unable to find index")
return nil
}
}
// Retrieves a HostInfo by Index. Returns whether the HostInfo is primary at time of query.
// This helper exists so that the hostinfo.prev pointer can be read while the hostmap lock is held.
func (hm *HostMap) QueryIndexIsPrimary(index uint32) (*HostInfo, bool, error) {
//TODO: we probably just want to return bool instead of error, or at least a static error
hm.RLock()
if h, ok := hm.Indexes[index]; ok {
hm.RUnlock()
return h, h.prev == nil, nil
} else {
hm.RUnlock()
return nil, false, errors.New("unable to find index")
}
}
func (hm *HostMap) QueryRelayIndex(index uint32) (*HostInfo, error) {
//TODO: we probably just want to return bool instead of error, or at least a static error
func (hm *HostMap) QueryRelayIndex(index uint32) *HostInfo {
hm.RLock()
if h, ok := hm.Relays[index]; ok {
hm.RUnlock()
return h, nil
return h
} else {
hm.RUnlock()
return nil, errors.New("unable to find index")
return nil
}
}
func (hm *HostMap) QueryReverseIndex(index uint32) (*HostInfo, error) {
func (hm *HostMap) QueryReverseIndex(index uint32) *HostInfo {
hm.RLock()
if h, ok := hm.RemoteIndexes[index]; ok {
hm.RUnlock()
return h, nil
return h
} else {
hm.RUnlock()
return nil, fmt.Errorf("unable to find reverse index or connectionstate nil in %s hostmap", hm.name)
return nil
}
}
func (hm *HostMap) QueryVpnIp(vpnIp iputil.VpnIp) (*HostInfo, error) {
func (hm *HostMap) QueryVpnIp(vpnIp iputil.VpnIp) *HostInfo {
return hm.queryVpnIp(vpnIp, nil)
}
@@ -556,13 +451,7 @@ func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp iputil.VpnIp) (*Host
return nil, nil, errors.New("unable to find host with relay")
}
// PromoteBestQueryVpnIp will attempt to lazily switch to the best remote every
// `PromoteEvery` calls to this function for a given host.
func (hm *HostMap) PromoteBestQueryVpnIp(vpnIp iputil.VpnIp, ifce *Interface) (*HostInfo, error) {
return hm.queryVpnIp(vpnIp, ifce)
}
func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) (*HostInfo, error) {
func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) *HostInfo {
hm.RLock()
if h, ok := hm.Hosts[vpnIp]; ok {
hm.RUnlock()
@@ -570,12 +459,12 @@ func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) (*Host
if promoteIfce != nil && !promoteIfce.lightHouse.amLighthouse {
h.TryPromoteBest(hm.preferredRanges, promoteIfce)
}
return h, nil
return h
}
hm.RUnlock()
return nil, errors.New("unable to find host")
return nil
}
// unlockedAddHostInfo assumes you have a write-lock and will add a hostinfo object to the hostmap Indexes and RemoteIndexes maps.
@@ -598,7 +487,7 @@ func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo
if hm.l.Level >= logrus.DebugLevel {
hm.l.WithField("hostMap", m{"mapName": hm.name, "vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts),
hm.l.WithField("hostMap", m{"vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts),
"hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}).
Debug("Hostmap vpnIp added")
}
@@ -614,15 +503,34 @@ func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
}
}
func (hm *HostMap) GetPreferredRanges() []*net.IPNet {
return hm.preferredRanges
}
func (hm *HostMap) ForEachVpnIp(f controlEach) {
hm.RLock()
defer hm.RUnlock()
for _, v := range hm.Hosts {
f(v)
}
}
func (hm *HostMap) ForEachIndex(f controlEach) {
hm.RLock()
defer hm.RUnlock()
for _, v := range hm.Indexes {
f(v)
}
}
// TryPromoteBest handles re-querying lighthouses and probing for better paths
// NOTE: It is an error to call this if you are a lighthouse since they should not roam clients!
func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface) {
c := i.promoteCounter.Add(1)
if c%PromoteEvery == 0 {
// The lock here is currently protecting i.remote access
i.RLock()
if c%ifce.tryPromoteEvery.Load() == 0 {
remote := i.remote
i.RUnlock()
// return early if we are already on a preferred remote
if remote != nil {
@@ -646,65 +554,17 @@ func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface)
}
// Re query our lighthouses for new remotes occasionally
if c%ReQueryEvery == 0 && ifce.lightHouse != nil {
ifce.lightHouse.QueryServer(i.vpnIp, ifce)
}
}
func (i *HostInfo) cachePacket(l *logrus.Logger, t header.MessageType, st header.MessageSubType, packet []byte, f packetCallback, m *cachedPacketMetrics) {
//TODO: return the error so we can log with more context
if len(i.packetStore) < 100 {
tempPacket := make([]byte, len(packet))
copy(tempPacket, packet)
//l.WithField("trace", string(debug.Stack())).Error("Caching packet", tempPacket)
i.packetStore = append(i.packetStore, &cachedPacket{t, st, f, tempPacket})
if l.Level >= logrus.DebugLevel {
i.logger(l).
WithField("length", len(i.packetStore)).
WithField("stored", true).
Debugf("Packet store")
if c%ifce.reQueryEvery.Load() == 0 && ifce.lightHouse != nil {
now := time.Now().UnixNano()
if now < i.nextLHQuery.Load() {
return
}
} else if l.Level >= logrus.DebugLevel {
m.dropped.Inc(1)
i.logger(l).
WithField("length", len(i.packetStore)).
WithField("stored", false).
Debugf("Packet store")
i.nextLHQuery.Store(now + ifce.reQueryWait.Load())
ifce.lightHouse.QueryServer(i.vpnIp)
}
}
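The new nextLHQuery field gates the packet-count trigger behind a wall-clock floor, so chatty tunnels cannot re-query the lighthouse more often than reQueryWait allows. The gate in isolation:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type host struct {
	nextQuery atomic.Int64 // unix nanos before which we must not query again
}

// maybeQuery runs fn at most once per wait interval, like nextLHQuery above.
func (h *host) maybeQuery(wait time.Duration, fn func()) bool {
	now := time.Now().UnixNano()
	if now < h.nextQuery.Load() {
		return false // still inside the quiet period
	}
	h.nextQuery.Store(now + wait.Nanoseconds())
	fn()
	return true
}

func main() {
	h := &host{}
	fmt.Println(h.maybeQuery(time.Minute, func() {})) // true: first query runs
	fmt.Println(h.maybeQuery(time.Minute, func() {})) // false: rate limited
}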
// handshakeComplete will set the connection as ready to communicate, as well as flush any stored packets
func (i *HostInfo) handshakeComplete(l *logrus.Logger, m *cachedPacketMetrics) {
//TODO: I'm not certain the distinction between handshake complete and ConnectionState being ready matters because:
//TODO: HandshakeComplete means send stored packets and ConnectionState.ready means we are ready to send
//TODO: if the transition from HandshakeComplete to ConnectionState.ready happens all within this function they are identical
i.ConnectionState.queueLock.Lock()
i.HandshakeComplete = true
//TODO: this should be managed by the handshake state machine to set it based on how many handshakes were seen.
// Clamping it to 2 gets us out of the woods for now
i.ConnectionState.messageCounter.Store(2)
if l.Level >= logrus.DebugLevel {
i.logger(l).Debugf("Sending %d stored packets", len(i.packetStore))
}
if len(i.packetStore) > 0 {
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
for _, cp := range i.packetStore {
cp.callback(cp.messageType, cp.messageSubType, i, cp.packet, nb, out)
}
m.sent.Inc(int64(len(i.packetStore)))
}
i.remotes.ResetBlockedRemotes()
i.packetStore = make([]*cachedPacket, 0)
i.ConnectionState.ready = true
i.ConnectionState.queueLock.Unlock()
}
func (i *HostInfo) GetCert() *cert.NebulaCertificate {
if i.ConnectionState != nil {
return i.ConnectionState.peerCert
@@ -761,9 +621,8 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
}
func (i *HostInfo) RecvErrorExceeded() bool {
if i.recvError < 3 {
i.recvError += 1
return false
if i.recvError.Add(1) >= maxRecvError {
return true
}
return true
}
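recvError becomes an atomic.Uint32, so the error budget check is a single Add with no hostinfo lock. A compact demonstration of the intended threshold behavior with maxRecvError = 4:

package main

import (
	"fmt"
	"sync/atomic"
)

const maxRecvError = 4 // the constant introduced in this diff

type host struct{ recvError atomic.Uint32 }

// exceeded counts one more recv_error and reports whether the budget is spent.
func (h *host) exceeded() bool {
	return h.recvError.Add(1) >= maxRecvError
}

func main() {
	h := &host{}
	for i := 1; i <= 5; i++ {
		fmt.Println(i, h.exceeded()) // true from the 4th error onward
	}
}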
@@ -774,7 +633,7 @@ func (i *HostInfo) CreateRemoteCIDR(c *cert.NebulaCertificate) {
return
}
remoteCidr := cidr.NewTree4()
remoteCidr := cidr.NewTree4[struct{}]()
for _, ip := range c.Details.Ips {
remoteCidr.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
}

View File

@@ -11,7 +11,7 @@ import (
func TestHostMap_MakePrimary(t *testing.T) {
l := test.NewLogger()
hm := NewHostMap(
l, "test",
l,
&net.IPNet{
IP: net.IP{10, 0, 0, 1},
Mask: net.IPMask{255, 255, 255, 0},
@@ -32,7 +32,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
hm.unlockedAddHostInfo(h1, f)
// Make sure we go h1 -> h2 -> h3 -> h4
prim, _ := hm.QueryVpnIp(1)
prim := hm.QueryVpnIp(1)
assert.Equal(t, h1.localIndexId, prim.localIndexId)
assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -47,7 +47,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
hm.MakePrimary(h3)
// Make sure we go h3 -> h1 -> h2 -> h4
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h3.localIndexId, prim.localIndexId)
assert.Equal(t, h1.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -62,7 +62,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
hm.MakePrimary(h4)
// Make sure we go h4 -> h3 -> h1 -> h2
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h4.localIndexId, prim.localIndexId)
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -77,7 +77,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
hm.MakePrimary(h4)
// Make sure we go h4 -> h3 -> h1 -> h2
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h4.localIndexId, prim.localIndexId)
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -92,7 +92,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
func TestHostMap_DeleteHostInfo(t *testing.T) {
l := test.NewLogger()
hm := NewHostMap(
l, "test",
l,
&net.IPNet{
IP: net.IP{10, 0, 0, 1},
Mask: net.IPMask{255, 255, 255, 0},
@@ -119,11 +119,11 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
// h6 should be deleted
assert.Nil(t, h6.next)
assert.Nil(t, h6.prev)
_, err := hm.QueryIndex(h6.localIndexId)
assert.Error(t, err)
h := hm.QueryIndex(h6.localIndexId)
assert.Nil(t, h)
// Make sure we go h1 -> h2 -> h3 -> h4 -> h5
prim, _ := hm.QueryVpnIp(1)
prim := hm.QueryVpnIp(1)
assert.Equal(t, h1.localIndexId, prim.localIndexId)
assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -142,7 +142,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h1.next)
// Make sure we go h2 -> h3 -> h4 -> h5
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h2.localIndexId, prim.localIndexId)
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -160,7 +160,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h3.next)
// Make sure we go h2 -> h4 -> h5
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h2.localIndexId, prim.localIndexId)
assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -176,7 +176,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h5.next)
// Make sure we go h2 -> h4
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h2.localIndexId, prim.localIndexId)
assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -190,7 +190,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h2.next)
// Make sure we only have h4
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h4.localIndexId, prim.localIndexId)
assert.Nil(t, prim.prev)
assert.Nil(t, prim.next)
@@ -202,6 +202,6 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h4.next)
// Make sure we have nil
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Nil(t, prim)
}
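
For context, a minimal self-contained sketch (stand-in types, not Nebula's real HostMap) of the lookup change these tests adapt to: QueryVpnIp and QueryIndex now return a possibly-nil *HostInfo instead of a (value, error) pair.

```go
package main

import "fmt"

type HostInfo struct{ localIndexId uint32 }

// lookup mirrors the new HostMap query shape: a nil result means "not found",
// replacing the (value, error) pair the old tests had to unpack.
func lookup(m map[uint32]*HostInfo, vpnIp uint32) *HostInfo {
	return m[vpnIp] // nil when absent
}

func main() {
	m := map[uint32]*HostInfo{1: {localIndexId: 42}}
	if hi := lookup(m, 1); hi != nil {
		fmt.Println("primary local index:", hi.localIndexId)
	}
	if lookup(m, 2) == nil {
		fmt.Println("no entry; callers check for nil instead of an error")
	}
}
```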

inside.go

@@ -1,7 +1,6 @@
package nebula
import (
"github.com/flynn/noise"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/header"
@@ -45,7 +44,10 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
return
}
hostinfo := f.getOrHandshake(fwPacket.RemoteIP)
hostinfo, ready := f.getOrHandshake(fwPacket.RemoteIP, func(hh *HandshakeHostInfo) {
hh.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics)
})
if hostinfo == nil {
f.rejectInside(packet, out, q)
if f.l.Level >= logrus.DebugLevel {
@@ -55,23 +57,14 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
}
return
}
ci := hostinfo.ConnectionState
if !ci.ready {
// Because we might be sending stored packets, lock here to stop new things going to
// the packet queue.
ci.queueLock.Lock()
if !ci.ready {
hostinfo.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics)
ci.queueLock.Unlock()
return
}
ci.queueLock.Unlock()
if !ready {
return
}
dropReason := f.firewall.Drop(packet, *fwPacket, false, hostinfo, f.caPool, localCache)
dropReason := f.firewall.Drop(packet, *fwPacket, false, hostinfo, f.pki.GetCAPool(), localCache)
if dropReason == nil {
f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, packet, nb, out, q)
f.sendNoMetrics(header.Message, 0, hostinfo.ConnectionState, hostinfo, nil, packet, nb, out, q)
} else {
f.rejectInside(packet, out, q)
@@ -90,6 +83,10 @@ func (f *Interface) rejectInside(packet []byte, out []byte, q int) {
}
out = iputil.CreateRejectPacket(packet, out)
if len(out) == 0 {
return
}
_, err := f.readers[q].Write(out)
if err != nil {
f.l.WithError(err).Error("Failed to write to tun")
@@ -101,80 +98,39 @@ func (f *Interface) rejectOutside(packet []byte, ci *ConnectionState, hostinfo *
return
}
// Use some out buffer space to build the packet before encryption
// Need 40 bytes for the reject packet (20 byte ipv4 header, 20 byte tcp rst packet)
// Leave 100 bytes for the encrypted packet (60 byte Nebula header, 40 byte reject packet)
out = out[:140]
outPacket := iputil.CreateRejectPacket(packet, out[100:])
f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, outPacket, nb, out, q)
out = iputil.CreateRejectPacket(packet, out)
if len(out) == 0 {
return
}
if len(out) > iputil.MaxRejectPacketSize {
if f.l.GetLevel() >= logrus.InfoLevel {
f.l.
WithField("packet", packet).
WithField("outPacket", out).
Info("rejectOutside: packet too big, not sending")
}
return
}
f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, out, nb, packet, q)
}
func (f *Interface) Handshake(vpnIp iputil.VpnIp) {
f.getOrHandshake(vpnIp)
f.getOrHandshake(vpnIp, nil)
}
// getOrHandshake returns nil if the vpnIp is not routable
func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp) *HostInfo {
// getOrHandshake returns nil if the vpnIp is not routable.
// If the 2nd return var is false then the hostinfo is not ready to be used in a tunnel
func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) {
if !ipMaskContains(f.lightHouse.myVpnIp, f.lightHouse.myVpnZeros, vpnIp) {
vpnIp = f.inside.RouteFor(vpnIp)
if vpnIp == 0 {
return nil
}
}
hostinfo, err := f.hostMap.PromoteBestQueryVpnIp(vpnIp, f)
//if err != nil || hostinfo.ConnectionState == nil {
if err != nil {
hostinfo, err = f.handshakeManager.pendingHostMap.QueryVpnIp(vpnIp)
if err != nil {
hostinfo = f.handshakeManager.AddVpnIp(vpnIp, f.initHostInfo)
}
}
ci := hostinfo.ConnectionState
if ci != nil && ci.eKey != nil && ci.ready {
return hostinfo
}
// Handshake is not ready, we need to grab the lock now before we start the handshake process
hostinfo.Lock()
defer hostinfo.Unlock()
// Double check, now that we have the lock
ci = hostinfo.ConnectionState
if ci != nil && ci.eKey != nil && ci.ready {
return hostinfo
}
// If we have already created the handshake packet, we don't want to call the function at all.
if !hostinfo.HandshakeReady {
ixHandshakeStage0(f, vpnIp, hostinfo)
// FIXME: Maybe make XX selectable, but probably not since psk makes it nearly pointless for us.
//xx_handshakeStage0(f, ip, hostinfo)
// If this is a static host, we don't need to wait for the HostQueryReply
// We can trigger the handshake right now
_, doTrigger := f.lightHouse.GetStaticHostList()[vpnIp]
if !doTrigger {
// Add any calculated remotes, and trigger early handshake if one found
doTrigger = f.lightHouse.addCalculatedRemotes(vpnIp)
}
if doTrigger {
select {
case f.handshakeManager.trigger <- vpnIp:
default:
}
return nil, false
}
}
return hostinfo
}
// initHostInfo is the init function to pass to (*HandshakeManager).AddVpnIP that
// will create the initial Noise ConnectionState
func (f *Interface) initHostInfo(hostinfo *HostInfo) {
hostinfo.ConnectionState = f.newConnectionState(f.l, true, noise.HandshakeIX, []byte{}, 0)
return f.handshakeManager.GetOrHandshake(vpnIp, cacheCallback)
}
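
As a rough illustration of the control flow above (stand-in types only; the real work now lives in HandshakeManager.GetOrHandshake), the packet-caching callback runs only while a handshake is still pending, and the boolean reports whether the tunnel is usable immediately:

```go
package main

import "fmt"

type hostInfo struct{ ready bool }
type pending struct{ queued [][]byte }

// getOrHandshake mirrors the refactored flow: the callback runs only while a
// handshake is still pending, and the bool reports whether the tunnel is
// usable right now.
func getOrHandshake(hosts map[string]*hostInfo, pend map[string]*pending,
	ip string, cache func(*pending)) (*hostInfo, bool) {
	if h, ok := hosts[ip]; ok && h.ready {
		return h, true
	}
	p, ok := pend[ip]
	if !ok {
		p = &pending{}
		pend[ip] = p
	}
	if cache != nil {
		cache(p) // queue the packet for replay once the handshake completes
	}
	return &hostInfo{}, false
}

func main() {
	hosts := map[string]*hostInfo{"10.0.0.2": {ready: true}}
	pend := map[string]*pending{}
	pkt := []byte("payload")
	if _, ready := getOrHandshake(hosts, pend, "10.0.0.3", func(p *pending) {
		p.queued = append(p.queued, pkt)
	}); !ready {
		fmt.Println("queued for 10.0.0.3:", len(pend["10.0.0.3"].queued))
	}
}
```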
func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte) {
@@ -186,7 +142,7 @@ func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubTyp
}
// check if packet is in outbound fw rules
dropReason := f.firewall.Drop(p, *fp, false, hostinfo, f.caPool, nil)
dropReason := f.firewall.Drop(p, *fp, false, hostinfo, f.pki.GetCAPool(), nil)
if dropReason != nil {
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("fwPacket", fp).
@@ -201,7 +157,10 @@ func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubTyp
// SendMessageToVpnIp handles real ip:port lookup and sends to the current best known address for vpnIp
func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) {
hostInfo := f.getOrHandshake(vpnIp)
hostInfo, ready := f.getOrHandshake(vpnIp, func(hh *HandshakeHostInfo) {
hh.cachePacket(f.l, t, st, p, f.SendMessageToHostInfo, f.cachedPacketMetrics)
})
if hostInfo == nil {
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("vpnIp", vpnIp).
@@ -210,16 +169,8 @@ func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSu
return
}
if !hostInfo.ConnectionState.ready {
// Because we might be sending stored packets, lock here to stop new things going to
// the packet queue.
hostInfo.ConnectionState.queueLock.Lock()
if !hostInfo.ConnectionState.ready {
hostInfo.cachePacket(f.l, t, st, p, f.SendMessageToHostInfo, f.cachedPacketMetrics)
hostInfo.ConnectionState.queueLock.Unlock()
return
}
hostInfo.ConnectionState.queueLock.Unlock()
if !ready {
return
}
f.SendMessageToHostInfo(t, st, hostInfo, p, nb, out)
@@ -239,7 +190,7 @@ func (f *Interface) sendTo(t header.MessageType, st header.MessageSubType, ci *C
f.sendNoMetrics(t, st, ci, hostinfo, remote, p, nb, out, 0)
}
// sendVia sends a payload through a Relay tunnel. No authentication or encryption is done
// SendVia sends a payload through a Relay tunnel. No authentication or encryption is done
// to the payload for the ultimate target host, making this a useful method for sending
// handshake messages to peers through relay tunnels.
// via is the HostInfo through which the message is relayed.
@@ -337,7 +288,7 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
if t != header.CloseTunnel && hostinfo.lastRebindCount != f.rebindCount {
//NOTE: there is an update hole if a tunnel isn't used and exactly 256 rebinds occur before the tunnel is
// finally used again. This tunnel would eventually be torn down and recreated if this action didn't help.
f.lightHouse.QueryServer(hostinfo.vpnIp, f)
f.lightHouse.QueryServer(hostinfo.vpnIp)
hostinfo.lastRebindCount = f.rebindCount
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("vpnIp", hostinfo.vpnIp).Debug("Lighthouse update triggered for punch due to rebind counter")


@@ -13,7 +13,6 @@ import (
"github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/header"
@@ -26,9 +25,9 @@ const mtu = 9001
type InterfaceConfig struct {
HostMap *HostMap
Outside *udp.Conn
Outside udp.Conn
Inside overlay.Device
certState *CertState
pki *PKI
Cipher string
Firewall *Firewall
ServeDns bool
@@ -41,20 +40,22 @@ type InterfaceConfig struct {
routines int
MessageMetrics *MessageMetrics
version string
caPool *cert.NebulaCAPool
disconnectInvalid bool
relayManager *relayManager
punchy *Punchy
tryPromoteEvery uint32
reQueryEvery uint32
reQueryWait time.Duration
ConntrackCacheTimeout time.Duration
l *logrus.Logger
}
type Interface struct {
hostMap *HostMap
outside *udp.Conn
outside udp.Conn
inside overlay.Device
certState atomic.Pointer[CertState]
pki *PKI
cipher string
firewall *Firewall
connectionManager *connectionManager
@@ -67,11 +68,14 @@ type Interface struct {
dropLocalBroadcast bool
dropMulticast bool
routines int
caPool *cert.NebulaCAPool
disconnectInvalid bool
disconnectInvalid atomic.Bool
closed atomic.Bool
relayManager *relayManager
tryPromoteEvery atomic.Uint32
reQueryEvery atomic.Uint32
reQueryWait atomic.Int64
sendRecvErrorConfig sendRecvErrorConfig
// rebindCount is used to decide if an active tunnel should trigger a punch notification through a lighthouse
@@ -80,7 +84,7 @@ type Interface struct {
conntrackCacheTimeout time.Duration
writers []*udp.Conn
writers []udp.Conn
readers []io.ReadWriteCloser
metricHandshakes metrics.Histogram
@@ -144,15 +148,17 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
if c.Inside == nil {
return nil, errors.New("no inside interface (tun)")
}
if c.certState == nil {
if c.pki == nil {
return nil, errors.New("no certificate state")
}
if c.Firewall == nil {
return nil, errors.New("no firewall rules")
}
myVpnIp := iputil.Ip2VpnIp(c.certState.certificate.Details.Ips[0].IP)
certificate := c.pki.GetCertState().Certificate
myVpnIp := iputil.Ip2VpnIp(certificate.Details.Ips[0].IP)
ifce := &Interface{
pki: c.pki,
hostMap: c.HostMap,
outside: c.Outside,
inside: c.Inside,
@@ -162,15 +168,13 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
handshakeManager: c.HandshakeManager,
createTime: time.Now(),
lightHouse: c.lightHouse,
localBroadcast: myVpnIp | ^iputil.Ip2VpnIp(c.certState.certificate.Details.Ips[0].Mask),
localBroadcast: myVpnIp | ^iputil.Ip2VpnIp(certificate.Details.Ips[0].Mask),
dropLocalBroadcast: c.DropLocalBroadcast,
dropMulticast: c.DropMulticast,
routines: c.routines,
version: c.version,
writers: make([]*udp.Conn, c.routines),
writers: make([]udp.Conn, c.routines),
readers: make([]io.ReadWriteCloser, c.routines),
caPool: c.caPool,
disconnectInvalid: c.disconnectInvalid,
myVpnIp: myVpnIp,
relayManager: c.relayManager,
@@ -186,7 +190,10 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
l: c.l,
}
ifce.certState.Store(c.certState)
ifce.tryPromoteEvery.Store(c.tryPromoteEvery)
ifce.reQueryEvery.Store(c.reQueryEvery)
ifce.reQueryWait.Store(int64(c.reQueryWait))
ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval, c.punchy)
return ifce, nil
@@ -243,7 +250,7 @@ func (f *Interface) run() {
func (f *Interface) listenOut(i int) {
runtime.LockOSThread()
var li *udp.Conn
var li udp.Conn
// TODO clean this up with a coherent interface for each outside connection
if i > 0 {
li = f.writers[i]
@@ -283,47 +290,24 @@ func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
}
func (f *Interface) RegisterConfigChangeCallbacks(c *config.C) {
c.RegisterReloadCallback(f.reloadCA)
c.RegisterReloadCallback(f.reloadCertKey)
c.RegisterReloadCallback(f.reloadFirewall)
c.RegisterReloadCallback(f.reloadSendRecvError)
c.RegisterReloadCallback(f.reloadDisconnectInvalid)
c.RegisterReloadCallback(f.reloadMisc)
for _, udpConn := range f.writers {
c.RegisterReloadCallback(udpConn.ReloadConfig)
}
}
func (f *Interface) reloadCA(c *config.C) {
// reload and check regardless
// todo: need mutex?
newCAs, err := loadCAFromConfig(f.l, c)
if err != nil {
f.l.WithError(err).Error("Could not refresh trusted CA certificates")
return
func (f *Interface) reloadDisconnectInvalid(c *config.C) {
initial := c.InitialLoad()
if initial || c.HasChanged("pki.disconnect_invalid") {
f.disconnectInvalid.Store(c.GetBool("pki.disconnect_invalid", true))
if !initial {
f.l.Infof("pki.disconnect_invalid changed to %v", f.disconnectInvalid.Load())
}
}
f.caPool = newCAs
f.l.WithField("fingerprints", f.caPool.GetFingerprints()).Info("Trusted CA certificates refreshed")
}
func (f *Interface) reloadCertKey(c *config.C) {
// reload and check in all cases
cs, err := NewCertStateFromConfig(c)
if err != nil {
f.l.WithError(err).Error("Could not refresh client cert")
return
}
// did IP in cert change? if so, don't set
currentCert := f.certState.Load().certificate
oldIPs := currentCert.Details.Ips
newIPs := cs.certificate.Details.Ips
if len(oldIPs) > 0 && len(newIPs) > 0 && oldIPs[0].String() != newIPs[0].String() {
f.l.WithField("new_ip", newIPs[0]).WithField("old_ip", oldIPs[0]).Error("IP in new cert was different from old")
return
}
f.certState.Store(cs)
f.l.WithField("cert", cs.certificate).Info("Client cert refreshed from disk")
}
func (f *Interface) reloadFirewall(c *config.C) {
@@ -333,7 +317,7 @@ func (f *Interface) reloadFirewall(c *config.C) {
return
}
fw, err := NewFirewallFromConfig(f.l, f.certState.Load().certificate, c)
fw, err := NewFirewallFromConfig(f.l, f.pki.GetCertState().Certificate, c)
if err != nil {
f.l.WithError(err).Error("Error while creating firewall during reload")
return
@@ -348,8 +332,8 @@ func (f *Interface) reloadFirewall(c *config.C) {
// If rulesVersion is back to zero, we have wrapped all the way around. Be
// safe and just reset conntrack in this case.
if fw.rulesVersion == 0 {
f.l.WithField("firewallHash", fw.GetRuleHash()).
WithField("oldFirewallHash", oldFw.GetRuleHash()).
f.l.WithField("firewallHashes", fw.GetRuleHashes()).
WithField("oldFirewallHashes", oldFw.GetRuleHashes()).
WithField("rulesVersion", fw.rulesVersion).
Warn("firewall rulesVersion has overflowed, resetting conntrack")
} else {
@@ -359,8 +343,8 @@ func (f *Interface) reloadFirewall(c *config.C) {
f.firewall = fw
oldFw.Destroy()
f.l.WithField("firewallHash", fw.GetRuleHash()).
WithField("oldFirewallHash", oldFw.GetRuleHash()).
f.l.WithField("firewallHashes", fw.GetRuleHashes()).
WithField("oldFirewallHashes", oldFw.GetRuleHashes()).
WithField("rulesVersion", fw.rulesVersion).
Info("New firewall has been installed")
}
@@ -389,6 +373,26 @@ func (f *Interface) reloadSendRecvError(c *config.C) {
}
}
func (f *Interface) reloadMisc(c *config.C) {
if c.HasChanged("counters.try_promote") {
n := c.GetUint32("counters.try_promote", defaultPromoteEvery)
f.tryPromoteEvery.Store(n)
f.l.Info("counters.try_promote has changed")
}
if c.HasChanged("counters.requery_every_packets") {
n := c.GetUint32("counters.requery_every_packets", defaultReQueryEvery)
f.reQueryEvery.Store(n)
f.l.Info("counters.requery_every_packets has changed")
}
if c.HasChanged("timers.requery_wait_duration") {
n := c.GetDuration("timers.requery_wait_duration", defaultReQueryWait)
f.reQueryWait.Store(int64(n))
f.l.Info("timers.requery_wait_duration has changed")
}
}
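
The hot-reload pattern used here, reduced to a self-contained sketch: tunables move into atomics on the Interface so the packet path reads them lock-free while the reload callback swaps new values in.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// Tunables live in atomics so the packet path can read them without a mutex
// while a reload callback stores new values.
type iface struct {
	tryPromoteEvery atomic.Uint32
}

func (f *iface) reloadMisc(newVal uint32) {
	f.tryPromoteEvery.Store(newVal) // runs on the config-reload goroutine
}

func main() {
	var f iface
	f.reloadMisc(1000)
	fmt.Println("hot path reads:", f.tryPromoteEvery.Load()) // no lock needed
}
```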
func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
ticker := time.NewTicker(i)
defer ticker.Stop()
@@ -405,7 +409,7 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
f.firewall.EmitStats()
f.handshakeManager.EmitStats()
udpStats()
certExpirationGauge.Update(int64(f.certState.Load().certificate.Details.NotAfter.Sub(time.Now()) / time.Second))
certExpirationGauge.Update(int64(f.pki.GetCertState().Certificate.Details.NotAfter.Sub(time.Now()) / time.Second))
}
}
}
@@ -413,6 +417,13 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
func (f *Interface) Close() error {
f.closed.Store(true)
for _, u := range f.writers {
err := u.Close()
if err != nil {
f.l.WithError(err).Error("Error while closing udp socket")
}
}
// Release the tun device
return f.inside.Close()
}


@@ -6,8 +6,19 @@ import (
"golang.org/x/net/ipv4"
)
const (
// Need 96 bytes for the largest reject packet:
// - 20 byte ipv4 header
// - 8 byte icmpv4 header
// - 68 byte body (60 byte max orig ipv4 header + 8 byte orig icmpv4 header)
MaxRejectPacketSize = ipv4.HeaderLen + 8 + 60 + 8
)
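
A one-liner to sanity-check the arithmetic in the comment above (constants restated locally; ipv4.HeaderLen is 20):

```go
package main

import "fmt"

func main() {
	const ipv4HeaderLen = 20 // ipv4.HeaderLen
	// 20-byte new IPv4 header + 8-byte ICMP header + up to 68 bytes of the
	// offending packet (60-byte max original header + 8 bytes of it).
	const maxRejectPacketSize = ipv4HeaderLen + 8 + 60 + 8
	fmt.Println(maxRejectPacketSize) // 96
}
```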
func CreateRejectPacket(packet []byte, out []byte) []byte {
// TODO ipv4 only, need to fix when inside supports ipv6
if len(packet) < ipv4.HeaderLen || int(packet[0]>>4) != ipv4.Version {
return nil
}
switch packet[9] {
case 6: // tcp
return ipv4CreateRejectTCPPacket(packet, out)
@@ -19,20 +30,28 @@ func CreateRejectPacket(packet []byte, out []byte) []byte {
func ipv4CreateRejectICMPPacket(packet []byte, out []byte) []byte {
ihl := int(packet[0]&0x0f) << 2
// ICMP reply includes header and first 8 bytes of the packet
if len(packet) < ihl {
// We need at least this many bytes for this to be a valid packet
return nil
}
// ICMP reply includes original header and first 8 bytes of the packet
packetLen := len(packet)
if packetLen > ihl+8 {
packetLen = ihl + 8
}
outLen := ipv4.HeaderLen + 8 + packetLen
if outLen > cap(out) {
return nil
}
out = out[:(outLen)]
out = out[:outLen]
ipHdr := out[0:ipv4.HeaderLen]
ipHdr[0] = ipv4.Version<<4 | (ipv4.HeaderLen >> 2) // version, ihl
ipHdr[1] = 0 // DSCP, ECN
binary.BigEndian.PutUint16(ipHdr[2:], uint16(ipv4.HeaderLen+8+packetLen)) // Total Length
ipHdr[0] = ipv4.Version<<4 | (ipv4.HeaderLen >> 2) // version, ihl
ipHdr[1] = 0 // DSCP, ECN
binary.BigEndian.PutUint16(ipHdr[2:], uint16(outLen)) // Total Length
ipHdr[4] = 0 // id
ipHdr[5] = 0 // .
@@ -76,7 +95,15 @@ func ipv4CreateRejectTCPPacket(packet []byte, out []byte) []byte {
ihl := int(packet[0]&0x0f) << 2
outLen := ipv4.HeaderLen + tcpLen
out = out[:(outLen)]
if len(packet) < ihl+tcpLen {
// We need at least this many bytes for this to be a valid packet
return nil
}
if outLen > cap(out) {
return nil
}
out = out[:outLen]
ipHdr := out[0:ipv4.HeaderLen]
ipHdr[0] = ipv4.Version<<4 | (ipv4.HeaderLen >> 2) // version, ihl

iputil/packet_test.go (new file)

@@ -0,0 +1,73 @@
package iputil
import (
"net"
"testing"
"github.com/stretchr/testify/assert"
"golang.org/x/net/ipv4"
)
func Test_CreateRejectPacket(t *testing.T) {
h := ipv4.Header{
Len: 20,
Src: net.IPv4(10, 0, 0, 1),
Dst: net.IPv4(10, 0, 0, 2),
Protocol: 1, // ICMP
}
b, err := h.Marshal()
if err != nil {
t.Fatalf("h.Marshal: %v", err)
}
b = append(b, []byte{0, 3, 0, 4}...)
expectedLen := ipv4.HeaderLen + 8 + h.Len + 4
out := make([]byte, expectedLen)
rejectPacket := CreateRejectPacket(b, out)
assert.NotNil(t, rejectPacket)
assert.Len(t, rejectPacket, expectedLen)
// ICMP with max header len
h = ipv4.Header{
Len: 60,
Src: net.IPv4(10, 0, 0, 1),
Dst: net.IPv4(10, 0, 0, 2),
Protocol: 1, // ICMP
Options: make([]byte, 40),
}
b, err = h.Marshal()
if err != nil {
t.Fatalf("h.Marshal: %v", err)
}
b = append(b, []byte{0, 3, 0, 4, 0, 0, 0, 0}...)
expectedLen = MaxRejectPacketSize
out = make([]byte, MaxRejectPacketSize)
rejectPacket = CreateRejectPacket(b, out)
assert.NotNil(t, rejectPacket)
assert.Len(t, rejectPacket, expectedLen)
// TCP with max header len
h = ipv4.Header{
Len: 60,
Src: net.IPv4(10, 0, 0, 1),
Dst: net.IPv4(10, 0, 0, 2),
Protocol: 6, // TCP
Options: make([]byte, 40),
}
b, err = h.Marshal()
if err != nil {
t.Fatalf("h.Marshal: %v", err)
}
b = append(b, []byte{0, 3, 0, 4}...)
b = append(b, make([]byte, 16)...)
expectedLen = ipv4.HeaderLen + 20
out = make([]byte, expectedLen)
rejectPacket = CreateRejectPacket(b, out)
assert.NotNil(t, rejectPacket)
assert.Len(t, rejectPacket, expectedLen)
}


@@ -39,7 +39,7 @@ type LightHouse struct {
myVpnIp iputil.VpnIp
myVpnZeros iputil.VpnIp
myVpnNet *net.IPNet
punchConn *udp.Conn
punchConn udp.Conn
punchy *Punchy
// Local cache of answers from light houses
@@ -64,18 +64,19 @@ type LightHouse struct {
staticList atomic.Pointer[map[iputil.VpnIp]struct{}]
lighthouses atomic.Pointer[map[iputil.VpnIp]struct{}]
interval atomic.Int64
updateCancel context.CancelFunc
updateParentCtx context.Context
updateUdp EncWriter
nebulaPort uint32 // 32 bits because protobuf does not have a uint16
interval atomic.Int64
updateCancel context.CancelFunc
ifce EncWriter
nebulaPort uint32 // 32 bits because protobuf does not have a uint16
advertiseAddrs atomic.Pointer[[]netIpAndPort]
// IP's of relays that can be used by peers to access me
relaysForMe atomic.Pointer[[]iputil.VpnIp]
calculatedRemotes atomic.Pointer[cidr.Tree4] // Maps VpnIp to []*calculatedRemote
queryChan chan iputil.VpnIp
calculatedRemotes atomic.Pointer[cidr.Tree4[[]*calculatedRemote]] // Maps VpnIp to []*calculatedRemote
metrics *MessageMetrics
metricHolepunchTx metrics.Counter
@@ -84,7 +85,7 @@ type LightHouse struct {
// NewLightHouseFromConfig will build a Lighthouse struct from the values provided in the config object
// addrMap should be nil unless this is during a config reload
func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, pc *udp.Conn, p *Punchy) (*LightHouse, error) {
func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, pc udp.Conn, p *Punchy) (*LightHouse, error) {
amLighthouse := c.GetBool("lighthouse.am_lighthouse", false)
nebulaPort := uint32(c.GetInt("listen.port", 0))
if amLighthouse && nebulaPort == 0 {
@@ -111,6 +112,7 @@ func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C,
nebulaPort: nebulaPort,
punchConn: pc,
punchy: p,
queryChan: make(chan iputil.VpnIp, c.GetUint32("handshakes.query_buffer", 64)),
l: l,
}
lighthouses := make(map[iputil.VpnIp]struct{})
@@ -133,13 +135,15 @@ func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C,
c.RegisterReloadCallback(func(c *config.C) {
err := h.reload(c, false)
switch v := err.(type) {
case util.ContextualError:
case *util.ContextualError:
v.Log(l)
case error:
l.WithError(err).Error("failed to reload lighthouse")
}
})
h.startQueryWorker()
return &h, nil
}
@@ -167,7 +171,7 @@ func (lh *LightHouse) GetRelaysForMe() []iputil.VpnIp {
return *lh.relaysForMe.Load()
}
func (lh *LightHouse) getCalculatedRemotes() *cidr.Tree4 {
func (lh *LightHouse) getCalculatedRemotes() *cidr.Tree4[[]*calculatedRemote] {
return lh.calculatedRemotes.Load()
}
@@ -217,7 +221,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
lh.updateCancel()
}
lh.LhUpdateWorker(lh.updateParentCtx, lh.updateUdp)
lh.StartUpdateWorker()
}
}
@@ -262,6 +266,18 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
//NOTE: many things will get much simpler when we combine static_host_map and lighthouse.hosts in config
if initial || c.HasChanged("static_host_map") || c.HasChanged("static_map.cadence") || c.HasChanged("static_map.network") || c.HasChanged("static_map.lookup_timeout") {
// Clean up. Entries still in the static_host_map will be re-built.
// Entries no longer present must have their (possible) background DNS goroutines stopped.
if existingStaticList := lh.staticList.Load(); existingStaticList != nil {
lh.RLock()
for staticVpnIp := range *existingStaticList {
if am, ok := lh.addrMap[staticVpnIp]; ok && am != nil {
am.hr.Cancel()
}
}
lh.RUnlock()
}
// Build a new list based on current config.
staticList := make(map[iputil.VpnIp]struct{})
err := lh.loadStaticMap(c, lh.myVpnNet, staticList)
if err != nil {
@@ -432,9 +448,9 @@ func (lh *LightHouse) loadStaticMap(c *config.C, tunCidr *net.IPNet, staticList
return nil
}
func (lh *LightHouse) Query(ip iputil.VpnIp, f EncWriter) *RemoteList {
func (lh *LightHouse) Query(ip iputil.VpnIp) *RemoteList {
if !lh.IsLighthouseIP(ip) {
lh.QueryServer(ip, f)
lh.QueryServer(ip)
}
lh.RLock()
if v, ok := lh.addrMap[ip]; ok {
@@ -445,30 +461,14 @@ func (lh *LightHouse) Query(ip iputil.VpnIp, f EncWriter) *RemoteList {
return nil
}
// This is asynchronous so no reply should be expected
func (lh *LightHouse) QueryServer(ip iputil.VpnIp, f EncWriter) {
if lh.amLighthouse {
// QueryServer is asynchronous so no reply should be expected
func (lh *LightHouse) QueryServer(ip iputil.VpnIp) {
// Don't put lighthouse ips in the query channel because we can't query lighthouses about lighthouses
if lh.amLighthouse || lh.IsLighthouseIP(ip) {
return
}
if lh.IsLighthouseIP(ip) {
return
}
// Send a query to the lighthouses and hope for the best next time
query, err := NewLhQueryByInt(ip).Marshal()
if err != nil {
lh.l.WithError(err).WithField("vpnIp", ip).Error("Failed to marshal lighthouse query payload")
return
}
lighthouses := lh.GetLighthouses()
lh.metricTx(NebulaMeta_HostQuery, int64(len(lighthouses)))
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
for n := range lighthouses {
f.SendMessageToVpnIp(header.LightHouse, 0, n, query, nb, out)
}
lh.queryChan <- ip
}
func (lh *LightHouse) QueryCache(ip iputil.VpnIp) *RemoteList {
@@ -583,11 +583,10 @@ func (lh *LightHouse) addCalculatedRemotes(vpnIp iputil.VpnIp) bool {
if tree == nil {
return false
}
value := tree.MostSpecificContains(vpnIp)
if value == nil {
ok, calculatedRemotes := tree.MostSpecificContains(vpnIp)
if !ok {
return false
}
calculatedRemotes := value.([]*calculatedRemote)
var calculated []*Ip4AndPort
for _, cr := range calculatedRemotes {
@@ -742,33 +741,73 @@ func NewUDPAddrFromLH6(ipp *Ip6AndPort) *udp.Addr {
return udp.NewAddr(lhIp6ToIp(ipp), uint16(ipp.Port))
}
func (lh *LightHouse) LhUpdateWorker(ctx context.Context, f EncWriter) {
lh.updateParentCtx = ctx
lh.updateUdp = f
func (lh *LightHouse) startQueryWorker() {
if lh.amLighthouse {
return
}
go func() {
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
for {
select {
case <-lh.ctx.Done():
return
case ip := <-lh.queryChan:
lh.innerQueryServer(ip, nb, out)
}
}
}()
}
func (lh *LightHouse) innerQueryServer(ip iputil.VpnIp, nb, out []byte) {
if lh.IsLighthouseIP(ip) {
return
}
// Send a query to the lighthouses and hope for the best next time
query, err := NewLhQueryByInt(ip).Marshal()
if err != nil {
lh.l.WithError(err).WithField("vpnIp", ip).Error("Failed to marshal lighthouse query payload")
return
}
lighthouses := lh.GetLighthouses()
lh.metricTx(NebulaMeta_HostQuery, int64(len(lighthouses)))
for n := range lighthouses {
lh.ifce.SendMessageToVpnIp(header.LightHouse, 0, n, query, nb, out)
}
}
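
Reduced to stand-in types, the new query path looks like this: QueryServer becomes a cheap buffered-channel send, and one worker goroutine owns the reusable nb/out scratch buffers instead of every caller allocating its own (the done channel is demo-only synchronization):

```go
package main

import (
	"context"
	"fmt"
)

type lighthouse struct {
	queryChan chan string
	done      chan struct{} // demo-only: lets main wait for the worker
}

func (lh *lighthouse) startQueryWorker(ctx context.Context) {
	go func() {
		nb := make([]byte, 12)    // nonce scratch, reused across queries
		out := make([]byte, 9001) // mtu-sized send buffer, reused across queries
		for {
			select {
			case <-ctx.Done():
				return
			case ip := <-lh.queryChan:
				fmt.Printf("query lighthouses about %s (scratch %d/%d bytes)\n", ip, len(nb), len(out))
				lh.done <- struct{}{}
			}
		}
	}()
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	lh := &lighthouse{queryChan: make(chan string, 64), done: make(chan struct{})}
	lh.startQueryWorker(ctx)
	lh.queryChan <- "10.128.0.2" // this is all QueryServer now does
	<-lh.done
}
```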
func (lh *LightHouse) StartUpdateWorker() {
interval := lh.GetUpdateInterval()
if lh.amLighthouse || interval == 0 {
return
}
clockSource := time.NewTicker(time.Second * time.Duration(interval))
updateCtx, cancel := context.WithCancel(ctx)
updateCtx, cancel := context.WithCancel(lh.ctx)
lh.updateCancel = cancel
defer clockSource.Stop()
for {
lh.SendUpdate(f)
go func() {
defer clockSource.Stop()
select {
case <-updateCtx.Done():
return
case <-clockSource.C:
continue
for {
lh.SendUpdate()
select {
case <-updateCtx.Done():
return
case <-clockSource.C:
continue
}
}
}
}()
}
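
And the restart-on-reload mechanics, sketched with generic names (this mirrors updateCancel/StartUpdateWorker, not the exact Nebula types): a reload cancels the previous worker through the stored CancelFunc before starting a new one with the updated interval.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

type worker struct {
	cancel context.CancelFunc
}

// start cancels any previous goroutine via the stored CancelFunc and launches
// a fresh one with the new interval.
func (w *worker) start(parent context.Context, interval time.Duration, tick func()) {
	if w.cancel != nil {
		w.cancel() // stop the worker running with the old interval
	}
	ctx, cancel := context.WithCancel(parent)
	w.cancel = cancel
	go func() {
		t := time.NewTicker(interval)
		defer t.Stop()
		for {
			tick()
			select {
			case <-ctx.Done():
				return
			case <-t.C:
			}
		}
	}()
}

func main() {
	var w worker
	w.start(context.Background(), 10*time.Millisecond, func() { fmt.Println("update") })
	w.start(context.Background(), 20*time.Millisecond, func() { fmt.Println("update (new interval)") })
	time.Sleep(50 * time.Millisecond)
	w.cancel()
}
```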
func (lh *LightHouse) SendUpdate(f EncWriter) {
func (lh *LightHouse) SendUpdate() {
var v4 []*Ip4AndPort
var v6 []*Ip6AndPort
@@ -821,7 +860,7 @@ func (lh *LightHouse) SendUpdate(f EncWriter) {
}
for vpnIp := range lighthouses {
f.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, mm, nb, out)
lh.ifce.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, mm, nb, out)
}
}


@@ -12,6 +12,7 @@ import (
"github.com/slackhq/nebula/test"
"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
)
//TODO: Add a test to ensure udpAddr is copied and not reused
@@ -65,6 +66,35 @@ func Test_lhStaticMapping(t *testing.T) {
assert.EqualError(t, err, "lighthouse 10.128.0.3 does not have a static_host_map entry")
}
func TestReloadLighthouseInterval(t *testing.T) {
l := test.NewLogger()
_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/16")
lh1 := "10.128.0.2"
c := config.NewC(l)
c.Settings["lighthouse"] = map[interface{}]interface{}{
"hosts": []interface{}{lh1},
"interval": "1s",
}
c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"1.1.1.1:4242"}}
lh, err := NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
assert.NoError(t, err)
lh.ifce = &mockEncWriter{}
// The first routine is kicked off by main.go currently, let's make sure that one dies
c.ReloadConfigString("lighthouse:\n interval: 5")
assert.Equal(t, int64(5), lh.interval.Load())
// Subsequent calls are killed off by the LightHouse.Reload function
c.ReloadConfigString("lighthouse:\n interval: 10")
assert.Equal(t, int64(10), lh.interval.Load())
// If this completes then nothing is stealing our reload routine
c.ReloadConfigString("lighthouse:\n interval: 11")
assert.Equal(t, int64(11), lh.interval.Load())
}
func BenchmarkLighthouseHandleRequest(b *testing.B) {
l := test.NewLogger()
_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/0")
@@ -242,8 +272,17 @@ func TestLighthouse_reload(t *testing.T) {
lh, err := NewLightHouseFromConfig(context.Background(), l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
assert.NoError(t, err)
c.Settings["static_host_map"] = map[interface{}]interface{}{"10.128.0.2": []interface{}{"1.1.1.1:4242"}}
lh.reload(c, false)
nc := map[interface{}]interface{}{
"static_host_map": map[interface{}]interface{}{
"10.128.0.2": []interface{}{"1.1.1.1:4242"},
},
}
rc, err := yaml.Marshal(nc)
assert.NoError(t, err)
c.ReloadConfigString(string(rc))
err = lh.reload(c, false)
assert.NoError(t, err)
}
func newLHHostRequest(fromAddr *udp.Addr, myVpnIp, queryVpnIp iputil.VpnIp, lhh *LightHouseHandler) testLhReply {

main.go

@@ -3,7 +3,6 @@ package nebula
import (
"context"
"encoding/binary"
"errors"
"fmt"
"net"
"time"
@@ -19,7 +18,7 @@ import (
type m map[string]interface{}
func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logger, tunFd *int) (retcon *Control, reterr error) {
func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logger, deviceFactory overlay.DeviceFactory) (retcon *Control, reterr error) {
ctx, cancel := context.WithCancel(context.Background())
// Automatically cancel the context if Main returns an error, to signal all created goroutines to quit.
defer func() {
@@ -46,7 +45,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
err := configLogger(l, c)
if err != nil {
return nil, util.NewContextualError("Failed to configure the logger", nil, err)
return nil, util.ContextualizeIfNeeded("Failed to configure the logger", err)
}
c.RegisterReloadCallback(func(c *config.C) {
@@ -56,36 +55,31 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
}
})
caPool, err := loadCAFromConfig(l, c)
pki, err := NewPKIFromConfig(l, c)
if err != nil {
//The errors coming out of loadCA are already nicely formatted
return nil, util.NewContextualError("Failed to load ca from config", nil, err)
return nil, util.ContextualizeIfNeeded("Failed to load PKI from config", err)
}
l.WithField("fingerprints", caPool.GetFingerprints()).Debug("Trusted CA fingerprints")
cs, err := NewCertStateFromConfig(c)
certificate := pki.GetCertState().Certificate
fw, err := NewFirewallFromConfig(l, certificate, c)
if err != nil {
//The errors coming out of NewCertStateFromConfig are already nicely formatted
return nil, util.NewContextualError("Failed to load certificate from config", nil, err)
return nil, util.ContextualizeIfNeeded("Error while loading firewall rules", err)
}
l.WithField("cert", cs.certificate).Debug("Client nebula certificate")
fw, err := NewFirewallFromConfig(l, cs.certificate, c)
if err != nil {
return nil, util.NewContextualError("Error while loading firewall rules", nil, err)
}
l.WithField("firewallHash", fw.GetRuleHash()).Info("Firewall started")
l.WithField("firewallHashes", fw.GetRuleHashes()).Info("Firewall started")
// TODO: make sure mask is 4 bytes
tunCidr := cs.certificate.Details.Ips[0]
tunCidr := certificate.Details.Ips[0]
ssh, err := sshd.NewSSHServer(l.WithField("subsystem", "sshd"))
if err != nil {
return nil, util.ContextualizeIfNeeded("Error while creating SSH server", err)
}
wireSSHReload(l, ssh, c)
var sshStart func()
if c.GetBool("sshd.enabled", false) {
sshStart, err = configSSH(l, ssh, c)
if err != nil {
return nil, util.NewContextualError("Error while configuring the sshd", nil, err)
return nil, util.ContextualizeIfNeeded("Error while configuring the sshd", err)
}
}
@@ -134,9 +128,13 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
if !configTest {
c.CatchHUP(ctx)
tun, err = overlay.NewDeviceFromConfig(c, l, tunCidr, tunFd, routines)
if deviceFactory == nil {
deviceFactory = overlay.NewDeviceFromConfig
}
tun, err = deviceFactory(c, l, tunCidr, routines)
if err != nil {
return nil, util.NewContextualError("Failed to get a tun/tap device", nil, err)
return nil, util.ContextualizeIfNeeded("Failed to get a tun/tap device", err)
}
defer func() {
@@ -147,7 +145,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
}
// set up our UDP listener
udpConns := make([]*udp.Conn, routines)
udpConns := make([]udp.Conn, routines)
port := c.GetInt("listen.port", 0)
if !configTest {
@@ -160,11 +158,12 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
} else {
listenHost, err = net.ResolveIPAddr("ip", rawListenHost)
if err != nil {
return nil, util.NewContextualError("Failed to resolve listen.host", nil, err)
return nil, util.ContextualizeIfNeeded("Failed to resolve listen.host", err)
}
}
for i := 0; i < routines; i++ {
l.Infof("listening %q %d", listenHost.IP, port)
udpServer, err := udp.NewListener(l, listenHost.IP, port, routines > 1, c.GetInt("listen.batch", 64))
if err != nil {
return nil, util.NewContextualError("Failed to open udp listener", m{"queue": i}, err)
@@ -182,7 +181,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
for _, rawPreferredRange := range rawPreferredRanges {
_, preferredRange, err := net.ParseCIDR(rawPreferredRange)
if err != nil {
return nil, util.NewContextualError("Failed to parse preferred ranges", nil, err)
return nil, util.ContextualizeIfNeeded("Failed to parse preferred ranges", err)
}
preferredRanges = append(preferredRanges, preferredRange)
}
@@ -195,7 +194,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
if rawLocalRange != "" {
_, localRange, err := net.ParseCIDR(rawLocalRange)
if err != nil {
return nil, util.NewContextualError("Failed to parse local_range", nil, err)
return nil, util.ContextualizeIfNeeded("Failed to parse local_range", err)
}
// Check if the entry for local_range was already specified in
@@ -212,7 +211,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
}
}
hostMap := NewHostMap(l, "main", tunCidr, preferredRanges)
hostMap := NewHostMap(l, tunCidr, preferredRanges)
hostMap.metricsEnabled = c.GetBool("stats.message_metrics", false)
l.
@@ -220,18 +219,10 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
WithField("preferredRanges", hostMap.preferredRanges).
Info("Main HostMap created")
/*
config.SetDefault("promoter.interval", 10)
go hostMap.Promoter(config.GetInt("promoter.interval"))
*/
punchy := NewPunchyFromConfig(l, c)
lightHouse, err := NewLightHouseFromConfig(ctx, l, c, tunCidr, udpConns[0], punchy)
switch {
case errors.As(err, &util.ContextualError{}):
return nil, err
case err != nil:
return nil, util.NewContextualError("Failed to initialize lighthouse handler", nil, err)
if err != nil {
return nil, util.ContextualizeIfNeeded("Failed to initialize lighthouse handler", err)
}
var messageMetrics *MessageMetrics
@@ -252,13 +243,9 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
messageMetrics: messageMetrics,
}
handshakeManager := NewHandshakeManager(l, tunCidr, preferredRanges, hostMap, lightHouse, udpConns[0], handshakeConfig)
handshakeManager := NewHandshakeManager(l, hostMap, lightHouse, udpConns[0], handshakeConfig)
lightHouse.handshakeTrigger = handshakeManager.trigger
//TODO: These will be reused for psk
//handshakeMACKey := config.GetString("handshake_mac.key", "")
//handshakeAcceptedMACKeys := config.GetStringSlice("handshake_mac.accepted_keys", []string{})
serveDns := false
if c.GetBool("lighthouse.serve_dns", false) {
if c.GetBool("lighthouse.am_lighthouse", false) {
@@ -270,11 +257,12 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
checkInterval := c.GetInt("timers.connection_alive_interval", 5)
pendingDeletionInterval := c.GetInt("timers.pending_deletion_interval", 10)
ifConfig := &InterfaceConfig{
HostMap: hostMap,
Inside: tun,
Outside: udpConns[0],
certState: cs,
pki: pki,
Cipher: c.GetString("cipher", "aes"),
Firewall: fw,
ServeDns: serveDns,
@@ -282,13 +270,14 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
lightHouse: lightHouse,
checkInterval: time.Second * time.Duration(checkInterval),
pendingDeletionInterval: time.Second * time.Duration(pendingDeletionInterval),
tryPromoteEvery: c.GetUint32("counters.try_promote", defaultPromoteEvery),
reQueryEvery: c.GetUint32("counters.requery_every_packets", defaultReQueryEvery),
reQueryWait: c.GetDuration("timers.requery_wait_duration", defaultReQueryWait),
DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
DropMulticast: c.GetBool("tun.drop_multicast", false),
routines: routines,
MessageMetrics: messageMetrics,
version: buildVersion,
caPool: caPool,
disconnectInvalid: c.GetBool("pki.disconnect_invalid", false),
relayManager: NewRelayManager(ctx, l, hostMap, c),
punchy: punchy,
@@ -315,21 +304,21 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
// TODO: Better way to attach these, probably want a new interface in InterfaceConfig
// I don't want to make this initial commit too far-reaching though
ifce.writers = udpConns
lightHouse.ifce = ifce
ifce.RegisterConfigChangeCallbacks(c)
ifce.reloadDisconnectInvalid(c)
ifce.reloadSendRecvError(c)
go handshakeManager.Run(ctx, ifce)
go lightHouse.LhUpdateWorker(ctx, ifce)
handshakeManager.f = ifce
go handshakeManager.Run(ctx)
}
// TODO - stats third-party modules start uncancellable goroutines. Update those libs to accept
// a context so that they can exit when the context is Done.
statsStart, err := startStats(l, c, buildVersion, configTest)
if err != nil {
return nil, util.NewContextualError("Failed to start stats emitter", nil, err)
return nil, util.ContextualizeIfNeeded("Failed to start stats emitter", err)
}
if configTest {
@@ -339,7 +328,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
//TODO: check if we _should_ be emitting stats
go ifce.emitStats(ctx, c.GetDuration("stats.interval", time.Second*10))
attachCommands(l, c, ssh, hostMap, handshakeManager.pendingHostMap, lightHouse, ifce)
attachCommands(l, c, ssh, ifce)
// Start DNS server last to allow using the nebula IP as lighthouse.dns.host
var dnsStart func()
@@ -348,5 +337,14 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
dnsStart = dnsMain(l, hostMap, c)
}
return &Control{ifce, l, cancel, sshStart, statsStart, dnsStart}, nil
return &Control{
ifce,
l,
ctx,
cancel,
sshStart,
statsStart,
dnsStart,
lightHouse.StartUpdateWorker,
}, nil
}
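
The shape of the Control change, as a generic sketch (field and function names here are illustrative, not Nebula's exported API): Main now hands back the lighthouse update worker's start function so the embedding service can launch it last, once the interface can actually send.

```go
package main

import "fmt"

type control struct {
	start func() // stand-in for the StartUpdateWorker reference carried by Control
}

func build() *control {
	return &control{start: func() { fmt.Println("lighthouse update worker running") }}
}

func main() {
	c := build()
	// ... bring up tun, udp listeners, handshake manager, etc. ...
	c.start() // worker starts last, when the interface is ready to send
}
```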


@@ -64,9 +64,9 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via *ViaSender, out []byt
var hostinfo *HostInfo
// verify if we've seen this index before, otherwise respond to the handshake initiation
if h.Type == header.Message && h.Subtype == header.MessageRelay {
hostinfo, _ = f.hostMap.QueryRelayIndex(h.RemoteIndex)
hostinfo = f.hostMap.QueryRelayIndex(h.RemoteIndex)
} else {
hostinfo, _ = f.hostMap.QueryIndex(h.RemoteIndex)
hostinfo = f.hostMap.QueryIndex(h.RemoteIndex)
}
var ci *ConnectionState
@@ -198,7 +198,7 @@ func (f *Interface) readOutsidePackets(addr *udp.Addr, via *ViaSender, out []byt
case header.Handshake:
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
HandleIncomingHandshake(f, addr, via, packet, h, hostinfo)
f.handshakeManager.HandleIncoming(addr, via, packet, h)
return
case header.RecvError:
@@ -404,9 +404,11 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
return false
}
dropReason := f.firewall.Drop(out, *fwPacket, true, hostinfo, f.caPool, localCache)
dropReason := f.firewall.Drop(out, *fwPacket, true, hostinfo, f.pki.GetCAPool(), localCache)
if dropReason != nil {
f.rejectOutside(out, hostinfo.ConnectionState, hostinfo, nb, out, q)
// NOTE: We give `packet` as the `out` here since we already decrypted from it and we don't need it anymore
// This gives us a buffer to build the reject packet in
f.rejectOutside(out, hostinfo.ConnectionState, hostinfo, nb, packet, q)
if f.l.Level >= logrus.DebugLevel {
hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
WithField("reason", dropReason).
@@ -449,29 +451,23 @@ func (f *Interface) handleRecvError(addr *udp.Addr, h *header.H) {
Debug("Recv error received")
}
// First, clean up in the pending hostmap
f.handshakeManager.pendingHostMap.DeleteReverseIndex(h.RemoteIndex)
hostinfo, err := f.hostMap.QueryReverseIndex(h.RemoteIndex)
if err != nil {
f.l.Debugln(err, ": ", h.RemoteIndex)
hostinfo := f.hostMap.QueryReverseIndex(h.RemoteIndex)
if hostinfo == nil {
f.l.WithField("remoteIndex", h.RemoteIndex).Debugln("Did not find remote index in main hostmap")
return
}
hostinfo.Lock()
defer hostinfo.Unlock()
if !hostinfo.RecvErrorExceeded() {
return
}
if hostinfo.remote != nil && !hostinfo.remote.Equals(addr) {
f.l.Infoln("Someone spoofing recv_errors? ", addr, hostinfo.remote)
return
}
f.closeTunnel(hostinfo)
// We also delete it from pending hostmap to allow for
// fast reconnect.
// We also delete it from pending hostmap to allow for fast reconnect.
f.handshakeManager.DeleteHostInfo(hostinfo)
}
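
A compact stand-in for the tightened recv_error path: the counter check now happens with the HostInfo lock held, so concurrent error packets cannot race past the threshold (types and the threshold value below are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

type hostInfo struct {
	sync.Mutex
	recvError int
}

const maxRecvError = 4 // illustrative threshold

// recvErrorExceeded counts under the lock, so concurrent recv_error packets
// cannot race past the threshold.
func (h *hostInfo) recvErrorExceeded() bool {
	h.Lock()
	defer h.Unlock()
	h.recvError++
	return h.recvError >= maxRecvError
}

func main() {
	h := &hostInfo{}
	for i := 0; i < 5; i++ {
		fmt.Println(h.recvErrorExceeded()) // false three times, then true
	}
}
```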


@@ -21,8 +21,8 @@ type Route struct {
Install bool
}
func makeRouteTree(l *logrus.Logger, routes []Route, allowMTU bool) (*cidr.Tree4, error) {
routeTree := cidr.NewTree4()
func makeRouteTree(l *logrus.Logger, routes []Route, allowMTU bool) (*cidr.Tree4[iputil.VpnIp], error) {
routeTree := cidr.NewTree4[iputil.VpnIp]()
for _, r := range routes {
if !allowMTU && r.MTU > 0 {
l.WithField("route", r).Warnf("route MTU is not supported in %s", runtime.GOOS)


@@ -265,18 +265,16 @@ func Test_makeRouteTree(t *testing.T) {
assert.NoError(t, err)
ip := iputil.Ip2VpnIp(net.ParseIP("1.0.0.2"))
r := routeTree.MostSpecificContains(ip)
assert.NotNil(t, r)
assert.IsType(t, iputil.VpnIp(0), r)
assert.EqualValues(t, iputil.Ip2VpnIp(net.ParseIP("192.168.0.1")), r)
ok, r := routeTree.MostSpecificContains(ip)
assert.True(t, ok)
assert.Equal(t, iputil.Ip2VpnIp(net.ParseIP("192.168.0.1")), r)
ip = iputil.Ip2VpnIp(net.ParseIP("1.0.0.1"))
r = routeTree.MostSpecificContains(ip)
assert.NotNil(t, r)
assert.IsType(t, iputil.VpnIp(0), r)
assert.EqualValues(t, iputil.Ip2VpnIp(net.ParseIP("192.168.0.2")), r)
ok, r = routeTree.MostSpecificContains(ip)
assert.True(t, ok)
assert.Equal(t, iputil.Ip2VpnIp(net.ParseIP("192.168.0.2")), r)
ip = iputil.Ip2VpnIp(net.ParseIP("1.1.0.1"))
r = routeTree.MostSpecificContains(ip)
assert.Nil(t, r)
ok, r = routeTree.MostSpecificContains(ip)
assert.False(t, ok)
}
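
The generics migration these assertions track, in a self-contained sketch: cidr.Tree4[T] lookups now return a comma-ok pair of the concrete element type, so call sites drop the nil checks and interface{} type assertions (the map-backed tree below is a stand-in, not the real radix tree):

```go
package main

import "fmt"

type tree[T any] struct{ m map[string]T }

// mostSpecificContains returns a comma-ok pair of the concrete element type,
// replacing a nilable interface{} result callers had to type-assert.
func (t *tree[T]) mostSpecificContains(key string) (bool, T) {
	v, ok := t.m[key]
	return ok, v
}

func main() {
	rt := &tree[uint32]{m: map[string]uint32{"1.0.0.0/28": 42}}
	if ok, gw := rt.mostSpecificContains("1.0.0.0/28"); ok {
		fmt.Println("route via", gw) // typed uint32, no assertion needed
	}
	if ok, _ := rt.mostSpecificContains("1.1.0.0/28"); !ok {
		fmt.Println("no route")
	}
}
```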


@@ -10,7 +10,9 @@ import (
const DefaultMTU = 1300
func NewDeviceFromConfig(c *config.C, l *logrus.Logger, tunCidr *net.IPNet, fd *int, routines int) (Device, error) {
type DeviceFactory func(c *config.C, l *logrus.Logger, tunCidr *net.IPNet, routines int) (Device, error)
func NewDeviceFromConfig(c *config.C, l *logrus.Logger, tunCidr *net.IPNet, routines int) (Device, error) {
routes, err := parseRoutes(c, tunCidr)
if err != nil {
return nil, util.NewContextualError("Could not parse tun.routes", nil, err)
@@ -27,17 +29,6 @@ func NewDeviceFromConfig(c *config.C, l *logrus.Logger, tunCidr *net.IPNet, fd *
tun := newDisabledTun(tunCidr, c.GetInt("tun.tx_queue", 500), c.GetBool("stats.message_metrics", false), l)
return tun, nil
case fd != nil:
return newTunFromFd(
l,
*fd,
tunCidr,
c.GetInt("tun.mtu", DefaultMTU),
routes,
c.GetInt("tun.tx_queue", 500),
c.GetBool("tun.use_system_route_table", false),
)
default:
return newTun(
l,
@@ -51,3 +42,28 @@ func NewDeviceFromConfig(c *config.C, l *logrus.Logger, tunCidr *net.IPNet, fd *
)
}
}
func NewFdDeviceFromConfig(fd *int) DeviceFactory {
return func(c *config.C, l *logrus.Logger, tunCidr *net.IPNet, routines int) (Device, error) {
routes, err := parseRoutes(c, tunCidr)
if err != nil {
return nil, util.NewContextualError("Could not parse tun.routes", nil, err)
}
unsafeRoutes, err := parseUnsafeRoutes(c, tunCidr)
if err != nil {
return nil, util.NewContextualError("Could not parse tun.unsafe_routes", nil, err)
}
routes = append(routes, unsafeRoutes...)
return newTunFromFd(
l,
*fd,
tunCidr,
c.GetInt("tun.mtu", DefaultMTU),
routes,
c.GetInt("tun.tx_queue", 500),
c.GetBool("tun.use_system_route_table", false),
)
}
}
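
How the factory indirection is meant to be consumed, sketched with stand-in types (the real call site is Main's deviceFactory parameter): platform code captures its file descriptor in a closure instead of threading a *int through the config path.

```go
package main

import "fmt"

type device interface{ Name() string }

type fdDevice struct{ fd int }

func (d fdDevice) Name() string { return fmt.Sprintf("fd:%d", d.fd) }

type deviceFactory func(routines int) (device, error)

// newFdDeviceFactory captures the descriptor in a closure, the same move
// NewFdDeviceFromConfig makes above.
func newFdDeviceFactory(fd int) deviceFactory {
	return func(routines int) (device, error) {
		return fdDevice{fd: fd}, nil
	}
}

func main() {
	factory := newFdDeviceFactory(7) // e.g. an fd handed over by mobile bindings
	dev, err := factory(1)
	if err != nil {
		panic(err)
	}
	fmt.Println("device:", dev.Name())
}
```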


@@ -18,7 +18,7 @@ type tun struct {
io.ReadWriteCloser
fd int
cidr *net.IPNet
routeTree *cidr.Tree4
routeTree *cidr.Tree4[iputil.VpnIp]
l *logrus.Logger
}
@@ -46,12 +46,8 @@ func newTun(_ *logrus.Logger, _ string, _ *net.IPNet, _ int, _ []Route, _ int, _
}
func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
r := t.routeTree.MostSpecificContains(ip)
if r != nil {
return r.(iputil.VpnIp)
}
return 0
_, r := t.routeTree.MostSpecificContains(ip)
return r
}
func (t tun) Activate() error {


@@ -25,7 +25,7 @@ type tun struct {
cidr *net.IPNet
DefaultMTU int
Routes []Route
routeTree *cidr.Tree4
routeTree *cidr.Tree4[iputil.VpnIp]
l *logrus.Logger
// cache out buffer since we need to prepend 4 bytes for tun metadata
@@ -47,14 +47,6 @@ type ifReq struct {
pad [8]byte
}
func ioctl(a1, a2, a3 uintptr) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, a1, a2, a3)
if errno != 0 {
return errno
}
return nil
}
var sockaddrCtlSize uintptr = 32
const (
@@ -194,10 +186,10 @@ func (t *tun) Activate() error {
unix.SOCK_DGRAM,
unix.IPPROTO_IP,
)
if err != nil {
return err
}
defer unix.Close(s)
fd := uintptr(s)
@@ -312,9 +304,9 @@ func (t *tun) Activate() error {
}
func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
r := t.routeTree.MostSpecificContains(ip)
if r != nil {
return r.(iputil.VpnIp)
ok, r := t.routeTree.MostSpecificContains(ip)
if ok {
return r
}
return 0


@@ -4,28 +4,51 @@
package overlay
import (
"bytes"
"errors"
"fmt"
"io"
"io/fs"
"net"
"os"
"os/exec"
"regexp"
"strconv"
"strings"
"syscall"
"unsafe"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/iputil"
)
var deviceNameRE = regexp.MustCompile(`^tun[0-9]+$`)
const (
// FIODGNAME is defined in sys/sys/filio.h on FreeBSD
// For 32-bit systems, use FIODGNAME_32 (not defined in this file: 0x80086678)
FIODGNAME = 0x80106678
)
type fiodgnameArg struct {
length int32
pad [4]byte
buf unsafe.Pointer
}
type ifreqRename struct {
Name [16]byte
Data uintptr
}
type ifreqDestroy struct {
Name [16]byte
pad [16]byte
}
type tun struct {
Device string
cidr *net.IPNet
MTU int
Routes []Route
routeTree *cidr.Tree4
routeTree *cidr.Tree4[iputil.VpnIp]
l *logrus.Logger
io.ReadWriteCloser
@@ -33,8 +56,23 @@ type tun struct {
func (t *tun) Close() error {
if t.ReadWriteCloser != nil {
return t.ReadWriteCloser.Close()
if err := t.ReadWriteCloser.Close(); err != nil {
return err
}
s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_IP)
if err != nil {
return err
}
defer syscall.Close(s)
ifreq := ifreqDestroy{Name: t.deviceBytes()}
// Destroy the interface
err = ioctl(uintptr(s), syscall.SIOCIFDESTROY, uintptr(unsafe.Pointer(&ifreq)))
return err
}
return nil
}
@@ -43,34 +81,87 @@ func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int
}
func newTun(l *logrus.Logger, deviceName string, cidr *net.IPNet, defaultMTU int, routes []Route, _ int, _ bool, _ bool) (*tun, error) {
// Try to open existing tun device
var file *os.File
var err error
if deviceName != "" {
file, err = os.OpenFile("/dev/"+deviceName, os.O_RDWR, 0)
}
if errors.Is(err, fs.ErrNotExist) || deviceName == "" {
// If the device doesn't already exist, request a new one and rename it
file, err = os.OpenFile("/dev/tun", os.O_RDWR, 0)
}
if err != nil {
return nil, err
}
rawConn, err := file.SyscallConn()
if err != nil {
return nil, fmt.Errorf("SyscallConn: %v", err)
}
var name [16]byte
var ctrlErr error
rawConn.Control(func(fd uintptr) {
// Read the name of the interface
arg := fiodgnameArg{length: 16, buf: unsafe.Pointer(&name)}
ctrlErr = ioctl(fd, FIODGNAME, uintptr(unsafe.Pointer(&arg)))
})
if ctrlErr != nil {
return nil, err
}
ifName := string(bytes.TrimRight(name[:], "\x00"))
if deviceName == "" {
deviceName = ifName
}
// If the name doesn't match the desired interface name, rename it now
if ifName != deviceName {
s, err := syscall.Socket(
syscall.AF_INET,
syscall.SOCK_DGRAM,
syscall.IPPROTO_IP,
)
if err != nil {
return nil, err
}
defer syscall.Close(s)
fd := uintptr(s)
var fromName [16]byte
var toName [16]byte
copy(fromName[:], ifName)
copy(toName[:], deviceName)
ifrr := ifreqRename{
Name: fromName,
Data: uintptr(unsafe.Pointer(&toName)),
}
// Set the device name
ioctl(fd, syscall.SIOCSIFNAME, uintptr(unsafe.Pointer(&ifrr)))
}
routeTree, err := makeRouteTree(l, routes, false)
if err != nil {
return nil, err
}
if strings.HasPrefix(deviceName, "/dev/") {
deviceName = strings.TrimPrefix(deviceName, "/dev/")
}
if !deviceNameRE.MatchString(deviceName) {
return nil, fmt.Errorf("tun.dev must match `tun[0-9]+`")
}
return &tun{
Device: deviceName,
cidr: cidr,
MTU: defaultMTU,
Routes: routes,
routeTree: routeTree,
l: l,
ReadWriteCloser: file,
Device: deviceName,
cidr: cidr,
MTU: defaultMTU,
Routes: routes,
routeTree: routeTree,
l: l,
}, nil
}
func (t *tun) Activate() error {
var err error
t.ReadWriteCloser, err = os.OpenFile("/dev/"+t.Device, os.O_RDWR, 0)
if err != nil {
return fmt.Errorf("activate failed: %v", err)
}
// TODO use syscalls instead of exec.Command
t.l.Debug("command: ifconfig", t.Device, t.cidr.String(), t.cidr.IP.String())
if err = exec.Command("/sbin/ifconfig", t.Device, t.cidr.String(), t.cidr.IP.String()).Run(); err != nil {
@@ -101,12 +192,8 @@ func (t *tun) Activate() error {
}
func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
r := t.routeTree.MostSpecificContains(ip)
if r != nil {
return r.(iputil.VpnIp)
}
return 0
_, r := t.routeTree.MostSpecificContains(ip)
return r
}
func (t *tun) Cidr() *net.IPNet {
@@ -120,3 +207,10 @@ func (t *tun) Name() string {
func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
return nil, fmt.Errorf("TODO: multiqueue not implemented for freebsd")
}
func (t *tun) deviceBytes() (o [16]byte) {
for i, c := range t.Device {
o[i] = byte(c)
}
return
}


@@ -20,7 +20,7 @@ import (
type tun struct {
io.ReadWriteCloser
cidr *net.IPNet
routeTree *cidr.Tree4
routeTree *cidr.Tree4[iputil.VpnIp]
}
func newTun(_ *logrus.Logger, _ string, _ *net.IPNet, _ int, _ []Route, _ int, _ bool, _ bool) (*tun, error) {
@@ -46,12 +46,8 @@ func (t *tun) Activate() error {
}
func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
r := t.routeTree.MostSpecificContains(ip)
if r != nil {
return r.(iputil.VpnIp)
}
return 0
_, r := t.routeTree.MostSpecificContains(ip)
return r
}
// The following is hoisted up from water, we do this so we can inject our own fd on iOS


@@ -30,7 +30,7 @@ type tun struct {
TXQueueLen int
Routes []Route
routeTree atomic.Pointer[cidr.Tree4]
routeTree atomic.Pointer[cidr.Tree4[iputil.VpnIp]]
routeChan chan struct{}
useSystemRoutes bool
@@ -43,14 +43,6 @@ type ifReq struct {
pad [8]byte
}
func ioctl(a1, a2, a3 uintptr) error {
_, _, errno := unix.Syscall(unix.SYS_IOCTL, a1, a2, a3)
if errno != 0 {
return errno
}
return nil
}
type ifreqAddr struct {
Name [16]byte
Addr unix.RawSockaddrInet4
@@ -162,12 +154,8 @@ func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
}
func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
r := t.routeTree.Load().MostSpecificContains(ip)
if r != nil {
return r.(iputil.VpnIp)
}
return 0
_, r := t.routeTree.Load().MostSpecificContains(ip)
return r
}
func (t *tun) Write(b []byte) (int, error) {
@@ -388,7 +376,7 @@ func (t *tun) updateRoutes(r netlink.RouteUpdate) {
return
}
newTree := cidr.NewTree4()
newTree := cidr.NewTree4[iputil.VpnIp]()
if r.Type == unix.RTM_NEWROUTE {
for _, oldR := range t.routeTree.Load().List() {
newTree.AddCIDR(oldR.CIDR, oldR.Value)
@@ -400,7 +388,7 @@ func (t *tun) updateRoutes(r netlink.RouteUpdate) {
} else {
gw := iputil.Ip2VpnIp(r.Gw)
for _, oldR := range t.routeTree.Load().List() {
if bytes.Equal(oldR.CIDR.IP, r.Dst.IP) && bytes.Equal(oldR.CIDR.Mask, r.Dst.Mask) && *oldR.Value != nil && (*oldR.Value).(iputil.VpnIp) == gw {
if bytes.Equal(oldR.CIDR.IP, r.Dst.IP) && bytes.Equal(oldR.CIDR.Mask, r.Dst.Mask) && oldR.Value == gw {
// This is the record to delete
t.l.WithField("destination", r.Dst).WithField("via", r.Gw).Info("Removing route")
continue

overlay/tun_netbsd.go (new file)

@@ -0,0 +1,158 @@
//go:build !e2e_testing
// +build !e2e_testing
package overlay
import (
"fmt"
"io"
"net"
"os"
"os/exec"
"regexp"
"strconv"
"syscall"
"unsafe"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/iputil"
)
type ifreqDestroy struct {
Name [16]byte
pad [16]byte
}
type tun struct {
Device string
cidr *net.IPNet
MTU int
Routes []Route
routeTree *cidr.Tree4[iputil.VpnIp]
l *logrus.Logger
io.ReadWriteCloser
}
func (t *tun) Close() error {
if t.ReadWriteCloser != nil {
if err := t.ReadWriteCloser.Close(); err != nil {
return err
}
s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_IP)
if err != nil {
return err
}
defer syscall.Close(s)
ifreq := ifreqDestroy{Name: t.deviceBytes()}
err = ioctl(uintptr(s), syscall.SIOCIFDESTROY, uintptr(unsafe.Pointer(&ifreq)))
return err
}
return nil
}
func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int, _ bool) (*tun, error) {
return nil, fmt.Errorf("newTunFromFd not supported in NetBSD")
}
var deviceNameRE = regexp.MustCompile(`^tun[0-9]+$`)
func newTun(l *logrus.Logger, deviceName string, cidr *net.IPNet, defaultMTU int, routes []Route, _ int, _ bool, _ bool) (*tun, error) {
// Try to open tun device
var file *os.File
var err error
if deviceName == "" {
return nil, fmt.Errorf("a device name in the format of /dev/tunN must be specified")
}
if !deviceNameRE.MatchString(deviceName) {
return nil, fmt.Errorf("a device name in the format of /dev/tunN must be specified")
}
file, err = os.OpenFile("/dev/"+deviceName, os.O_RDWR, 0)
if err != nil {
return nil, err
}
routeTree, err := makeRouteTree(l, routes, false)
if err != nil {
return nil, err
}
return &tun{
ReadWriteCloser: file,
Device: deviceName,
cidr: cidr,
MTU: defaultMTU,
Routes: routes,
routeTree: routeTree,
l: l,
}, nil
}
func (t *tun) Activate() error {
var err error
// TODO use syscalls instead of exec.Command
cmd := exec.Command("/sbin/ifconfig", t.Device, t.cidr.String(), t.cidr.IP.String())
t.l.Debug("command: ", cmd.String())
if err = cmd.Run(); err != nil {
return fmt.Errorf("failed to run 'ifconfig': %s", err)
}
cmd = exec.Command("/sbin/route", "-n", "add", "-net", t.cidr.String(), t.cidr.IP.String())
t.l.Debug("command: ", cmd.String())
if err = cmd.Run(); err != nil {
return fmt.Errorf("failed to run 'route add': %s", err)
}
cmd = exec.Command("/sbin/ifconfig", t.Device, "mtu", strconv.Itoa(t.MTU))
t.l.Debug("command: ", cmd.String())
if err = cmd.Run(); err != nil {
return fmt.Errorf("failed to run 'ifconfig': %s", err)
}
// Unsafe path routes
for _, r := range t.Routes {
if r.Via == nil || !r.Install {
// We don't allow route MTUs so only install routes with a via
continue
}
cmd = exec.Command("/sbin/route", "-n", "add", "-net", r.Cidr.String(), t.cidr.IP.String())
t.l.Debug("command: ", cmd.String())
if err = cmd.Run(); err != nil {
return fmt.Errorf("failed to run 'route add' for unsafe_route %s: %s", r.Cidr.String(), err)
}
}
return nil
}
func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
_, r := t.routeTree.MostSpecificContains(ip)
return r
}
func (t *tun) Cidr() *net.IPNet {
return t.cidr
}
func (t *tun) Name() string {
return t.Device
}
func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
return nil, fmt.Errorf("TODO: multiqueue not implemented for netbsd")
}
func (t *tun) deviceBytes() (o [16]byte) {
for i, c := range t.Device {
o[i] = byte(c)
}
return
}

overlay/tun_notwin.go (new file)

@@ -0,0 +1,14 @@
//go:build !windows
// +build !windows
package overlay
import "syscall"
func ioctl(a1, a2, a3 uintptr) error {
_, _, errno := syscall.Syscall(syscall.SYS_IOCTL, a1, a2, a3)
if errno != 0 {
return errno
}
return nil
}
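For reference, this helper gets used on the BSDs by packing the interface name into a fixed-size request struct and passing a pointer to it as the third syscall argument, exactly as the NetBSD Close above does. A minimal sketch (destroyIface is an illustrative name, not part of the diff):

func destroyIface(name string) error {
	s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_IP)
	if err != nil {
		return err
	}
	defer syscall.Close(s)
	var req ifreqDestroy // the 32-byte ifreq layout from tun_netbsd.go above
	copy(req.Name[:], name)
	return ioctl(uintptr(s), syscall.SIOCIFDESTROY, uintptr(unsafe.Pointer(&req)))
}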

overlay/tun_openbsd.go (new file, +170)

@@ -0,0 +1,170 @@
//go:build !e2e_testing
// +build !e2e_testing
package overlay
import (
"fmt"
"io"
"net"
"os"
"os/exec"
"regexp"
"strconv"
"syscall"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/iputil"
)
type tun struct {
Device string
cidr *net.IPNet
MTU int
Routes []Route
routeTree *cidr.Tree4[iputil.VpnIp]
l *logrus.Logger
io.ReadWriteCloser
// cache out buffer since we need to prepend 4 bytes for tun metadata
out []byte
}
func (t *tun) Close() error {
if t.ReadWriteCloser != nil {
return t.ReadWriteCloser.Close()
}
return nil
}
func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int, _ bool) (*tun, error) {
return nil, fmt.Errorf("newTunFromFd not supported in OpenBSD")
}
var deviceNameRE = regexp.MustCompile(`^tun[0-9]+$`)
func newTun(l *logrus.Logger, deviceName string, cidr *net.IPNet, defaultMTU int, routes []Route, _ int, _ bool, _ bool) (*tun, error) {
if deviceName == "" {
return nil, fmt.Errorf("a device name in the format of tunN must be specified")
}
if !deviceNameRE.MatchString(deviceName) {
return nil, fmt.Errorf("a device name in the format of tunN must be specified")
}
file, err := os.OpenFile("/dev/"+deviceName, os.O_RDWR, 0)
if err != nil {
return nil, err
}
routeTree, err := makeRouteTree(l, routes, false)
if err != nil {
return nil, err
}
return &tun{
ReadWriteCloser: file,
Device: deviceName,
cidr: cidr,
MTU: defaultMTU,
Routes: routes,
routeTree: routeTree,
l: l,
}, nil
}
func (t *tun) Activate() error {
var err error
// TODO use syscalls instead of exec.Command
cmd := exec.Command("/sbin/ifconfig", t.Device, t.cidr.String(), t.cidr.IP.String())
t.l.Debug("command: ", cmd.String())
if err = cmd.Run(); err != nil {
return fmt.Errorf("failed to run 'ifconfig': %s", err)
}
cmd = exec.Command("/sbin/ifconfig", t.Device, "mtu", strconv.Itoa(t.MTU))
t.l.Debug("command: ", cmd.String())
if err = cmd.Run(); err != nil {
return fmt.Errorf("failed to run 'ifconfig': %s", err)
}
cmd = exec.Command("/sbin/route", "-n", "add", "-inet", t.cidr.String(), t.cidr.IP.String())
t.l.Debug("command: ", cmd.String())
if err = cmd.Run(); err != nil {
return fmt.Errorf("failed to run 'route add': %s", err)
}
// Unsafe path routes
for _, r := range t.Routes {
if r.Via == nil || !r.Install {
// We don't allow route MTUs, so only install routes that have a via
continue
}
cmd = exec.Command("/sbin/route", "-n", "add", "-inet", r.Cidr.String(), t.cidr.IP.String())
t.l.Debug("command: ", cmd.String())
if err = cmd.Run(); err != nil {
return fmt.Errorf("failed to run 'route add' for unsafe_route %s: %s", r.Cidr.String(), err)
}
}
return nil
}
func (t *tun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
_, r := t.routeTree.MostSpecificContains(ip)
return r
}
func (t *tun) Cidr() *net.IPNet {
return t.cidr
}
func (t *tun) Name() string {
return t.Device
}
func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
return nil, fmt.Errorf("TODO: multiqueue not implemented for freebsd")
}
func (t *tun) Read(to []byte) (int, error) {
buf := make([]byte, len(to)+4)
n, err := t.ReadWriteCloser.Read(buf)
copy(to, buf[4:])
return n - 4, err
}
// Write is only valid for single threaded use
func (t *tun) Write(from []byte) (int, error) {
buf := t.out
if cap(buf) < len(from)+4 {
buf = make([]byte, len(from)+4)
t.out = buf
}
buf = buf[:len(from)+4]
if len(from) == 0 {
return 0, syscall.EIO
}
// Determine the IP Family for the NULL L2 Header
ipVer := from[0] >> 4
if ipVer == 4 {
buf[3] = syscall.AF_INET
} else if ipVer == 6 {
buf[3] = syscall.AF_INET6
} else {
return 0, fmt.Errorf("unable to determine IP version from packet")
}
copy(buf[4:], from)
n, err := t.ReadWriteCloser.Write(buf)
return n - 4, err
}
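Read and Write above implement tun(4)'s framing: every packet crossing the device is prefixed with a 4-byte header whose final byte carries the address family, derived from the IP version nibble of the first payload byte. The same logic as a standalone sketch (frameTunPacket is illustrative):

func frameTunPacket(packet []byte) ([]byte, error) {
	if len(packet) == 0 {
		return nil, fmt.Errorf("empty packet")
	}
	buf := make([]byte, len(packet)+4)
	switch packet[0] >> 4 {
	case 4:
		buf[3] = syscall.AF_INET
	case 6:
		buf[3] = syscall.AF_INET6
	default:
		return nil, fmt.Errorf("unable to determine IP version from packet")
	}
	copy(buf[4:], packet)
	return buf, nil
}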


@@ -8,6 +8,7 @@ import (
"io"
"net"
"os"
"sync/atomic"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cidr"
@@ -18,9 +19,10 @@ type TestTun struct {
Device string
cidr *net.IPNet
Routes []Route
routeTree *cidr.Tree4
routeTree *cidr.Tree4[iputil.VpnIp]
l *logrus.Logger
closed atomic.Bool
rxPackets chan []byte // Packets to receive into nebula
TxPackets chan []byte // Packets transmitted outside by nebula
}
@@ -50,6 +52,10 @@ func newTunFromFd(_ *logrus.Logger, _ int, _ *net.IPNet, _ int, _ []Route, _ int
// These are unencrypted ip layer frames destined for another nebula node.
// packets should exit the udp side, capture them with udpConn.Get
func (t *TestTun) Send(packet []byte) {
if t.closed.Load() {
return
}
if t.l.Level >= logrus.DebugLevel {
t.l.WithField("dataLen", len(packet)).Debug("Tun receiving injected packet")
}
@@ -77,12 +83,8 @@ func (t *TestTun) Get(block bool) []byte {
//********************************************************************************************************************//
func (t *TestTun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
r := t.routeTree.MostSpecificContains(ip)
if r != nil {
return r.(iputil.VpnIp)
}
return 0
_, r := t.routeTree.MostSpecificContains(ip)
return r
}
func (t *TestTun) Activate() error {
@@ -98,6 +100,10 @@ func (t *TestTun) Name() string {
}
func (t *TestTun) Write(b []byte) (n int, err error) {
if t.closed.Load() {
return 0, io.ErrClosedPipe
}
packet := make([]byte, len(b), len(b))
copy(packet, b)
t.TxPackets <- packet
@@ -105,7 +111,10 @@ func (t *TestTun) Write(b []byte) (n int, err error) {
}
func (t *TestTun) Close() error {
close(t.rxPackets)
if t.closed.CompareAndSwap(false, true) {
close(t.rxPackets)
close(t.TxPackets)
}
return nil
}


@@ -18,7 +18,7 @@ type waterTun struct {
cidr *net.IPNet
MTU int
Routes []Route
routeTree *cidr.Tree4
routeTree *cidr.Tree4[iputil.VpnIp]
*water.Interface
}
@@ -97,12 +97,8 @@ func (t *waterTun) Activate() error {
}
func (t *waterTun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
r := t.routeTree.MostSpecificContains(ip)
if r != nil {
return r.(iputil.VpnIp)
}
return 0
_, r := t.routeTree.MostSpecificContains(ip)
return r
}
func (t *waterTun) Cidr() *net.IPNet {


@@ -24,7 +24,7 @@ type winTun struct {
prefix netip.Prefix
MTU int
Routes []Route
routeTree *cidr.Tree4
routeTree *cidr.Tree4[iputil.VpnIp]
tun *wintun.NativeTun
}
@@ -54,9 +54,16 @@ func newWinTun(l *logrus.Logger, deviceName string, cidr *net.IPNet, defaultMTU
return nil, fmt.Errorf("generate GUID failed: %w", err)
}
tunDevice, err := wintun.CreateTUNWithRequestedGUID(deviceName, guid, defaultMTU)
var tunDevice wintun.Device
tunDevice, err = wintun.CreateTUNWithRequestedGUID(deviceName, guid, defaultMTU)
if err != nil {
return nil, fmt.Errorf("create TUN device failed: %w", err)
// Windows 10 has an issue with unclean shutdowns not fully cleaning up the wintun device.
// Trying a second time resolves the issue.
l.WithError(err).Debug("Failed to create wintun device, retrying")
tunDevice, err = wintun.CreateTUNWithRequestedGUID(deviceName, guid, defaultMTU)
if err != nil {
return nil, fmt.Errorf("create TUN device failed: %w", err)
}
}
routeTree, err := makeRouteTree(l, routes, false)
@@ -139,12 +146,8 @@ func (t *winTun) Activate() error {
}
func (t *winTun) RouteFor(ip iputil.VpnIp) iputil.VpnIp {
r := t.routeTree.MostSpecificContains(ip)
if r != nil {
return r.(iputil.VpnIp)
}
return 0
_, r := t.routeTree.MostSpecificContains(ip)
return r
}
func (t *winTun) Cidr() *net.IPNet {

overlay/user.go (new file, +63)

@@ -0,0 +1,63 @@
package overlay
import (
"io"
"net"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/iputil"
)
func NewUserDeviceFromConfig(c *config.C, l *logrus.Logger, tunCidr *net.IPNet, routines int) (Device, error) {
return NewUserDevice(tunCidr)
}
func NewUserDevice(tunCidr *net.IPNet) (Device, error) {
// these pipes guarantee each write/read will match 1:1
or, ow := io.Pipe()
ir, iw := io.Pipe()
return &UserDevice{
tunCidr: tunCidr,
outboundReader: or,
outboundWriter: ow,
inboundReader: ir,
inboundWriter: iw,
}, nil
}
type UserDevice struct {
tunCidr *net.IPNet
outboundReader *io.PipeReader
outboundWriter *io.PipeWriter
inboundReader *io.PipeReader
inboundWriter *io.PipeWriter
}
func (d *UserDevice) Activate() error {
return nil
}
func (d *UserDevice) Cidr() *net.IPNet { return d.tunCidr }
func (d *UserDevice) Name() string { return "faketun0" }
func (d *UserDevice) RouteFor(ip iputil.VpnIp) iputil.VpnIp { return ip }
func (d *UserDevice) NewMultiQueueReader() (io.ReadWriteCloser, error) {
return d, nil
}
func (d *UserDevice) Pipe() (*io.PipeReader, *io.PipeWriter) {
return d.inboundReader, d.outboundWriter
}
func (d *UserDevice) Read(p []byte) (n int, err error) {
return d.outboundReader.Read(p)
}
func (d *UserDevice) Write(p []byte) (n int, err error) {
return d.inboundWriter.Write(p)
}
func (d *UserDevice) Close() error {
d.inboundWriter.Close()
d.outboundWriter.Close()
return nil
}
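UserDevice swaps the kernel tun for a pair of in-process pipes, so an embedding application can exchange raw IP packets with Nebula directly. A hedged usage sketch (tunCidr, rawIPPacket and handlePacket are placeholders supplied by the caller):

dev, err := overlay.NewUserDevice(tunCidr)
if err != nil {
	panic(err)
}
// inbound: packets Nebula has decrypted for us; outbound: packets for Nebula to send.
inbound, outbound := dev.(*overlay.UserDevice).Pipe()
go func() {
	buf := make([]byte, 65535)
	for {
		n, err := inbound.Read(buf) // the pipes match reads and writes 1:1, one packet per read
		if err != nil {
			return
		}
		handlePacket(buf[:n])
	}
}()
outbound.Write(rawIPPacket) // inject one IP packet for Nebula to encrypt and route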

pki.go (new file, +248)

@@ -0,0 +1,248 @@
package nebula
import (
"errors"
"fmt"
"os"
"strings"
"sync/atomic"
"time"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/util"
)
type PKI struct {
cs atomic.Pointer[CertState]
caPool atomic.Pointer[cert.NebulaCAPool]
l *logrus.Logger
}
type CertState struct {
Certificate *cert.NebulaCertificate
RawCertificate []byte
RawCertificateNoKey []byte
PublicKey []byte
PrivateKey []byte
}
func NewPKIFromConfig(l *logrus.Logger, c *config.C) (*PKI, error) {
pki := &PKI{l: l}
err := pki.reload(c, true)
if err != nil {
return nil, err
}
c.RegisterReloadCallback(func(c *config.C) {
rErr := pki.reload(c, false)
if rErr != nil {
util.LogWithContextIfNeeded("Failed to reload PKI from config", rErr, l)
}
})
return pki, nil
}
func (p *PKI) GetCertState() *CertState {
return p.cs.Load()
}
func (p *PKI) GetCAPool() *cert.NebulaCAPool {
return p.caPool.Load()
}
func (p *PKI) reload(c *config.C, initial bool) error {
err := p.reloadCert(c, initial)
if err != nil {
if initial {
return err
}
err.Log(p.l)
}
err = p.reloadCAPool(c)
if err != nil {
if initial {
return err
}
err.Log(p.l)
}
return nil
}
func (p *PKI) reloadCert(c *config.C, initial bool) *util.ContextualError {
cs, err := newCertStateFromConfig(c)
if err != nil {
return util.NewContextualError("Could not load client cert", nil, err)
}
if !initial {
// did IP in cert change? if so, don't set
currentCert := p.cs.Load().Certificate
oldIPs := currentCert.Details.Ips
newIPs := cs.Certificate.Details.Ips
if len(oldIPs) > 0 && len(newIPs) > 0 && oldIPs[0].String() != newIPs[0].String() {
return util.NewContextualError(
"IP in new cert was different from old",
m{"new_ip": newIPs[0], "old_ip": oldIPs[0]},
nil,
)
}
}
p.cs.Store(cs)
if initial {
p.l.WithField("cert", cs.Certificate).Debug("Client nebula certificate")
} else {
p.l.WithField("cert", cs.Certificate).Info("Client cert refreshed from disk")
}
return nil
}
func (p *PKI) reloadCAPool(c *config.C) *util.ContextualError {
caPool, err := loadCAPoolFromConfig(p.l, c)
if err != nil {
return util.NewContextualError("Failed to load ca from config", nil, err)
}
p.caPool.Store(caPool)
p.l.WithField("fingerprints", caPool.GetFingerprints()).Debug("Trusted CA fingerprints")
return nil
}
func newCertState(certificate *cert.NebulaCertificate, privateKey []byte) (*CertState, error) {
// Marshal the certificate to ensure it is valid
rawCertificate, err := certificate.Marshal()
if err != nil {
return nil, fmt.Errorf("invalid nebula certificate on interface: %s", err)
}
publicKey := certificate.Details.PublicKey
cs := &CertState{
RawCertificate: rawCertificate,
Certificate: certificate,
PrivateKey: privateKey,
PublicKey: publicKey,
}
cs.Certificate.Details.PublicKey = nil
rawCertNoKey, err := cs.Certificate.Marshal()
if err != nil {
return nil, fmt.Errorf("error marshalling certificate no key: %s", err)
}
cs.RawCertificateNoKey = rawCertNoKey
// put public key back
cs.Certificate.Details.PublicKey = cs.PublicKey
return cs, nil
}
func newCertStateFromConfig(c *config.C) (*CertState, error) {
var pemPrivateKey []byte
var err error
privPathOrPEM := c.GetString("pki.key", "")
if privPathOrPEM == "" {
return nil, errors.New("no pki.key path or PEM data provided")
}
if strings.Contains(privPathOrPEM, "-----BEGIN") {
pemPrivateKey = []byte(privPathOrPEM)
privPathOrPEM = "<inline>"
} else {
pemPrivateKey, err = os.ReadFile(privPathOrPEM)
if err != nil {
return nil, fmt.Errorf("unable to read pki.key file %s: %s", privPathOrPEM, err)
}
}
rawKey, _, curve, err := cert.UnmarshalPrivateKey(pemPrivateKey)
if err != nil {
return nil, fmt.Errorf("error while unmarshaling pki.key %s: %s", privPathOrPEM, err)
}
var rawCert []byte
pubPathOrPEM := c.GetString("pki.cert", "")
if pubPathOrPEM == "" {
return nil, errors.New("no pki.cert path or PEM data provided")
}
if strings.Contains(pubPathOrPEM, "-----BEGIN") {
rawCert = []byte(pubPathOrPEM)
pubPathOrPEM = "<inline>"
} else {
rawCert, err = os.ReadFile(pubPathOrPEM)
if err != nil {
return nil, fmt.Errorf("unable to read pki.cert file %s: %s", pubPathOrPEM, err)
}
}
nebulaCert, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCert)
if err != nil {
return nil, fmt.Errorf("error while unmarshaling pki.cert %s: %s", pubPathOrPEM, err)
}
if nebulaCert.Expired(time.Now()) {
return nil, fmt.Errorf("nebula certificate for this host is expired")
}
if len(nebulaCert.Details.Ips) == 0 {
return nil, fmt.Errorf("no IPs encoded in certificate")
}
if err = nebulaCert.VerifyPrivateKey(curve, rawKey); err != nil {
return nil, fmt.Errorf("private key is not a pair with public key in nebula cert")
}
return newCertState(nebulaCert, rawKey)
}
func loadCAPoolFromConfig(l *logrus.Logger, c *config.C) (*cert.NebulaCAPool, error) {
var rawCA []byte
var err error
caPathOrPEM := c.GetString("pki.ca", "")
if caPathOrPEM == "" {
return nil, errors.New("no pki.ca path or PEM data provided")
}
if strings.Contains(caPathOrPEM, "-----BEGIN") {
rawCA = []byte(caPathOrPEM)
} else {
rawCA, err = os.ReadFile(caPathOrPEM)
if err != nil {
return nil, fmt.Errorf("unable to read pki.ca file %s: %s", caPathOrPEM, err)
}
}
caPool, err := cert.NewCAPoolFromBytes(rawCA)
if errors.Is(err, cert.ErrExpired) {
var expired int
for _, crt := range caPool.CAs {
if crt.Expired(time.Now()) {
expired++
l.WithField("cert", crt).Warn("expired certificate present in CA pool")
}
}
if expired >= len(caPool.CAs) {
return nil, errors.New("no valid CA certificates present")
}
} else if err != nil {
return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err)
}
for _, fp := range c.GetStringSlice("pki.blocklist", []string{}) {
l.WithField("fingerprint", fp).Info("Blocklisting cert")
caPool.BlocklistFingerprint(fp)
}
return caPool, nil
}
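Each of pki.ca, pki.cert and pki.key accepts either a filesystem path or inline PEM (anything containing "-----BEGIN" is treated as PEM), and pki.blocklist lists certificate fingerprints to reject. A sketch of the path form (the paths, fingerprint and logger are placeholders):

var c config.C
err := c.LoadString(`
pki:
  ca: /etc/nebula/ca.crt
  cert: /etc/nebula/host.crt
  key: /etc/nebula/host.key
  blocklist:
    - 0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
`)
if err != nil {
	panic(err)
}
pki, err := nebula.NewPKIFromConfig(logger, &c) // registers a reload callback on c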


@@ -131,9 +131,9 @@ func (rm *relayManager) handleCreateRelayResponse(h *HostInfo, f *Interface, m *
return
}
// I'm the middle man. Let the initiator know that I've established the relay they requested.
peerHostInfo, err := rm.hostmap.QueryVpnIp(relay.PeerIp)
if err != nil {
rm.l.WithError(err).WithField("relayTo", relay.PeerIp).Error("Can't find a HostInfo for peer")
peerHostInfo := rm.hostmap.QueryVpnIp(relay.PeerIp)
if peerHostInfo == nil {
rm.l.WithField("relayTo", relay.PeerIp).Error("Can't find a HostInfo for peer")
return
}
peerRelay, ok := peerHostInfo.relayState.QueryRelayForByIp(target)
@@ -179,6 +179,12 @@ func (rm *relayManager) handleCreateRelayRequest(h *HostInfo, f *Interface, m *N
"vpnIp": h.vpnIp})
logMsg.Info("handleCreateRelayRequest")
// Is the source of the relay me? This should never happen, but did happen due to
// an issue migrating relays over to newly re-handshaked host info objects.
if from == f.myVpnIp {
logMsg.WithField("myIP", f.myVpnIp).Error("Discarding relay request from myself")
return
}
// Is the target of the relay me?
if target == f.myVpnIp {
existingRelay, ok := h.relayState.QueryRelayForByIp(from)
@@ -240,11 +246,11 @@ func (rm *relayManager) handleCreateRelayRequest(h *HostInfo, f *Interface, m *N
if !rm.GetAmRelay() {
return
}
peer, err := rm.hostmap.QueryVpnIp(target)
if err != nil {
peer := rm.hostmap.QueryVpnIp(target)
if peer == nil {
// Try to establish a connection to this host. If we get a future relay request,
// we'll be ready!
f.getOrHandshake(target)
f.Handshake(target)
return
}
if peer.remote == nil {
@@ -253,6 +259,7 @@ func (rm *relayManager) handleCreateRelayRequest(h *HostInfo, f *Interface, m *N
}
sendCreateRequest := false
var index uint32
var err error
targetRelay, ok := peer.relayState.QueryRelayForByIp(from)
if ok {
index = targetRelay.LocalIndex


@@ -70,7 +70,7 @@ type hostnamesResults struct {
hostnames []hostnamePort
network string
lookupTimeout time.Duration
stop chan struct{}
cancelFn func()
l *logrus.Logger
ips atomic.Pointer[map[netip.AddrPort]struct{}]
}
@@ -80,7 +80,6 @@ func NewHostnameResults(ctx context.Context, l *logrus.Logger, d time.Duration,
hostnames: make([]hostnamePort, len(hostPorts)),
network: network,
lookupTimeout: timeout,
stop: make(chan (struct{})),
l: l,
}
@@ -115,6 +114,8 @@ func NewHostnameResults(ctx context.Context, l *logrus.Logger, d time.Duration,
// Time for the DNS lookup goroutine
if performBackgroundLookup {
newCtx, cancel := context.WithCancel(ctx)
r.cancelFn = cancel
ticker := time.NewTicker(d)
go func() {
defer ticker.Stop()
@@ -154,9 +155,7 @@ func NewHostnameResults(ctx context.Context, l *logrus.Logger, d time.Duration,
onUpdate()
}
select {
case <-ctx.Done():
return
case <-r.stop:
case <-newCtx.Done():
return
case <-ticker.C:
continue
@@ -169,8 +168,8 @@ func NewHostnameResults(ctx context.Context, l *logrus.Logger, d time.Duration,
}
func (hr *hostnamesResults) Cancel() {
if hr != nil {
hr.stop <- struct{}{}
if hr != nil && hr.cancelFn != nil {
hr.cancelFn()
}
}
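The diff above trades a bespoke stop channel for context cancellation: cancelFn is only assigned when the background lookup goroutine actually starts, and calling it is idempotent and safe either way. The general shape of the pattern (a sketch, not the file's exact code):

newCtx, cancel := context.WithCancel(ctx)
r.cancelFn = cancel
go func() {
	ticker := time.NewTicker(d)
	defer ticker.Stop()
	for {
		select {
		case <-newCtx.Done(): // fires on parent cancellation or r.Cancel()
			return
		case <-ticker.C:
			// re-resolve the configured hostnames
		}
	}
}()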
@@ -582,20 +581,11 @@ func (r *RemoteList) unlockedCollect() {
dnsAddrs := r.hr.GetIPs()
for _, addr := range dnsAddrs {
if r.shouldAdd == nil || r.shouldAdd(addr.Addr()) {
switch {
case addr.Addr().Is4():
v4 := addr.Addr().As4()
addrs = append(addrs, &udp.Addr{
IP: v4[:],
Port: addr.Port(),
})
case addr.Addr().Is6():
v6 := addr.Addr().As16()
addrs = append(addrs, &udp.Addr{
IP: v6[:],
Port: addr.Port(),
})
}
v6 := addr.Addr().As16()
addrs = append(addrs, &udp.Addr{
IP: v6[:],
Port: addr.Port(),
})
}
}
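Dropping the Is4/Is6 branch is sound because netip's As16 always yields a 16-byte form: IPv4 addresses come back IPv4-mapped (::ffff:a.b.c.d), which net.IP unwraps transparently. A quick illustration:

a := netip.MustParseAddr("192.0.2.1")
v6 := a.As16()             // 16 bytes holding ::ffff:192.0.2.1
fmt.Println(net.IP(v6[:])) // prints 192.0.2.1; net.IP recognizes the 4-in-6 mapping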

service/listener.go (new file, +36)

@@ -0,0 +1,36 @@
package service
import (
"io"
"net"
)
type tcpListener struct {
port uint16
s *Service
addr *net.TCPAddr
accept chan net.Conn
}
func (l *tcpListener) Accept() (net.Conn, error) {
conn, ok := <-l.accept
if !ok {
return nil, io.EOF
}
return conn, nil
}
func (l *tcpListener) Close() error {
l.s.mu.Lock()
defer l.s.mu.Unlock()
delete(l.s.mu.listeners, uint16(l.addr.Port))
close(l.accept)
return nil
}
// Addr returns the listener's network address.
func (l *tcpListener) Addr() net.Addr {
return l.addr
}

service/service.go (new file, +248)

@@ -0,0 +1,248 @@
package service
import (
"bytes"
"context"
"errors"
"fmt"
"log"
"math"
"net"
"os"
"strings"
"sync"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/overlay"
"golang.org/x/sync/errgroup"
"gvisor.dev/gvisor/pkg/bufferv2"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/link/channel"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
"gvisor.dev/gvisor/pkg/tcpip/network/ipv6"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"gvisor.dev/gvisor/pkg/tcpip/transport/icmp"
"gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
"gvisor.dev/gvisor/pkg/tcpip/transport/udp"
"gvisor.dev/gvisor/pkg/waiter"
)
const nicID = 1
type Service struct {
eg *errgroup.Group
control *nebula.Control
ipstack *stack.Stack
mu struct {
sync.Mutex
listeners map[uint16]*tcpListener
}
}
func New(config *config.C) (*Service, error) {
logger := logrus.New()
logger.Out = os.Stdout
control, err := nebula.Main(config, false, "custom-app", logger, overlay.NewUserDeviceFromConfig)
if err != nil {
return nil, err
}
control.Start()
ctx := control.Context()
eg, ctx := errgroup.WithContext(ctx)
s := Service{
eg: eg,
control: control,
}
s.mu.listeners = map[uint16]*tcpListener{}
device, ok := control.Device().(*overlay.UserDevice)
if !ok {
return nil, errors.New("must be using user device")
}
s.ipstack = stack.New(stack.Options{
NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol, udp.NewProtocol, icmp.NewProtocol4, icmp.NewProtocol6},
})
sackEnabledOpt := tcpip.TCPSACKEnabled(true) // TCP SACK is disabled by default
tcpipErr := s.ipstack.SetTransportProtocolOption(tcp.ProtocolNumber, &sackEnabledOpt)
if tcpipErr != nil {
return nil, fmt.Errorf("could not enable TCP SACK: %v", tcpipErr)
}
linkEP := channel.New(/*size*/ 512, /*mtu*/ 1280, "")
if tcpipProblem := s.ipstack.CreateNIC(nicID, linkEP); tcpipProblem != nil {
return nil, fmt.Errorf("could not create netstack NIC: %v", tcpipProblem)
}
ipv4Subnet, _ := tcpip.NewSubnet(tcpip.Address(strings.Repeat("\x00", 4)), tcpip.AddressMask(strings.Repeat("\x00", 4)))
s.ipstack.SetRouteTable([]tcpip.Route{
{
Destination: ipv4Subnet,
NIC: nicID,
},
})
ipNet := device.Cidr()
pa := tcpip.ProtocolAddress{
AddressWithPrefix: tcpip.Address(ipNet.IP).WithPrefix(),
Protocol: ipv4.ProtocolNumber,
}
if err := s.ipstack.AddProtocolAddress(nicID, pa, stack.AddressProperties{
PEB: stack.CanBePrimaryEndpoint, // zero value default
ConfigType: stack.AddressConfigStatic, // zero value default
}); err != nil {
return nil, fmt.Errorf("error creating IP: %s", err)
}
const tcpReceiveBufferSize = 0
const maxInFlightConnectionAttempts = 1024
tcpFwd := tcp.NewForwarder(s.ipstack, tcpReceiveBufferSize, maxInFlightConnectionAttempts, s.tcpHandler)
s.ipstack.SetTransportProtocolHandler(tcp.ProtocolNumber, tcpFwd.HandlePacket)
reader, writer := device.Pipe()
go func() {
<-ctx.Done()
reader.Close()
writer.Close()
}()
// create goroutines to forward packets between Nebula and gVisor
eg.Go(func() error {
buf := make([]byte, header.IPv4MaximumHeaderSize+header.IPv4MaximumPayloadSize)
for {
// this will read exactly one packet
n, err := reader.Read(buf)
if err != nil {
return err
}
packetBuf := stack.NewPacketBuffer(stack.PacketBufferOptions{
Payload: bufferv2.MakeWithData(bytes.Clone(buf[:n])),
})
linkEP.InjectInbound(header.IPv4ProtocolNumber, packetBuf)
if err := ctx.Err(); err != nil {
return err
}
}
})
eg.Go(func() error {
for {
packet := linkEP.ReadContext(ctx)
if packet.IsNil() {
if err := ctx.Err(); err != nil {
return err
}
continue
}
bufView := packet.ToView()
if _, err := bufView.WriteTo(writer); err != nil {
return err
}
bufView.Release()
}
})
return &s, nil
}
// DialContext dials the provided address. Currently only TCP is supported.
func (s *Service) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
if network != "tcp" && network != "tcp4" {
return nil, errors.New("only tcp is supported")
}
addr, err := net.ResolveTCPAddr(network, address)
if err != nil {
return nil, err
}
fullAddr := tcpip.FullAddress{
NIC: nicID,
Addr: tcpip.Address(addr.IP),
Port: uint16(addr.Port),
}
return gonet.DialContextTCP(ctx, s.ipstack, fullAddr, ipv4.ProtocolNumber)
}
// Listen listens on the provided address. Currently only TCP with a
// wildcard address is supported.
func (s *Service) Listen(network, address string) (net.Listener, error) {
if network != "tcp" && network != "tcp4" {
return nil, errors.New("only tcp is supported")
}
addr, err := net.ResolveTCPAddr(network, address)
if err != nil {
return nil, err
}
if addr.IP != nil && !bytes.Equal(addr.IP, []byte{0, 0, 0, 0}) {
return nil, fmt.Errorf("only wildcard address supported, got %q %v", address, addr.IP)
}
if addr.Port == 0 {
return nil, errors.New("specific port required, got 0")
}
if addr.Port < 0 || addr.Port >= math.MaxUint16 {
return nil, fmt.Errorf("invalid port %d", addr.Port)
}
port := uint16(addr.Port)
l := &tcpListener{
port: port,
s: s,
addr: addr,
accept: make(chan net.Conn),
}
s.mu.Lock()
defer s.mu.Unlock()
if _, ok := s.mu.listeners[port]; ok {
return nil, fmt.Errorf("already listening on port %d", port)
}
s.mu.listeners[port] = l
return l, nil
}
func (s *Service) Wait() error {
return s.eg.Wait()
}
func (s *Service) Close() error {
s.control.Stop()
return nil
}
func (s *Service) tcpHandler(r *tcp.ForwarderRequest) {
endpointID := r.ID()
s.mu.Lock()
defer s.mu.Unlock()
l, ok := s.mu.listeners[endpointID.LocalPort]
if !ok {
r.Complete(true)
return
}
var wq waiter.Queue
ep, err := r.CreateEndpoint(&wq)
if err != nil {
log.Printf("got error creating endpoint %q", err)
r.Complete(true)
return
}
r.Complete(false)
ep.SocketOptions().SetKeepAlive(true)
conn := gonet.NewTCPConn(&wq, ep)
l.accept <- conn
}
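End to end, Service exposes a dial/listen API served by the netstack above; service_test.go below exercises the full flow between two nodes. A compact sketch (cfg and the addresses are placeholders):

svc, err := service.New(cfg) // cfg: a *config.C carrying pki, lighthouse, listen, etc.
if err != nil {
	panic(err)
}
ln, err := svc.Listen("tcp", ":8080") // wildcard host only; port must be nonzero
if err != nil {
	panic(err)
}
go func() {
	conn, err := ln.Accept()
	if err != nil {
		return
	}
	defer conn.Close()
	conn.Write([]byte("hello over nebula"))
}()
conn, err := svc.DialContext(context.Background(), "tcp", "10.0.0.1:8080")
if err != nil {
	panic(err)
}
defer conn.Close()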

service/service_test.go (new file, +165)

@@ -0,0 +1,165 @@
package service
import (
"bytes"
"context"
"errors"
"net"
"testing"
"time"
"dario.cat/mergo"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/e2e"
"golang.org/x/sync/errgroup"
"gopkg.in/yaml.v2"
)
type m map[string]interface{}
func newSimpleService(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) *Service {
vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
copy(vpnIpNet.IP, udpIp)
_, _, myPrivKey, myPEM := e2e.NewTestCert(caCrt, caKey, "a", time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})
caB, err := caCrt.MarshalToPEM()
if err != nil {
panic(err)
}
mc := m{
"pki": m{
"ca": string(caB),
"cert": string(myPEM),
"key": string(myPrivKey),
},
//"tun": m{"disabled": true},
"firewall": m{
"outbound": []m{{
"proto": "any",
"port": "any",
"host": "any",
}},
"inbound": []m{{
"proto": "any",
"port": "any",
"host": "any",
}},
},
"timers": m{
"pending_deletion_interval": 2,
"connection_alive_interval": 2,
},
"handshakes": m{
"try_interval": "200ms",
},
}
if overrides != nil {
err = mergo.Merge(&overrides, mc, mergo.WithAppendSlice)
if err != nil {
panic(err)
}
mc = overrides
}
cb, err := yaml.Marshal(mc)
if err != nil {
panic(err)
}
var c config.C
if err := c.LoadString(string(cb)); err != nil {
panic(err)
}
s, err := New(&c)
if err != nil {
panic(err)
}
return s
}
func TestService(t *testing.T) {
ca, _, caKey, _ := e2e.NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
a := newSimpleService(ca, caKey, "a", net.IP{10, 0, 0, 1}, m{
"static_host_map": m{},
"lighthouse": m{
"am_lighthouse": true,
},
"listen": m{
"host": "0.0.0.0",
"port": 4243,
},
})
b := newSimpleService(ca, caKey, "b", net.IP{10, 0, 0, 2}, m{
"static_host_map": m{
"10.0.0.1": []string{"localhost:4243"},
},
"lighthouse": m{
"hosts": []string{"10.0.0.1"},
"interval": 1,
},
})
ln, err := a.Listen("tcp", ":1234")
if err != nil {
t.Fatal(err)
}
var eg errgroup.Group
eg.Go(func() error {
conn, err := ln.Accept()
if err != nil {
return err
}
defer conn.Close()
t.Log("accepted connection")
if _, err := conn.Write([]byte("server msg")); err != nil {
return err
}
t.Log("server: wrote message")
data := make([]byte, 100)
n, err := conn.Read(data)
if err != nil {
return err
}
data = data[:n]
if !bytes.Equal(data, []byte("client msg")) {
return errors.New("got invalid message from client")
}
t.Log("server: read message")
return conn.Close()
})
c, err := b.DialContext(context.Background(), "tcp", "10.0.0.1:1234")
if err != nil {
t.Fatal(err)
}
if _, err := c.Write([]byte("client msg")); err != nil {
t.Fatal(err)
}
data := make([]byte, 100)
n, err := c.Read(data)
if err != nil {
t.Fatal(err)
}
data = data[:n]
if !bytes.Equal(data, []byte("server msg")) {
t.Fatal("got invalid message from client")
}
if err := c.Close(); err != nil {
t.Fatal(err)
}
if err := eg.Wait(); err != nil {
t.Fatal(err)
}
}

ssh.go (71 changed lines)

@@ -3,9 +3,9 @@ package nebula
import (
"bytes"
"encoding/json"
"errors"
"flag"
"fmt"
"io/ioutil"
"net"
"os"
"reflect"
@@ -95,7 +95,7 @@ func configSSH(l *logrus.Logger, ssh *sshd.SSHServer, c *config.C) (func(), erro
return nil, fmt.Errorf("sshd.host_key must be provided")
}
hostKeyBytes, err := ioutil.ReadFile(hostKeyFile)
hostKeyBytes, err := os.ReadFile(hostKeyFile)
if err != nil {
return nil, fmt.Errorf("error while loading sshd.host_key file: %s", err)
}
@@ -168,7 +168,7 @@ func configSSH(l *logrus.Logger, ssh *sshd.SSHServer, c *config.C) (func(), erro
return runner, nil
}
func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap *HostMap, pendingHostMap *HostMap, lightHouse *LightHouse, ifce *Interface) {
func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, f *Interface) {
ssh.RegisterCommand(&sshd.Command{
Name: "list-hostmap",
ShortDescription: "List all known previously connected hosts",
@@ -181,7 +181,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
return fl, &s
},
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
return sshListHostMap(hostMap, fs, w)
return sshListHostMap(f.hostMap, fs, w)
},
})
@@ -197,7 +197,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
return fl, &s
},
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
return sshListHostMap(pendingHostMap, fs, w)
return sshListHostMap(f.handshakeManager, fs, w)
},
})
@@ -212,7 +212,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
return fl, &s
},
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
return sshListLighthouseMap(lightHouse, fs, w)
return sshListLighthouseMap(f.lightHouse, fs, w)
},
})
@@ -277,7 +277,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
Name: "version",
ShortDescription: "Prints the currently running version of nebula",
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
return sshVersion(ifce, fs, a, w)
return sshVersion(f, fs, a, w)
},
})
@@ -293,7 +293,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
return fl, &s
},
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
return sshPrintCert(ifce, fs, a, w)
return sshPrintCert(f, fs, a, w)
},
})
@@ -307,7 +307,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
return fl, &s
},
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
return sshPrintTunnel(ifce, fs, a, w)
return sshPrintTunnel(f, fs, a, w)
},
})
@@ -321,7 +321,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
return fl, &s
},
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
return sshPrintRelays(ifce, fs, a, w)
return sshPrintRelays(f, fs, a, w)
},
})
@@ -335,7 +335,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
return fl, &s
},
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
return sshChangeRemote(ifce, fs, a, w)
return sshChangeRemote(f, fs, a, w)
},
})
@@ -349,7 +349,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
return fl, &s
},
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
return sshCloseTunnel(ifce, fs, a, w)
return sshCloseTunnel(f, fs, a, w)
},
})
@@ -364,7 +364,7 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
return fl, &s
},
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
return sshCreateTunnel(ifce, fs, a, w)
return sshCreateTunnel(f, fs, a, w)
},
})
@@ -373,12 +373,12 @@ func attachCommands(l *logrus.Logger, c *config.C, ssh *sshd.SSHServer, hostMap
ShortDescription: "Query the lighthouses for the provided vpn ip",
Help: "This command is asynchronous. Only currently known udp ips will be printed.",
Callback: func(fs interface{}, a []string, w sshd.StringWriter) error {
return sshQueryLighthouse(ifce, fs, a, w)
return sshQueryLighthouse(f, fs, a, w)
},
})
}
func sshListHostMap(hostMap *HostMap, a interface{}, w sshd.StringWriter) error {
func sshListHostMap(hl controlHostLister, a interface{}, w sshd.StringWriter) error {
fs, ok := a.(*sshListHostMapFlags)
if !ok {
//TODO: error
@@ -387,9 +387,9 @@ func sshListHostMap(hostMap *HostMap, a interface{}, w sshd.StringWriter) error
var hm []ControlHostInfo
if fs.ByIndex {
hm = listHostMapIndexes(hostMap)
hm = listHostMapIndexes(hl)
} else {
hm = listHostMapHosts(hostMap)
hm = listHostMapHosts(hl)
}
sort.Slice(hm, func(i, j int) bool {
@@ -518,7 +518,7 @@ func sshQueryLighthouse(ifce *Interface, fs interface{}, a []string, w sshd.Stri
}
var cm *CacheMap
rl := ifce.lightHouse.Query(vpnIp, ifce)
rl := ifce.lightHouse.Query(vpnIp)
if rl != nil {
cm = rl.CopyCache()
}
@@ -546,8 +546,8 @@ func sshCloseTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringWr
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
}
hostInfo, err := ifce.hostMap.QueryVpnIp(vpnIp)
if err != nil {
hostInfo := ifce.hostMap.QueryVpnIp(vpnIp)
if hostInfo == nil {
return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0]))
}
@@ -588,12 +588,12 @@ func sshCreateTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringW
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
}
hostInfo, _ := ifce.hostMap.QueryVpnIp(vpnIp)
hostInfo := ifce.hostMap.QueryVpnIp(vpnIp)
if hostInfo != nil {
return w.WriteLine(fmt.Sprintf("Tunnel already exists"))
}
hostInfo, _ = ifce.handshakeManager.pendingHostMap.QueryVpnIp(vpnIp)
hostInfo = ifce.handshakeManager.QueryVpnIp(vpnIp)
if hostInfo != nil {
return w.WriteLine(fmt.Sprintf("Tunnel already handshaking"))
}
@@ -606,11 +606,10 @@ func sshCreateTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringW
}
}
hostInfo = ifce.handshakeManager.AddVpnIp(vpnIp, ifce.initHostInfo)
hostInfo = ifce.handshakeManager.StartHandshake(vpnIp, nil)
if addr != nil {
hostInfo.SetRemote(addr)
}
ifce.getOrHandshake(vpnIp)
return w.WriteLine("Created")
}
@@ -645,8 +644,8 @@ func sshChangeRemote(ifce *Interface, fs interface{}, a []string, w sshd.StringW
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
}
hostInfo, err := ifce.hostMap.QueryVpnIp(vpnIp)
if err != nil {
hostInfo := ifce.hostMap.QueryVpnIp(vpnIp)
if hostInfo == nil {
return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0]))
}
@@ -753,7 +752,7 @@ func sshPrintCert(ifce *Interface, fs interface{}, a []string, w sshd.StringWrit
return nil
}
cert := ifce.certState.Load().certificate
cert := ifce.pki.GetCertState().Certificate
if len(a) > 0 {
parsedIp := net.ParseIP(a[0])
if parsedIp == nil {
@@ -765,8 +764,8 @@ func sshPrintCert(ifce *Interface, fs interface{}, a []string, w sshd.StringWrit
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
}
hostInfo, err := ifce.hostMap.QueryVpnIp(vpnIp)
if err != nil {
hostInfo := ifce.hostMap.QueryVpnIp(vpnIp)
if hostInfo == nil {
return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0]))
}
@@ -851,9 +850,9 @@ func sshPrintRelays(ifce *Interface, fs interface{}, a []string, w sshd.StringWr
for k, v := range relays {
ro := RelayOutput{NebulaIp: v.vpnIp}
co.Relays = append(co.Relays, &ro)
relayHI, err := ifce.hostMap.QueryVpnIp(v.vpnIp)
if err != nil {
ro.RelayForIps = append(ro.RelayForIps, RelayFor{Error: err})
relayHI := ifce.hostMap.QueryVpnIp(v.vpnIp)
if relayHI == nil {
ro.RelayForIps = append(ro.RelayForIps, RelayFor{Error: errors.New("could not find hostinfo")})
continue
}
for _, vpnIp := range relayHI.relayState.CopyRelayForIps() {
@@ -889,8 +888,8 @@ func sshPrintRelays(ifce *Interface, fs interface{}, a []string, w sshd.StringWr
rf.Error = fmt.Errorf("hostmap LocalIndex '%v' does not match RelayState LocalIndex", k)
}
}
relayedHI, err := ifce.hostMap.QueryVpnIp(vpnIp)
if err == nil {
relayedHI := ifce.hostMap.QueryVpnIp(vpnIp)
if relayedHI != nil {
rf.RelayedThrough = append(rf.RelayedThrough, relayedHI.relayState.CopyRelayIps()...)
}
@@ -925,8 +924,8 @@ func sshPrintTunnel(ifce *Interface, fs interface{}, a []string, w sshd.StringWr
return w.WriteLine(fmt.Sprintf("The provided vpn ip could not be parsed: %s", a[0]))
}
hostInfo, err := ifce.hostMap.QueryVpnIp(vpnIp)
if err != nil {
hostInfo := ifce.hostMap.QueryVpnIp(vpnIp)
if hostInfo == nil {
return w.WriteLine(fmt.Sprintf("Could not find tunnel for vpn ip: %v", a[0]))
}


@@ -1,7 +1,7 @@
package test
import (
"io/ioutil"
"io"
"os"
"github.com/sirupsen/logrus"
@@ -12,7 +12,7 @@ func NewLogger() *logrus.Logger {
v := os.Getenv("TEST_LOGS")
if v == "" {
l.SetOutput(ioutil.Discard)
l.SetOutput(io.Discard)
return l
}


@@ -1,6 +1,7 @@
package udp
import (
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/header"
)
@@ -18,3 +19,33 @@ type EncReader func(
q int,
localCache firewall.ConntrackCache,
)
type Conn interface {
Rebind() error
LocalAddr() (*Addr, error)
ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firewall.ConntrackCacheTicker, q int)
WriteTo(b []byte, addr *Addr) error
ReloadConfig(c *config.C)
Close() error
}
type NoopConn struct{}
func (NoopConn) Rebind() error {
return nil
}
func (NoopConn) LocalAddr() (*Addr, error) {
return nil, nil
}
func (NoopConn) ListenOut(_ EncReader, _ LightHouseHandlerFunc, _ *firewall.ConntrackCacheTicker, _ int) {
return
}
func (NoopConn) WriteTo(_ []byte, _ *Addr) error {
return nil
}
func (NoopConn) ReloadConfig(_ *config.C) {
return
}
func (NoopConn) Close() error {
return nil
}
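With Conn promoted to an interface, each backend can assert conformance at compile time; the generic backend below does exactly this for GenericConn. The idiom in isolation:

// Compile-time proof that these types satisfy udp.Conn; the build fails otherwise.
var (
	_ Conn = &GenericConn{}
	_ Conn = NoopConn{}
)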


@@ -8,9 +8,14 @@ import (
"net"
"syscall"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
func NewListener(l *logrus.Logger, ip net.IP, port int, multi bool, batch int) (Conn, error) {
return NewGenericListener(l, ip, port, multi, batch)
}
func NewListenConfig(multi bool) net.ListenConfig {
return net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
@@ -34,6 +39,6 @@ func NewListenConfig(multi bool) net.ListenConfig {
}
}
func (u *Conn) Rebind() error {
func (u *GenericConn) Rebind() error {
return nil
}

udp/udp_bsd.go (new file, +47)

@@ -0,0 +1,47 @@
//go:build (openbsd || freebsd) && !e2e_testing
// +build openbsd freebsd
// +build !e2e_testing
package udp
// OpenBSD and FreeBSD support is primarily implemented in udp_generic, besides NewListenConfig
import (
"fmt"
"net"
"syscall"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
func NewListener(l *logrus.Logger, ip net.IP, port int, multi bool, batch int) (Conn, error) {
return NewGenericListener(l, ip, port, multi, batch)
}
func NewListenConfig(multi bool) net.ListenConfig {
return net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
if multi {
var controlErr error
err := c.Control(func(fd uintptr) {
if err := syscall.SetsockoptInt(int(fd), syscall.SOL_SOCKET, unix.SO_REUSEPORT, 1); err != nil {
controlErr = fmt.Errorf("SO_REUSEPORT failed: %v", err)
return
}
})
if err != nil {
return err
}
if controlErr != nil {
return controlErr
}
}
return nil
},
}
}
func (u *GenericConn) Rebind() error {
return nil
}
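SO_REUSEPORT is what lets several sockets bind the same UDP port, one per read routine, when multiqueue is enabled. Roughly how a caller would build that set (the port and counts are placeholders):

conns := make([]Conn, 4)
for i := range conns {
	c, err := NewListener(l, net.IPv4zero, 4242, true /*multi*/, 64 /*batch*/)
	if err != nil {
		panic(err)
	}
	conns[i] = c
}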


@@ -10,9 +10,14 @@ import (
"net"
"syscall"
"github.com/sirupsen/logrus"
"golang.org/x/sys/unix"
)
func NewListener(l *logrus.Logger, ip net.IP, port int, multi bool, batch int) (Conn, error) {
return NewGenericListener(l, ip, port, multi, batch)
}
func NewListenConfig(multi bool) net.ListenConfig {
return net.ListenConfig{
Control: func(network, address string, c syscall.RawConn) error {
@@ -37,11 +42,16 @@ func NewListenConfig(multi bool) net.ListenConfig {
}
}
func (u *Conn) Rebind() error {
file, err := u.File()
func (u *GenericConn) Rebind() error {
rc, err := u.UDPConn.SyscallConn()
if err != nil {
return err
}
return syscall.SetsockoptInt(int(file.Fd()), unix.IPPROTO_IPV6, unix.IPV6_BOUND_IF, 0)
return rc.Control(func(fd uintptr) {
err := syscall.SetsockoptInt(int(fd), unix.IPPROTO_IPV6, unix.IPV6_BOUND_IF, 0)
if err != nil {
u.l.WithError(err).Error("Failed to rebind udp socket")
}
})
}
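The switch from u.File() to SyscallConn matters: File duplicates the descriptor, leaving another fd to close or leak, while RawConn.Control runs the setsockopt against the live socket fd under the runtime's supervision. The pattern in general form (controlSocket is an illustrative name):

func controlSocket(conn *net.UDPConn) error {
	rc, err := conn.SyscallConn()
	if err != nil {
		return err
	}
	return rc.Control(func(fd uintptr) {
		// fd is guaranteed valid for the duration of this callback
	})
}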


@@ -18,30 +18,32 @@ import (
"github.com/slackhq/nebula/header"
)
type Conn struct {
type GenericConn struct {
*net.UDPConn
l *logrus.Logger
}
func NewListener(l *logrus.Logger, ip net.IP, port int, multi bool, batch int) (*Conn, error) {
var _ Conn = &GenericConn{}
func NewGenericListener(l *logrus.Logger, ip net.IP, port int, multi bool, batch int) (Conn, error) {
lc := NewListenConfig(multi)
pc, err := lc.ListenPacket(context.TODO(), "udp", net.JoinHostPort(ip.String(), fmt.Sprintf("%v", port)))
if err != nil {
return nil, err
}
if uc, ok := pc.(*net.UDPConn); ok {
return &Conn{UDPConn: uc, l: l}, nil
return &GenericConn{UDPConn: uc, l: l}, nil
}
return nil, fmt.Errorf("Unexpected PacketConn: %T %#v", pc, pc)
}
func (uc *Conn) WriteTo(b []byte, addr *Addr) error {
_, err := uc.UDPConn.WriteToUDP(b, &net.UDPAddr{IP: addr.IP, Port: int(addr.Port)})
func (u *GenericConn) WriteTo(b []byte, addr *Addr) error {
_, err := u.UDPConn.WriteToUDP(b, &net.UDPAddr{IP: addr.IP, Port: int(addr.Port)})
return err
}
func (uc *Conn) LocalAddr() (*Addr, error) {
a := uc.UDPConn.LocalAddr()
func (u *GenericConn) LocalAddr() (*Addr, error) {
a := u.UDPConn.LocalAddr()
switch v := a.(type) {
case *net.UDPAddr:
@@ -55,11 +57,11 @@ func (uc *Conn) LocalAddr() (*Addr, error) {
}
}
func (u *Conn) ReloadConfig(c *config.C) {
func (u *GenericConn) ReloadConfig(c *config.C) {
// TODO
}
func NewUDPStatsEmitter(udpConns []*Conn) func() {
func NewUDPStatsEmitter(udpConns []Conn) func() {
// No UDP stats for non-linux
return func() {}
}
@@ -68,7 +70,7 @@ type rawMessage struct {
Len uint32
}
func (u *Conn) ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firewall.ConntrackCacheTicker, q int) {
func (u *GenericConn) ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firewall.ConntrackCacheTicker, q int) {
plaintext := make([]byte, MTU)
buffer := make([]byte, MTU)
h := &header.H{}
@@ -80,8 +82,8 @@ func (u *Conn) ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firewall
// Just read one packet at a time
n, rua, err := u.ReadFromUDP(buffer)
if err != nil {
u.l.WithError(err).Error("Failed to read packets")
continue
u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
return
}
udpAddr.IP = rua.IP


@@ -20,7 +20,7 @@ import (
//TODO: make it support reload as best you can!
type Conn struct {
type StdConn struct {
sysFd int
l *logrus.Logger
batch int
@@ -45,7 +45,7 @@ const (
type _SK_MEMINFO [_SK_MEMINFO_VARS]uint32
func NewListener(l *logrus.Logger, ip net.IP, port int, multi bool, batch int) (*Conn, error) {
func NewListener(l *logrus.Logger, ip net.IP, port int, multi bool, batch int) (Conn, error) {
syscall.ForkLock.RLock()
fd, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_UDP)
if err == nil {
@@ -77,30 +77,30 @@ func NewListener(l *logrus.Logger, ip net.IP, port int, multi bool, batch int) (
//v, err := unix.GetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_INCOMING_CPU)
//l.Println(v, err)
return &Conn{sysFd: fd, l: l, batch: batch}, err
return &StdConn{sysFd: fd, l: l, batch: batch}, err
}
func (u *Conn) Rebind() error {
func (u *StdConn) Rebind() error {
return nil
}
func (u *Conn) SetRecvBuffer(n int) error {
func (u *StdConn) SetRecvBuffer(n int) error {
return unix.SetsockoptInt(u.sysFd, unix.SOL_SOCKET, unix.SO_RCVBUFFORCE, n)
}
func (u *Conn) SetSendBuffer(n int) error {
func (u *StdConn) SetSendBuffer(n int) error {
return unix.SetsockoptInt(u.sysFd, unix.SOL_SOCKET, unix.SO_SNDBUFFORCE, n)
}
func (u *Conn) GetRecvBuffer() (int, error) {
func (u *StdConn) GetRecvBuffer() (int, error) {
return unix.GetsockoptInt(int(u.sysFd), unix.SOL_SOCKET, unix.SO_RCVBUF)
}
func (u *Conn) GetSendBuffer() (int, error) {
func (u *StdConn) GetSendBuffer() (int, error) {
return unix.GetsockoptInt(int(u.sysFd), unix.SOL_SOCKET, unix.SO_SNDBUF)
}
func (u *Conn) LocalAddr() (*Addr, error) {
func (u *StdConn) LocalAddr() (*Addr, error) {
sa, err := unix.Getsockname(u.sysFd)
if err != nil {
return nil, err
@@ -119,7 +119,7 @@ func (u *Conn) LocalAddr() (*Addr, error) {
return addr, nil
}
func (u *Conn) ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firewall.ConntrackCacheTicker, q int) {
func (u *StdConn) ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firewall.ConntrackCacheTicker, q int) {
plaintext := make([]byte, MTU)
h := &header.H{}
fwPacket := &firewall.Packet{}
@@ -137,8 +137,8 @@ func (u *Conn) ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firewall
for {
n, err := read(msgs)
if err != nil {
u.l.WithError(err).Error("Failed to read packets")
continue
u.l.WithError(err).Debug("udp socket is closed, exiting read loop")
return
}
//metric.Update(int64(n))
@@ -150,7 +150,7 @@ func (u *Conn) ListenOut(r EncReader, lhf LightHouseHandlerFunc, cache *firewall
}
}
func (u *Conn) ReadSingle(msgs []rawMessage) (int, error) {
func (u *StdConn) ReadSingle(msgs []rawMessage) (int, error) {
for {
n, _, err := unix.Syscall6(
unix.SYS_RECVMSG,
@@ -171,7 +171,7 @@ func (u *Conn) ReadSingle(msgs []rawMessage) (int, error) {
}
}
func (u *Conn) ReadMulti(msgs []rawMessage) (int, error) {
func (u *StdConn) ReadMulti(msgs []rawMessage) (int, error) {
for {
n, _, err := unix.Syscall6(
unix.SYS_RECVMMSG,
@@ -191,7 +191,7 @@ func (u *Conn) ReadMulti(msgs []rawMessage) (int, error) {
}
}
func (u *Conn) WriteTo(b []byte, addr *Addr) error {
func (u *StdConn) WriteTo(b []byte, addr *Addr) error {
var rsa unix.RawSockaddrInet6
rsa.Family = unix.AF_INET6
@@ -221,7 +221,7 @@ func (u *Conn) WriteTo(b []byte, addr *Addr) error {
}
}
func (u *Conn) ReloadConfig(c *config.C) {
func (u *StdConn) ReloadConfig(c *config.C) {
b := c.GetInt("listen.read_buffer", 0)
if b > 0 {
err := u.SetRecvBuffer(b)
@@ -253,7 +253,7 @@ func (u *Conn) ReloadConfig(c *config.C) {
}
}
func (u *Conn) getMemInfo(meminfo *_SK_MEMINFO) error {
func (u *StdConn) getMemInfo(meminfo *_SK_MEMINFO) error {
var vallen uint32 = 4 * _SK_MEMINFO_VARS
_, _, err := unix.Syscall6(unix.SYS_GETSOCKOPT, uintptr(u.sysFd), uintptr(unix.SOL_SOCKET), uintptr(unix.SO_MEMINFO), uintptr(unsafe.Pointer(meminfo)), uintptr(unsafe.Pointer(&vallen)), 0)
if err != 0 {
@@ -262,11 +262,16 @@ func (u *Conn) getMemInfo(meminfo *_SK_MEMINFO) error {
return nil
}
func NewUDPStatsEmitter(udpConns []*Conn) func() {
func (u *StdConn) Close() error {
//TODO: this will not interrupt the read loop
return syscall.Close(u.sysFd)
}
func NewUDPStatsEmitter(udpConns []Conn) func() {
// Check if our kernel supports SO_MEMINFO before registering the gauges
var udpGauges [][_SK_MEMINFO_VARS]metrics.Gauge
var meminfo _SK_MEMINFO
if err := udpConns[0].getMemInfo(&meminfo); err == nil {
if err := udpConns[0].(*StdConn).getMemInfo(&meminfo); err == nil {
udpGauges = make([][_SK_MEMINFO_VARS]metrics.Gauge, len(udpConns))
for i := range udpConns {
udpGauges[i] = [_SK_MEMINFO_VARS]metrics.Gauge{
@@ -285,7 +290,7 @@ func NewUDPStatsEmitter(udpConns []*Conn) func() {
return func() {
for i, gauges := range udpGauges {
if err := udpConns[i].getMemInfo(&meminfo); err == nil {
if err := udpConns[i].(*StdConn).getMemInfo(&meminfo); err == nil {
for j := 0; j < _SK_MEMINFO_VARS; j++ {
gauges[j].Update(int64(meminfo[j]))
}

Some files were not shown because too many files have changed in this diff.