mirror of https://github.com/slackhq/nebula.git
synced 2025-11-22 00:15:37 +01:00

Compare commits (2 commits)

| Author | SHA1 | Date |
|---|---|---|
| | 0681195da3 | |
| | 2c9cc63c1a | |
.github/ISSUE_TEMPLATE/config.yml (vendored): 22 changes
@@ -1,21 +1,13 @@
blank_issues_enabled: true
contact_links:
  - name: 💨 Performance Issues
    url: https://github.com/slackhq/nebula/discussions/new/choose
    about: 'We ask that you create a discussion instead of an issue for performance-related questions. This allows us to have a more open conversation about the issue and helps us to better understand the problem.'

  - name: 📄 Documentation Issues
    url: https://github.com/definednet/nebula-docs
    about: "If you've found an issue with the website documentation, please file it in the nebula-docs repository."

  - name: 📱 Mobile Nebula Issues
    url: https://github.com/definednet/mobile_nebula
    about: "If you're using the mobile Nebula app and have found an issue, please file it in the mobile_nebula repository."

  - name: 📘 Documentation
    url: https://nebula.defined.net/docs/
    about: 'The documentation is the best place to start if you are new to Nebula.'
    about: Review documentation.

  - name: 💁 Support/Chat
    url: https://join.slack.com/t/nebulaoss/shared_invite/zt-39pk4xopc-CUKlGcb5Z39dQ0cK1v7ehA
    about: 'For faster support, join us on Slack for assistance!'
    url: https://join.slack.com/t/nebulaoss/shared_invite/enQtOTA5MDI4NDg3MTg4LTkwY2EwNTI4NzQyMzc0M2ZlODBjNWI3NTY1MzhiOThiMmZlZjVkMTI0NGY4YTMyNjUwMWEyNzNkZTJmYzQxOGU
    about: 'This issue tracker is not for support questions. Join us on Slack for assistance!'

  - name: 📱 Mobile Nebula
    url: https://github.com/definednet/mobile_nebula
    about: 'This issue tracker is not for mobile support. Try the Mobile Nebula repo instead!'
.github/pull_request_template.md (vendored): 11 changes
@@ -1,11 +0,0 @@
<!--
Thank you for taking the time to submit a pull request!

Please be sure to provide a clear description of what you're trying to achieve with the change.

- If you're submitting a new feature, please explain how to use it and document any new config options in the example config.
- If you're submitting a bugfix, please link the related issue or describe the circumstances surrounding the issue.
- If you're changing a default, explain why you believe the new default is appropriate for most users.

P.S. If you're only updating the README or other docs, please file a pull request here instead: https://github.com/DefinedNet/nebula-docs
-->
.github/workflows/gofmt.yml (vendored): 6 changes
@@ -14,11 +14,11 @@ jobs:
    runs-on: ubuntu-latest
    steps:

      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v6
      - uses: actions/setup-go@v5
        with:
          go-version: '1.25'
          go-version: '1.23'
          check-latest: true

      - name: Install goimports
.github/workflows/release.yml (vendored): 34 changes
@@ -10,11 +10,11 @@ jobs:
    name: Build Linux/BSD All
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v6
      - uses: actions/setup-go@v5
        with:
          go-version: '1.25'
          go-version: '1.23'
          check-latest: true

      - name: Build
@@ -24,7 +24,7 @@ jobs:
          mv build/*.tar.gz release

      - name: Upload artifacts
        uses: actions/upload-artifact@v5
        uses: actions/upload-artifact@v4
        with:
          name: linux-latest
          path: release
@@ -33,11 +33,11 @@ jobs:
    name: Build Windows
    runs-on: windows-latest
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v6
      - uses: actions/setup-go@v5
        with:
          go-version: '1.25'
          go-version: '1.23'
          check-latest: true

      - name: Build
@@ -55,7 +55,7 @@ jobs:
          mv dist\windows\wintun build\dist\windows\

      - name: Upload artifacts
        uses: actions/upload-artifact@v5
        uses: actions/upload-artifact@v4
        with:
          name: windows-latest
          path: build
@@ -66,16 +66,16 @@ jobs:
      HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }}
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v6
      - uses: actions/setup-go@v5
        with:
          go-version: '1.25'
          go-version: '1.23'
          check-latest: true

      - name: Import certificates
        if: env.HAS_SIGNING_CREDS == 'true'
        uses: Apple-Actions/import-codesign-certs@v5
        uses: Apple-Actions/import-codesign-certs@v3
        with:
          p12-file-base64: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_P12_BASE64 }}
          p12-password: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_PASSWORD }}
@@ -104,7 +104,7 @@ jobs:
          fi

      - name: Upload artifacts
        uses: actions/upload-artifact@v5
        uses: actions/upload-artifact@v4
        with:
          name: darwin-latest
          path: ./release/*
@@ -124,11 +124,11 @@ jobs:
      # be overwritten
      - name: Checkout code
        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
        uses: actions/checkout@v5
        uses: actions/checkout@v4

      - name: Download artifacts
        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
        uses: actions/download-artifact@v6
        uses: actions/download-artifact@v4
        with:
          name: linux-latest
          path: artifacts
@@ -160,10 +160,10 @@ jobs:
    needs: [build-linux, build-darwin, build-windows]
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - name: Download artifacts
        uses: actions/download-artifact@v6
        uses: actions/download-artifact@v4
        with:
          path: artifacts
.github/workflows/smoke-extra.yml (vendored): 6 changes
@@ -20,11 +20,11 @@ jobs:
    runs-on: ubuntu-latest
    steps:

      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v6
      - uses: actions/setup-go@v5
        with:
          go-version: '1.25'
          go-version-file: 'go.mod'
          check-latest: true

      - name: add hashicorp source
.github/workflows/smoke.yml (vendored): 6 changes
@@ -18,11 +18,11 @@ jobs:
    runs-on: ubuntu-latest
    steps:

      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v6
      - uses: actions/setup-go@v5
        with:
          go-version: '1.25'
          go-version: '1.23'
          check-latest: true

      - name: build
.github/workflows/test.yml (vendored): 36 changes
@@ -18,11 +18,11 @@ jobs:
    runs-on: ubuntu-latest
    steps:

      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v6
      - uses: actions/setup-go@v5
        with:
          go-version: '1.25'
          go-version: '1.23'
          check-latest: true

      - name: Build
@@ -32,9 +32,9 @@ jobs:
        run: make vet

      - name: golangci-lint
        uses: golangci/golangci-lint-action@v9
        uses: golangci/golangci-lint-action@v6
        with:
          version: v2.5
          version: v1.64

      - name: Test
        run: make test
@@ -45,7 +45,7 @@ jobs:
      - name: Build test mobile
        run: make build-test-mobile

      - uses: actions/upload-artifact@v5
      - uses: actions/upload-artifact@v4
        with:
          name: e2e packet flow linux-latest
          path: e2e/mermaid/linux-latest
@@ -56,11 +56,11 @@ jobs:
    runs-on: ubuntu-latest
    steps:

      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v6
      - uses: actions/setup-go@v5
        with:
          go-version: '1.25'
          go-version: '1.23'
          check-latest: true

      - name: Build
@@ -77,11 +77,11 @@ jobs:
    runs-on: ubuntu-latest
    steps:

      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v6
      - uses: actions/setup-go@v5
        with:
          go-version: '1.25'
          go-version: '1.22'
          check-latest: true

      - name: Build
@@ -98,11 +98,11 @@ jobs:
      os: [windows-latest, macos-latest]
    steps:

      - uses: actions/checkout@v5
      - uses: actions/checkout@v4

      - uses: actions/setup-go@v6
      - uses: actions/setup-go@v5
        with:
          go-version: '1.25'
          go-version: '1.23'
          check-latest: true

      - name: Build nebula
@@ -115,9 +115,9 @@ jobs:
        run: make vet

      - name: golangci-lint
        uses: golangci/golangci-lint-action@v9
        uses: golangci/golangci-lint-action@v6
        with:
          version: v2.5
          version: v1.64

      - name: Test
        run: make test
@@ -125,7 +125,7 @@ jobs:
      - name: End 2 end
        run: make e2evv

      - uses: actions/upload-artifact@v5
      - uses: actions/upload-artifact@v4
        with:
          name: e2e packet flow ${{ matrix.os }}
          path: e2e/mermaid/${{ matrix.os }}
@@ -1,23 +1,9 @@
version: "2"
# yaml-language-server: $schema=https://golangci-lint.run/jsonschema/golangci.jsonschema.json
linters:
  default: none
  # Disable all linters.
  # Default: false
  disable-all: true
  # Enable specific linter
  # https://golangci-lint.run/usage/linters/#enabled-by-default
  enable:
    - testifylint
  exclusions:
    generated: lax
    presets:
      - comments
      - common-false-positives
      - legacy
      - std-error-handling
    paths:
      - third_party$
      - builtin$
      - examples$
formatters:
  exclusions:
    generated: lax
    paths:
      - third_party$
      - builtin$
      - examples$
@@ -7,13 +7,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

## [Unreleased]

### Changed

- `default_local_cidr_any` now defaults to false, meaning that any firewall rule
  intended to target an `unsafe_routes` entry must explicitly declare it via the
  `local_cidr` field. This is almost always the intended behavior. This flag is
  deprecated and will be removed in a future release.
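The entry above is easier to picture with a concrete rule. The sketch below is not part of the changelog: the `unsafe_routes` subnet, port, and group are made-up values, and only the role of the `local_cidr` field comes from the entry itself.

```yaml
# Hypothetical values throughout; shown only to illustrate the default change.
tun:
  unsafe_routes:
    - route: 10.0.0.0/24      # subnet reachable through this nebula node
      via: 192.168.100.9

firewall:
  inbound:
    # With default_local_cidr_any: false, a rule only matches traffic destined
    # for an unsafe_routes subnet if that subnet is named via local_cidr.
    - port: 443
      proto: tcp
      group: servers
      local_cidr: 10.0.0.0/24
```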
## [1.9.4] - 2024-09-09

### Added
README.md: 67 changes
@@ -4,7 +4,7 @@ It lets you seamlessly connect computers anywhere in the world. Nebula is portab
It can be used to connect a small number of computers, but is also able to connect tens of thousands of computers.

Nebula incorporates a number of existing concepts like encryption, security groups, certificates,
and tunneling.
and tunneling, and each of those individual pieces existed before Nebula in various forms.
What makes Nebula different to existing offerings is that it brings all of these ideas together,
resulting in a sum that is greater than its individual parts.

@@ -12,7 +12,7 @@ Further documentation can be found [here](https://nebula.defined.net/docs/).

You can read more about Nebula [here](https://medium.com/p/884110a5579).

You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/zt-39pk4xopc-CUKlGcb5Z39dQ0cK1v7ehA).
You can also join the NebulaOSS Slack group [here](https://join.slack.com/t/nebulaoss/shared_invite/zt-2xqe6e7vn-k_KGi8s13nsr7cvHVvHvuQ).

## Supported Platforms

@@ -28,33 +28,33 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for
#### Distribution Packages

- [Arch Linux](https://archlinux.org/packages/extra/x86_64/nebula/)
  ```sh
  sudo pacman -S nebula
  ```
  $ sudo pacman -S nebula
  ```

- [Fedora Linux](https://src.fedoraproject.org/rpms/nebula)
  ```sh
  sudo dnf install nebula
  ```
  $ sudo dnf install nebula
  ```

- [Debian Linux](https://packages.debian.org/source/stable/nebula)
  ```sh
  sudo apt install nebula
  ```
  $ sudo apt install nebula
  ```

- [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=nebula)
  ```sh
  sudo apk add nebula
  ```
  $ sudo apk add nebula
  ```

- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/n/nebula.rb)
  ```sh
  brew install nebula
  ```
  $ brew install nebula
  ```

- [Docker](https://hub.docker.com/r/nebulaoss/nebula)
  ```sh
  docker pull nebulaoss/nebula
  ```
  $ docker pull nebulaoss/nebula
  ```

#### Mobile

@@ -64,10 +64,10 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for

## Technical Overview

Nebula is a mutually authenticated peer-to-peer software-defined network based on the [Noise Protocol Framework](https://noiseprotocol.org/).
Nebula is a mutually authenticated peer-to-peer software defined network based on the [Noise Protocol Framework](https://noiseprotocol.org/).
Nebula uses certificates to assert a node's IP address, name, and membership within user-defined groups.
Nebula's user-defined groups allow for provider agnostic traffic filtering between nodes.
Discovery nodes (aka lighthouses) allow individual peers to find each other and optionally use UDP hole punching to establish connections from behind most firewalls or NATs.
Discovery nodes allow individual peers to find each other and optionally use UDP hole punching to establish connections from behind most firewalls or NATs.
Users can move data between nodes in any number of cloud service providers, datacenters, and endpoints, without needing to maintain a particular addressing scheme.

Nebula uses Elliptic-curve Diffie-Hellman (`ECDH`) key exchange and `AES-256-GCM` in its default configuration.
@@ -82,34 +82,28 @@ To set up a Nebula network, you'll need:

#### 2. (Optional, but you really should..) At least one discovery node with a routable IP address, which we call a lighthouse.

Nebula lighthouses allow nodes to find each other, anywhere in the world. A lighthouse is the only node in a Nebula network whose IP should not change. Running a lighthouse requires very few compute resources, and you can easily use the least expensive option from a cloud hosting provider. If you're not sure which provider to use, a number of us have used $6/mo [DigitalOcean](https://digitalocean.com) droplets as lighthouses.
Nebula lighthouses allow nodes to find each other, anywhere in the world. A lighthouse is the only node in a Nebula network whose IP should not change. Running a lighthouse requires very few compute resources, and you can easily use the least expensive option from a cloud hosting provider. If you're not sure which provider to use, a number of us have used $5/mo [DigitalOcean](https://digitalocean.com) droplets as lighthouses.

Once you have launched an instance, ensure that Nebula udp traffic (default port udp/4242) can reach it over the internet.

Once you have launched an instance, ensure that Nebula udp traffic (default port udp/4242) can reach it over the internet.

#### 3. A Nebula certificate authority, which will be the root of trust for a particular Nebula network.

```sh
./nebula-cert ca -name "Myorganization, Inc"
```

This will create files named `ca.key` and `ca.cert` in the current directory. The `ca.key` file is the most sensitive file you'll create, because it is the key used to sign the certificates for individual nebula nodes/hosts. Please store this file somewhere safe, preferably with strong encryption.

**Be aware!** By default, certificate authorities have a 1-year lifetime before expiration. See [this guide](https://nebula.defined.net/docs/guides/rotating-certificate-authority/) for details on rotating a CA.
```
./nebula-cert ca -name "Myorganization, Inc"
```
This will create files named `ca.key` and `ca.cert` in the current directory. The `ca.key` file is the most sensitive file you'll create, because it is the key used to sign the certificates for individual nebula nodes/hosts. Please store this file somewhere safe, preferably with strong encryption.

#### 4. Nebula host keys and certificates generated from that certificate authority

This assumes you have four nodes, named lighthouse1, laptop, server1, host3. You can name the nodes any way you'd like, including FQDN. You'll also need to choose IP addresses and the associated subnet. In this example, we are creating a nebula network that will use 192.168.100.x/24 as its network range. This example also demonstrates nebula groups, which can later be used to define traffic rules in a nebula network.
```sh
```
./nebula-cert sign -name "lighthouse1" -ip "192.168.100.1/24"
./nebula-cert sign -name "laptop" -ip "192.168.100.2/24" -groups "laptop,home,ssh"
./nebula-cert sign -name "server1" -ip "192.168.100.9/24" -groups "servers"
./nebula-cert sign -name "host3" -ip "192.168.100.10/24"
```

By default, host certificates will expire 1 second before the CA expires. Use the `-duration` flag to specify a shorter lifetime.

#### 5. Configuration files for each host

Download a copy of the nebula [example configuration](https://github.com/slackhq/nebula/blob/master/examples/config.yml).

* On the lighthouse node, you'll need to ensure `am_lighthouse: true` is set.
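As a rough illustration of the bullet above (not part of the README diff), the relevant keys look roughly like the sketch below; the 192.168.100.1 address follows the step 4 example, the public IP and port are placeholders, and the full example config remains the authoritative reference.

```yaml
# Lighthouse node (sketch only):
lighthouse:
  am_lighthouse: true
---
# Any other node: map the lighthouse's nebula IP to a routable address and list it.
static_host_map:
  "192.168.100.1": ["203.0.113.10:4242"]   # placeholder public IP:port
lighthouse:
  am_lighthouse: false
  hosts:
    - "192.168.100.1"
```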
@@ -124,13 +118,10 @@ For each host, copy the nebula binary to the host, along with `config.yml` from
**DO NOT COPY `ca.key` TO INDIVIDUAL NODES.**

#### 7. Run nebula on each host

```sh
```
./nebula -config /path/to/config.yml
```

For more detailed instructions, [find the full documentation here](https://nebula.defined.net/docs/).

## Building Nebula from source

Make sure you have [go](https://go.dev/doc/install) installed and clone this repo. Change to the nebula directory.
@@ -149,10 +140,8 @@ The default curve used for cryptographic handshakes and signatures is Curve25519

In addition, Nebula can be built using the [BoringCrypto GOEXPERIMENT](https://github.com/golang/go/blob/go1.20/src/crypto/internal/boring/README.md) by running either of the following make targets:

```sh
make bin-boringcrypto
make release-boringcrypto
```
make bin-boringcrypto
make release-boringcrypto

This is not the recommended default deployment, but may be useful based on your compliance requirements.

@@ -160,3 +149,5 @@ This is not the recommended default deployment, but may be useful based on your

Nebula was created at Slack Technologies, Inc by Nate Brown and Ryan Huber, with contributions from Oliver Fross, Alan Lam, Wade Simmons, and Lining Wang.
@@ -36,7 +36,7 @@ type AllowListNameRule struct {

func NewLocalAllowListFromConfig(c *config.C, k string) (*LocalAllowList, error) {
    var nameRules []AllowListNameRule
    handleKey := func(key string, value any) (bool, error) {
    handleKey := func(key string, value interface{}) (bool, error) {
        if key == "interfaces" {
            var err error
            nameRules, err = getAllowListInterfaces(k, value)
@@ -70,7 +70,7 @@ func NewRemoteAllowListFromConfig(c *config.C, k, rangesKey string) (*RemoteAllo

// If the handleKey func returns true, the rest of the parsing is skipped
// for this key. This allows parsing of special values like `interfaces`.
func newAllowListFromConfig(c *config.C, k string, handleKey func(key string, value any) (bool, error)) (*AllowList, error) {
func newAllowListFromConfig(c *config.C, k string, handleKey func(key string, value interface{}) (bool, error)) (*AllowList, error) {
    r := c.Get(k)
    if r == nil {
        return nil, nil
@@ -81,8 +81,8 @@ func newAllowListFromConfig(c *config.C, k string, handleKey func(key string, va

// If the handleKey func returns true, the rest of the parsing is skipped
// for this key. This allows parsing of special values like `interfaces`.
func newAllowList(k string, raw any, handleKey func(key string, value any) (bool, error)) (*AllowList, error) {
    rawMap, ok := raw.(map[string]any)
func newAllowList(k string, raw interface{}, handleKey func(key string, value interface{}) (bool, error)) (*AllowList, error) {
    rawMap, ok := raw.(map[interface{}]interface{})
    if !ok {
        return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw)
    }
@@ -100,7 +100,12 @@ func newAllowList(k string, raw any, handleKey func(key string, value any) (bool
    rules4 := allowListRules{firstValue: true, allValuesMatch: true, defaultSet: false}
    rules6 := allowListRules{firstValue: true, allValuesMatch: true, defaultSet: false}

    for rawCIDR, rawValue := range rawMap {
    for rawKey, rawValue := range rawMap {
        rawCIDR, ok := rawKey.(string)
        if !ok {
            return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
        }

        if handleKey != nil {
            handled, err := handleKey(rawCIDR, rawValue)
            if err != nil {
@@ -111,7 +116,7 @@ func newAllowList(k string, raw any, handleKey func(key string, value any) (bool
            }
        }

        value, ok := config.AsBool(rawValue)
        value, ok := rawValue.(bool)
        if !ok {
            return nil, fmt.Errorf("config `%s` has invalid value (type %T): %v", k, rawValue, rawValue)
        }
@@ -168,18 +173,22 @@ func newAllowList(k string, raw any, handleKey func(key string, value any) (bool
    return &AllowList{cidrTree: tree}, nil
}

func getAllowListInterfaces(k string, v any) ([]AllowListNameRule, error) {
func getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error) {
    var nameRules []AllowListNameRule

    rawRules, ok := v.(map[string]any)
    rawRules, ok := v.(map[interface{}]interface{})
    if !ok {
        return nil, fmt.Errorf("config `%s.interfaces` is invalid (type %T): %v", k, v, v)
    }

    firstEntry := true
    var allValues bool
    for name, rawAllow := range rawRules {
        allow, ok := config.AsBool(rawAllow)
    for rawName, rawAllow := range rawRules {
        name, ok := rawName.(string)
        if !ok {
            return nil, fmt.Errorf("config `%s.interfaces` has invalid key (type %T): %v", k, rawName, rawName)
        }
        allow, ok := rawAllow.(bool)
        if !ok {
            return nil, fmt.Errorf("config `%s.interfaces` has invalid value (type %T): %v", k, rawAllow, rawAllow)
        }
@@ -215,11 +224,16 @@ func getRemoteAllowRanges(c *config.C, k string) (*bart.Table[*AllowList], error

    remoteAllowRanges := new(bart.Table[*AllowList])

    rawMap, ok := value.(map[string]any)
    rawMap, ok := value.(map[interface{}]interface{})
    if !ok {
        return nil, fmt.Errorf("config `%s` has invalid type: %T", k, value)
    }
    for rawCIDR, rawValue := range rawMap {
    for rawKey, rawValue := range rawMap {
        rawCIDR, ok := rawKey.(string)
        if !ok {
            return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
        }

        allowList, err := newAllowList(fmt.Sprintf("%s.%s", k, rawCIDR), rawValue, nil)
        if err != nil {
            return nil, err
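For context on what newAllowList and getAllowListInterfaces are parsing, here is a hedged sketch of the config shape they accept (CIDR keys mapping to booleans, plus an `interfaces` sub-map handled through the handleKey hook); the specific CIDRs and interface patterns are illustrative values, not from the diff.

```yaml
lighthouse:
  # CIDR keys -> bool, as consumed by newAllowList (illustrative values):
  remote_allow_list:
    "0.0.0.0/0": true
    "172.16.0.0/12": false
  # interfaces sub-map handled by getAllowListInterfaces:
  local_allow_list:
    interfaces:
      "docker.*": false
      tun0: false
```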
@@ -15,27 +15,27 @@ import (
func TestNewAllowListFromConfig(t *testing.T) {
    l := test.NewLogger()
    c := config.NewC(l)
    c.Settings["allowlist"] = map[string]any{
    c.Settings["allowlist"] = map[interface{}]interface{}{
        "192.168.0.0": true,
    }
    r, err := newAllowListFromConfig(c, "allowlist", nil)
    require.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0. netip.ParsePrefix(\"192.168.0.0\"): no '/'")
    assert.Nil(t, r)

    c.Settings["allowlist"] = map[string]any{
    c.Settings["allowlist"] = map[interface{}]interface{}{
        "192.168.0.0/16": "abc",
    }
    r, err = newAllowListFromConfig(c, "allowlist", nil)
    require.EqualError(t, err, "config `allowlist` has invalid value (type string): abc")

    c.Settings["allowlist"] = map[string]any{
    c.Settings["allowlist"] = map[interface{}]interface{}{
        "192.168.0.0/16": true,
        "10.0.0.0/8": false,
    }
    r, err = newAllowListFromConfig(c, "allowlist", nil)
    require.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for 0.0.0.0/0")

    c.Settings["allowlist"] = map[string]any{
    c.Settings["allowlist"] = map[interface{}]interface{}{
        "0.0.0.0/0": true,
        "10.0.0.0/8": false,
        "10.42.42.0/24": true,
@@ -45,7 +45,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
    r, err = newAllowListFromConfig(c, "allowlist", nil)
    require.EqualError(t, err, "config `allowlist` contains both true and false rules, but no default set for ::/0")

    c.Settings["allowlist"] = map[string]any{
    c.Settings["allowlist"] = map[interface{}]interface{}{
        "0.0.0.0/0": true,
        "10.0.0.0/8": false,
        "10.42.42.0/24": true,
@@ -55,7 +55,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
        assert.NotNil(t, r)
    }

    c.Settings["allowlist"] = map[string]any{
    c.Settings["allowlist"] = map[interface{}]interface{}{
        "0.0.0.0/0": true,
        "10.0.0.0/8": false,
        "10.42.42.0/24": true,
@@ -70,16 +70,16 @@ func TestNewAllowListFromConfig(t *testing.T) {

    // Test interface names

    c.Settings["allowlist"] = map[string]any{
        "interfaces": map[string]any{
    c.Settings["allowlist"] = map[interface{}]interface{}{
        "interfaces": map[interface{}]interface{}{
            `docker.*`: "foo",
        },
    }
    lr, err := NewLocalAllowListFromConfig(c, "allowlist")
    require.EqualError(t, err, "config `allowlist.interfaces` has invalid value (type string): foo")

    c.Settings["allowlist"] = map[string]any{
        "interfaces": map[string]any{
    c.Settings["allowlist"] = map[interface{}]interface{}{
        "interfaces": map[interface{}]interface{}{
            `docker.*`: false,
            `eth.*`: true,
        },
@@ -87,8 +87,8 @@ func TestNewAllowListFromConfig(t *testing.T) {
    lr, err = NewLocalAllowListFromConfig(c, "allowlist")
    require.EqualError(t, err, "config `allowlist.interfaces` values must all be the same true/false value")

    c.Settings["allowlist"] = map[string]any{
        "interfaces": map[string]any{
    c.Settings["allowlist"] = map[interface{}]interface{}{
        "interfaces": map[interface{}]interface{}{
            `docker.*`: false,
        },
    }
bits.go: 2 changes
@@ -43,7 +43,7 @@ func (b *Bits) Check(l logrus.FieldLogger, i uint64) bool {
    }

    // Not within the window
    l.Debugf("rejected a packet (top) %d %d delta %d\n", b.current, i, b.current-i)
    l.Debugf("rejected a packet (top) %d %d\n", b.current, i)
    return false
}
@@ -84,11 +84,16 @@ func NewCalculatedRemotesFromConfig(c *config.C, k string) (*bart.Table[[]*calcu

    calculatedRemotes := new(bart.Table[[]*calculatedRemote])

    rawMap, ok := value.(map[string]any)
    rawMap, ok := value.(map[any]any)
    if !ok {
        return nil, fmt.Errorf("config `%s` has invalid type: %T", k, value)
    }
    for rawCIDR, rawValue := range rawMap {
    for rawKey, rawValue := range rawMap {
        rawCIDR, ok := rawKey.(string)
        if !ok {
            return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
        }

        cidr, err := netip.ParsePrefix(rawCIDR)
        if err != nil {
            return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
@@ -124,7 +129,7 @@ func newCalculatedRemotesListFromConfig(cidr netip.Prefix, raw any) ([]*calculat
}

func newCalculatedRemotesEntryFromConfig(cidr netip.Prefix, raw any) (*calculatedRemote, error) {
    rawMap, ok := raw.(map[string]any)
    rawMap, ok := raw.(map[any]any)
    if !ok {
        return nil, fmt.Errorf("invalid type: %T", raw)
    }
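For orientation, a hedged sketch of the config shape this parser consumes (CIDR keys mapping to lists of calculated-remote entries); the networks and port below are illustrative and the exact entry fields should be checked against the example config.

```yaml
lighthouse:
  # Illustrative values only:
  calculated_remotes:
    10.0.10.0/24:
      - mask: 192.168.1.0/24
        port: 4242
```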
@@ -58,9 +58,6 @@ type Certificate interface {
    // PublicKey is the raw bytes to be used in asymmetric cryptographic operations.
    PublicKey() []byte

    // MarshalPublicKeyPEM is the value of PublicKey marshalled to PEM
    MarshalPublicKeyPEM() []byte

    // Curve identifies which curve was used for the PublicKey and Signature.
    Curve() Curve

@@ -138,7 +135,8 @@ func Recombine(v Version, rawCertBytes, publicKey []byte, curve Curve) (Certific
    case Version2:
        c, err = unmarshalCertificateV2(rawCertBytes, publicKey, curve)
    default:
        return nil, ErrUnknownVersion
        //TODO: CERT-V2 make a static var
        return nil, fmt.Errorf("unknown certificate version %d", v)
    }

    if err != nil {
@@ -41,7 +41,7 @@ type detailsV1 struct {
    curve Curve
}

type m = map[string]any
type m map[string]interface{}

func (c *certificateV1) Version() Version {
    return Version1
@@ -83,10 +83,6 @@ func (c *certificateV1) PublicKey() []byte {
    return c.details.publicKey
}

func (c *certificateV1) MarshalPublicKeyPEM() []byte {
    return marshalCertPublicKeyToPEM(c)
}

func (c *certificateV1) Signature() []byte {
    return c.signature
}
@@ -114,10 +110,8 @@ func (c *certificateV1) CheckSignature(key []byte) bool {
    case Curve_CURVE25519:
        return ed25519.Verify(key, b, c.signature)
    case Curve_P256:
        pubKey, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), key)
        if err != nil {
            return false
        }
        x, y := elliptic.Unmarshal(elliptic.P256(), key)
        pubKey := &ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}
        hashed := sha256.Sum256(b)
        return ecdsa.VerifyASN1(pubKey, hashed[:], c.signature)
    default:
@@ -1,7 +1,6 @@
package cert

import (
    "crypto/ed25519"
    "fmt"
    "net/netip"
    "testing"
@@ -14,7 +13,6 @@ import (
)

func TestCertificateV1_Marshal(t *testing.T) {
    t.Parallel()
    before := time.Now().Add(time.Second * -60).Round(time.Second)
    after := time.Now().Add(time.Second * 60).Round(time.Second)
    pubKey := []byte("1234567890abcedfghij1234567890ab")
@@ -62,58 +60,6 @@ func TestCertificateV1_Marshal(t *testing.T) {
    assert.Equal(t, nc.Groups(), nc2.Groups())
}

func TestCertificateV1_PublicKeyPem(t *testing.T) {
    t.Parallel()
    before := time.Now().Add(time.Second * -60).Round(time.Second)
    after := time.Now().Add(time.Second * 60).Round(time.Second)
    pubKey := ed25519.PublicKey("1234567890abcedfghij1234567890ab")

    nc := certificateV1{
        details: detailsV1{
            name: "testing",
            networks: []netip.Prefix{},
            unsafeNetworks: []netip.Prefix{},
            groups: []string{"test-group1", "test-group2", "test-group3"},
            notBefore: before,
            notAfter: after,
            publicKey: pubKey,
            isCA: false,
            issuer: "1234567890abcedfghij1234567890ab",
        },
        signature: []byte("1234567890abcedfghij1234567890ab"),
    }

    assert.Equal(t, Version1, nc.Version())
    assert.Equal(t, Curve_CURVE25519, nc.Curve())
    pubPem := "-----BEGIN NEBULA X25519 PUBLIC KEY-----\nMTIzNDU2Nzg5MGFiY2VkZmdoaWoxMjM0NTY3ODkwYWI=\n-----END NEBULA X25519 PUBLIC KEY-----\n"
    assert.Equal(t, string(nc.MarshalPublicKeyPEM()), pubPem)
    assert.False(t, nc.IsCA())

    nc.details.isCA = true
    assert.Equal(t, Curve_CURVE25519, nc.Curve())
    pubPem = "-----BEGIN NEBULA ED25519 PUBLIC KEY-----\nMTIzNDU2Nzg5MGFiY2VkZmdoaWoxMjM0NTY3ODkwYWI=\n-----END NEBULA ED25519 PUBLIC KEY-----\n"
    assert.Equal(t, string(nc.MarshalPublicKeyPEM()), pubPem)
    assert.True(t, nc.IsCA())

    pubP256KeyPem := []byte(`-----BEGIN NEBULA P256 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA P256 PUBLIC KEY-----
`)
    pubP256Key, _, _, err := UnmarshalPublicKeyFromPEM(pubP256KeyPem)
    require.NoError(t, err)
    nc.details.curve = Curve_P256
    nc.details.publicKey = pubP256Key
    assert.Equal(t, Curve_P256, nc.Curve())
    assert.Equal(t, string(nc.MarshalPublicKeyPEM()), string(pubP256KeyPem))
    assert.True(t, nc.IsCA())

    nc.details.isCA = false
    assert.Equal(t, Curve_P256, nc.Curve())
    assert.Equal(t, string(nc.MarshalPublicKeyPEM()), string(pubP256KeyPem))
    assert.False(t, nc.IsCA())
}

func TestCertificateV1_Expired(t *testing.T) {
    nc := certificateV1{
        details: detailsV1{
@@ -114,10 +114,6 @@ func (c *certificateV2) PublicKey() []byte {
    return c.publicKey
}

func (c *certificateV2) MarshalPublicKeyPEM() []byte {
    return marshalCertPublicKeyToPEM(c)
}

func (c *certificateV2) Signature() []byte {
    return c.signature
}
@@ -153,10 +149,8 @@ func (c *certificateV2) CheckSignature(key []byte) bool {
    case Curve_CURVE25519:
        return ed25519.Verify(key, b, c.signature)
    case Curve_P256:
        pubKey, err := ecdsa.ParseUncompressedPublicKey(elliptic.P256(), key)
        if err != nil {
            return false
        }
        x, y := elliptic.Unmarshal(elliptic.P256(), key)
        pubKey := &ecdsa.PublicKey{Curve: elliptic.P256(), X: x, Y: y}
        hashed := sha256.Sum256(b)
        return ecdsa.VerifyASN1(pubKey, hashed[:], c.signature)
    default:
@@ -15,7 +15,6 @@ import (
)

func TestCertificateV2_Marshal(t *testing.T) {
    t.Parallel()
    before := time.Now().Add(time.Second * -60).Round(time.Second)
    after := time.Now().Add(time.Second * 60).Round(time.Second)
    pubKey := []byte("1234567890abcedfghij1234567890ab")
@@ -76,58 +75,6 @@ func TestCertificateV2_Marshal(t *testing.T) {
    assert.Equal(t, nc.Groups(), nc2.Groups())
}

func TestCertificateV2_PublicKeyPem(t *testing.T) {
    t.Parallel()
    before := time.Now().Add(time.Second * -60).Round(time.Second)
    after := time.Now().Add(time.Second * 60).Round(time.Second)
    pubKey := ed25519.PublicKey("1234567890abcedfghij1234567890ab")

    nc := certificateV2{
        details: detailsV2{
            name: "testing",
            networks: []netip.Prefix{},
            unsafeNetworks: []netip.Prefix{},
            groups: []string{"test-group1", "test-group2", "test-group3"},
            notBefore: before,
            notAfter: after,
            isCA: false,
            issuer: "1234567890abcedfghij1234567890ab",
        },
        publicKey: pubKey,
        signature: []byte("1234567890abcedfghij1234567890ab"),
    }

    assert.Equal(t, Version2, nc.Version())
    assert.Equal(t, Curve_CURVE25519, nc.Curve())
    pubPem := "-----BEGIN NEBULA X25519 PUBLIC KEY-----\nMTIzNDU2Nzg5MGFiY2VkZmdoaWoxMjM0NTY3ODkwYWI=\n-----END NEBULA X25519 PUBLIC KEY-----\n"
    assert.Equal(t, string(nc.MarshalPublicKeyPEM()), pubPem)
    assert.False(t, nc.IsCA())

    nc.details.isCA = true
    assert.Equal(t, Curve_CURVE25519, nc.Curve())
    pubPem = "-----BEGIN NEBULA ED25519 PUBLIC KEY-----\nMTIzNDU2Nzg5MGFiY2VkZmdoaWoxMjM0NTY3ODkwYWI=\n-----END NEBULA ED25519 PUBLIC KEY-----\n"
    assert.Equal(t, string(nc.MarshalPublicKeyPEM()), pubPem)
    assert.True(t, nc.IsCA())

    pubP256KeyPem := []byte(`-----BEGIN NEBULA P256 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA P256 PUBLIC KEY-----
`)
    pubP256Key, _, _, err := UnmarshalPublicKeyFromPEM(pubP256KeyPem)
    require.NoError(t, err)
    nc.curve = Curve_P256
    nc.publicKey = pubP256Key
    assert.Equal(t, Curve_P256, nc.Curve())
    assert.Equal(t, string(nc.MarshalPublicKeyPEM()), string(pubP256KeyPem))
    assert.True(t, nc.IsCA())

    nc.details.isCA = false
    assert.Equal(t, Curve_P256, nc.Curve())
    assert.Equal(t, string(nc.MarshalPublicKeyPEM()), string(pubP256KeyPem))
    assert.False(t, nc.IsCA())
}

func TestCertificateV2_Expired(t *testing.T) {
    nc := certificateV2{
        details: detailsV2{
@@ -10,14 +10,14 @@ import (

func TestNewArgon2Parameters(t *testing.T) {
    p := NewArgon2Parameters(64*1024, 4, 3)
    assert.Equal(t, &Argon2Parameters{
    assert.EqualValues(t, &Argon2Parameters{
        version: argon2.Version,
        Memory: 64 * 1024,
        Parallelism: 4,
        Iterations: 3,
    }, p)
    p = NewArgon2Parameters(2*1024*1024, 2, 1)
    assert.Equal(t, &Argon2Parameters{
    assert.EqualValues(t, &Argon2Parameters{
        version: argon2.Version,
        Memory: 2 * 1024 * 1024,
        Parallelism: 2,
@@ -26,21 +26,21 @@ func TestNewArgon2Parameters(t *testing.T) {
}

func TestDecryptAndUnmarshalSigningPrivateKey(t *testing.T) {
    passphrase := []byte("DO NOT USE")
    passphrase := []byte("DO NOT USE THIS KEY")
    privKey := []byte(`# A good key
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
CjsKC0FFUy0yNTYtR0NNEiwIExCAgAQYAyAEKiCPoDfGQiosxNPTbPn5EsMlc2MI
c0Bt4oz6gTrFQhX3aBJcimhHKeAuhyTGvllD0Z19fe+DFPcLH3h5VrdjVfIAajg0
KrbV3n9UHif/Au5skWmquNJzoW1E4MTdRbvpti6o+WdQ49DxjBFhx0YH8LBqrbPU
0BGkUHmIO7daP24=
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCognnjujd67Vsv99p22wfAjQaDT
oCMW1mdjkU3gACKNW4MSXOWR9Sts4C81yk1RUku2gvGKs3TB9LYoklLsIizSYOLl
+Vs//O1T0I1Xbml2XBAROsb/VSoDln/6LMqR4B6fn6B3GOsLBBqRI8daDl9lRMPB
qrlJ69wer3ZUHFXA
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
`)
    shortKey := []byte(`# A key which, once decrypted, is too short
-----BEGIN NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
CjsKC0FFUy0yNTYtR0NNEiwIExCAgAQYAyAEKiAVJwdfl3r+eqi/vF6S7OMdpjfo
hAzmTCRnr58Su4AqmBJbCv3zleYCEKYJP6UI3S8ekLMGISsgO4hm5leukCCyqT0Z
cQ76yrberpzkJKoPLGisX8f+xdy4aXSZl7oEYWQte1+vqbtl/eY9PGZhxUQdcyq7
hqzIyrRqfUgVuA==
CjwKC0FFUy0yNTYtR0NNEi0IExCAgIABGAEgBCoga5h8owMEBWRSMMJKzuUvWce7
k0qlBkQmCxiuLh80MuASW70YcKt8jeEIS2axo2V6zAKA9TSMcCsJW1kDDXEtL/xe
GLF5T7sDl5COp4LU3pGxpV+KoeQ/S3gQCAAcnaOtnJQX+aSDnbO3jCHyP7U9CHbs
rQr3bdH3Oy/WiYU=
-----END NEBULA ED25519 ENCRYPTED PRIVATE KEY-----
`)
    invalidBanner := []byte(`# Invalid banner (not encrypted)
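As a standalone illustration of the Argon2 parameters the test above constructs (64 MiB memory, 4 lanes, 3 passes), the sketch below derives a 32-byte key with golang.org/x/crypto/argon2. It assumes the Argon2id variant; the salt is a fixed placeholder and must be random in real use.

```go
// Sketch only; not taken from the diff.
package main

import (
	"fmt"

	"golang.org/x/crypto/argon2"
)

func main() {
	passphrase := []byte("DO NOT USE")   // throwaway passphrase, mirroring the test
	salt := []byte("0123456789abcdef")   // placeholder; generate randomly in practice

	// argon2.IDKey(password, salt, time, memory(KiB), threads, keyLen)
	key := argon2.IDKey(passphrase, salt, 3, 64*1024, 4, 32)
	fmt.Printf("%x\n", key)
}
```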
@@ -20,7 +20,6 @@ var (
    ErrPublicPrivateKeyMismatch = errors.New("public key and private key are not a pair")
    ErrPrivateKeyEncrypted = errors.New("private key must be decrypted")
    ErrCaNotFound = errors.New("could not find ca for the certificate")
    ErrUnknownVersion = errors.New("certificate version unrecognized")

    ErrInvalidPEMBlock = errors.New("input did not contain a valid PEM encoded block")
    ErrInvalidPEMCertificateBanner = errors.New("bytes did not contain a proper certificate banner")
cert/pem.go: 52 changes
@@ -7,26 +7,19 @@ import (
    "golang.org/x/crypto/ed25519"
)

const ( //cert banners
    CertificateBanner = "NEBULA CERTIFICATE"
    CertificateV2Banner = "NEBULA CERTIFICATE V2"
)
const (
    CertificateBanner = "NEBULA CERTIFICATE"
    CertificateV2Banner = "NEBULA CERTIFICATE V2"
    X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY"
    X25519PublicKeyBanner = "NEBULA X25519 PUBLIC KEY"
    EncryptedEd25519PrivateKeyBanner = "NEBULA ED25519 ENCRYPTED PRIVATE KEY"
    Ed25519PrivateKeyBanner = "NEBULA ED25519 PRIVATE KEY"
    Ed25519PublicKeyBanner = "NEBULA ED25519 PUBLIC KEY"

const ( //key-agreement-key banners
    X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY"
    X25519PublicKeyBanner = "NEBULA X25519 PUBLIC KEY"
    P256PrivateKeyBanner = "NEBULA P256 PRIVATE KEY"
    P256PublicKeyBanner = "NEBULA P256 PUBLIC KEY"
)

/* including "ECDSA" in the P256 banners is a clue that these keys should be used only for signing */
const ( //signing key banners
    P256PrivateKeyBanner = "NEBULA P256 PRIVATE KEY"
    P256PublicKeyBanner = "NEBULA P256 PUBLIC KEY"
    EncryptedECDSAP256PrivateKeyBanner = "NEBULA ECDSA P256 ENCRYPTED PRIVATE KEY"
    ECDSAP256PrivateKeyBanner = "NEBULA ECDSA P256 PRIVATE KEY"
    ECDSAP256PublicKeyBanner = "NEBULA ECDSA P256 PUBLIC KEY"
    EncryptedEd25519PrivateKeyBanner = "NEBULA ED25519 ENCRYPTED PRIVATE KEY"
    Ed25519PrivateKeyBanner = "NEBULA ED25519 PRIVATE KEY"
    Ed25519PublicKeyBanner = "NEBULA ED25519 PUBLIC KEY"
)

// UnmarshalCertificateFromPEM will try to unmarshal the first pem block in a byte array, returning any non consumed
@@ -58,16 +51,6 @@ func UnmarshalCertificateFromPEM(b []byte) (Certificate, []byte, error) {

}

func marshalCertPublicKeyToPEM(c Certificate) []byte {
    if c.IsCA() {
        return MarshalSigningPublicKeyToPEM(c.Curve(), c.PublicKey())
    } else {
        return MarshalPublicKeyToPEM(c.Curve(), c.PublicKey())
    }
}

// MarshalPublicKeyToPEM returns a PEM representation of a public key used for ECDH.
// if your public key came from a certificate, prefer Certificate.PublicKeyPEM() if possible, to avoid mistakes!
func MarshalPublicKeyToPEM(curve Curve, b []byte) []byte {
    switch curve {
    case Curve_CURVE25519:
@@ -79,19 +62,6 @@ func MarshalPublicKeyToPEM(curve Curve, b []byte) []byte {
    }
}

// MarshalSigningPublicKeyToPEM returns a PEM representation of a public key used for signing.
// if your public key came from a certificate, prefer Certificate.PublicKeyPEM() if possible, to avoid mistakes!
func MarshalSigningPublicKeyToPEM(curve Curve, b []byte) []byte {
    switch curve {
    case Curve_CURVE25519:
        return pem.EncodeToMemory(&pem.Block{Type: Ed25519PublicKeyBanner, Bytes: b})
    case Curve_P256:
        return pem.EncodeToMemory(&pem.Block{Type: P256PublicKeyBanner, Bytes: b})
    default:
        return nil
    }
}

func UnmarshalPublicKeyFromPEM(b []byte) ([]byte, []byte, Curve, error) {
    k, r := pem.Decode(b)
    if k == nil {
@@ -103,7 +73,7 @@ func UnmarshalPublicKeyFromPEM(b []byte) ([]byte, []byte, Curve, error) {
    case X25519PublicKeyBanner, Ed25519PublicKeyBanner:
        expectedLen = 32
        curve = Curve_CURVE25519
    case P256PublicKeyBanner, ECDSAP256PublicKeyBanner:
    case P256PublicKeyBanner:
        // Uncompressed
        expectedLen = 65
        curve = Curve_P256
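A small standalone sketch (not from the diff) of how the banner constants above end up as PEM block types via the standard library, mirroring the EncodeToMemory calls shown in MarshalSigningPublicKeyToPEM; the 32 zero bytes stand in for real Ed25519 key material.

```go
package main

import (
	"encoding/pem"
	"fmt"
)

func main() {
	pub := make([]byte, 32) // placeholder key bytes
	block := &pem.Block{Type: "NEBULA ED25519 PUBLIC KEY", Bytes: pub}
	fmt.Print(string(pem.EncodeToMemory(block)))
	// Output starts with: -----BEGIN NEBULA ED25519 PUBLIC KEY-----
}
```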
@@ -177,7 +177,6 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
}

func TestUnmarshalPublicKeyFromPEM(t *testing.T) {
    t.Parallel()
    pubKey := []byte(`# A good key
-----BEGIN NEBULA ED25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
@@ -231,7 +230,6 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
}

func TestUnmarshalX25519PublicKey(t *testing.T) {
    t.Parallel()
    pubKey := []byte(`# A good key
-----BEGIN NEBULA X25519 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
@@ -242,12 +240,6 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA P256 PUBLIC KEY-----
`)
    oldPubP256Key := []byte(`# A good key
-----BEGIN NEBULA ECDSA P256 PUBLIC KEY-----
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAA=
-----END NEBULA ECDSA P256 PUBLIC KEY-----
`)
    shortKey := []byte(`# A short key
-----BEGIN NEBULA X25519 PUBLIC KEY-----
@@ -264,22 +256,15 @@ AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=
-END NEBULA X25519 PUBLIC KEY-----`)

    keyBundle := appendByteSlices(pubKey, pubP256Key, oldPubP256Key, shortKey, invalidBanner, invalidPem)
    keyBundle := appendByteSlices(pubKey, pubP256Key, shortKey, invalidBanner, invalidPem)

    // Success test case
    k, rest, curve, err := UnmarshalPublicKeyFromPEM(keyBundle)
    assert.Len(t, k, 32)
    require.NoError(t, err)
    assert.Equal(t, rest, appendByteSlices(pubP256Key, oldPubP256Key, shortKey, invalidBanner, invalidPem))
    assert.Equal(t, rest, appendByteSlices(pubP256Key, shortKey, invalidBanner, invalidPem))
    assert.Equal(t, Curve_CURVE25519, curve)

    // Success test case
    k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
    assert.Len(t, k, 65)
    require.NoError(t, err)
    assert.Equal(t, rest, appendByteSlices(oldPubP256Key, shortKey, invalidBanner, invalidPem))
    assert.Equal(t, Curve_P256, curve)

    // Success test case
    k, rest, curve, err = UnmarshalPublicKeyFromPEM(rest)
    assert.Len(t, k, 65)
cert/sign.go: 12 changes
@@ -7,6 +7,7 @@ import (
    "crypto/rand"
    "crypto/sha256"
    "fmt"
    "math/big"
    "net/netip"
    "time"
)
@@ -54,10 +55,15 @@ func (t *TBSCertificate) Sign(signer Certificate, curve Curve, key []byte) (Cert
        }
        return t.SignWith(signer, curve, sp)
    case Curve_P256:
        pk, err := ecdsa.ParseRawPrivateKey(elliptic.P256(), key)
        if err != nil {
            return nil, err
        pk := &ecdsa.PrivateKey{
            PublicKey: ecdsa.PublicKey{
                Curve: elliptic.P256(),
            },
            // ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L95
            D: new(big.Int).SetBytes(key),
        }
        // ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L119
        pk.X, pk.Y = pk.Curve.ScalarBaseMult(key)
        sp := func(certBytes []byte) ([]byte, error) {
            // We need to hash first for ECDSA
            // - https://pkg.go.dev/crypto/ecdsa#SignASN1
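The hunk above toggles between two ways of turning a raw 32-byte P-256 scalar into an *ecdsa.PrivateKey. The standalone sketch below uses only long-stable standard-library calls to show the manual big.Int/ScalarBaseMult construction and an ECDSA signature over a SHA-256 digest; the scalar value is a placeholder, and the newer ecdsa.ParseRawPrivateKey path from the other branch is assumed to require a recent Go release, so it is not shown here.

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/sha256"
	"fmt"
	"math/big"
)

func main() {
	raw := make([]byte, 32)
	raw[31] = 1 // placeholder scalar; a real key comes from key generation

	// Manual reconstruction, matching the big.Int/ScalarBaseMult branch above.
	pk := &ecdsa.PrivateKey{
		PublicKey: ecdsa.PublicKey{Curve: elliptic.P256()},
		D:         new(big.Int).SetBytes(raw),
	}
	pk.X, pk.Y = pk.Curve.ScalarBaseMult(raw)

	// ECDSA signs a digest, so hash first, then SignASN1/VerifyASN1.
	digest := sha256.Sum256([]byte("cert bytes to sign"))
	sig, err := ecdsa.SignASN1(rand.Reader, pk, digest[:])
	if err != nil {
		panic(err)
	}
	fmt.Println(ecdsa.VerifyASN1(&pk.PublicKey, digest[:], sig))
}
```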
@@ -114,33 +114,6 @@ func NewTestCert(v cert.Version, curve cert.Curve, ca cert.Certificate, key []by
    return c, pub, cert.MarshalPrivateKeyToPEM(curve, priv), pem
}

func NewTestCertDifferentVersion(c cert.Certificate, v cert.Version, ca cert.Certificate, key []byte) (cert.Certificate, []byte) {
    nc := &cert.TBSCertificate{
        Version: v,
        Curve: c.Curve(),
        Name: c.Name(),
        Networks: c.Networks(),
        UnsafeNetworks: c.UnsafeNetworks(),
        Groups: c.Groups(),
        NotBefore: time.Unix(c.NotBefore().Unix(), 0),
        NotAfter: time.Unix(c.NotAfter().Unix(), 0),
        PublicKey: c.PublicKey(),
        IsCA: false,
    }

    c, err := nc.Sign(ca, ca.Curve(), key)
    if err != nil {
        panic(err)
    }

    pem, err := c.MarshalPEM()
    if err != nil {
        panic(err)
    }

    return c, pem
}

func X25519Keypair() ([]byte, []byte) {
    privkey := make([]byte, 32)
    if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
@@ -90,26 +90,26 @@ func Test_ca(t *testing.T) {
    assertHelpError(t, ca(
        []string{"-version", "1", "-out-key", "nope", "-out-crt", "nope", "duration", "100m"}, ob, eb, nopw,
    ), "-name is required")
    assert.Empty(t, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", ob.String())
    assert.Equal(t, "", eb.String())

    // ipv4 only ips
    assertHelpError(t, ca([]string{"-version", "1", "-name", "ipv6", "-ips", "100::100/100"}, ob, eb, nopw), "invalid -networks definition: v1 certificates can only be ipv4, have 100::100/100")
    assert.Empty(t, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", ob.String())
    assert.Equal(t, "", eb.String())

    // ipv4 only subnets
    assertHelpError(t, ca([]string{"-version", "1", "-name", "ipv6", "-subnets", "100::100/100"}, ob, eb, nopw), "invalid -unsafe-networks definition: v1 certificates can only be ipv4, have 100::100/100")
    assert.Empty(t, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", ob.String())
    assert.Equal(t, "", eb.String())

    // failed key write
    ob.Reset()
    eb.Reset()
    args := []string{"-version", "1", "-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", "/do/not/write/pleasekey"}
    require.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
    assert.Empty(t, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", ob.String())
    assert.Equal(t, "", eb.String())

    // create temp key file
    keyF, err := os.CreateTemp("", "test.key")
@@ -121,8 +121,8 @@ func Test_ca(t *testing.T) {
    eb.Reset()
    args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-out-crt", "/do/not/write/pleasecrt", "-out-key", keyF.Name()}
    require.EqualError(t, ca(args, ob, eb, nopw), "error while writing out-crt: open /do/not/write/pleasecrt: "+NoSuchDirError)
    assert.Empty(t, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", ob.String())
    assert.Equal(t, "", eb.String())

    // create temp cert file
    crtF, err := os.CreateTemp("", "test.crt")
@@ -135,8 +135,8 @@ func Test_ca(t *testing.T) {
    eb.Reset()
    args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
    require.NoError(t, ca(args, ob, eb, nopw))
    assert.Empty(t, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", ob.String())
    assert.Equal(t, "", eb.String())

    // read cert and key files
    rb, _ := os.ReadFile(keyF.Name())
@@ -158,7 +158,7 @@ func Test_ca(t *testing.T) {
    assert.Empty(t, lCrt.UnsafeNetworks())
    assert.Len(t, lCrt.PublicKey(), 32)
    assert.Equal(t, time.Duration(time.Minute*100), lCrt.NotAfter().Sub(lCrt.NotBefore()))
    assert.Empty(t, lCrt.Issuer())
    assert.Equal(t, "", lCrt.Issuer())
    assert.True(t, lCrt.CheckSignature(lCrt.PublicKey()))

    // test encrypted key
@@ -169,7 +169,7 @@ func Test_ca(t *testing.T) {
    args = []string{"-version", "1", "-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
    require.NoError(t, ca(args, ob, eb, testpw))
    assert.Equal(t, pwPromptOb, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", eb.String())

    // read encrypted key file and verify default params
    rb, _ = os.ReadFile(keyF.Name())
@@ -197,7 +197,7 @@ func Test_ca(t *testing.T) {
    args = []string{"-version", "1", "-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
    require.Error(t, ca(args, ob, eb, errpw))
    assert.Equal(t, pwPromptOb, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", eb.String())

    // test when user fails to enter a password
    os.Remove(keyF.Name())
@@ -207,7 +207,7 @@ func Test_ca(t *testing.T) {
    args = []string{"-version", "1", "-encrypt", "-name", "test", "-duration", "100m", "-groups", "1,2,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
    require.EqualError(t, ca(args, ob, eb, nopw), "no passphrase specified, remove -encrypt flag to write out-key in plaintext")
    assert.Equal(t, strings.Repeat(pwPromptOb, 5), ob.String()) // prompts 5 times before giving up
    assert.Empty(t, eb.String())
    assert.Equal(t, "", eb.String())

    // create valid cert/key for overwrite tests
    os.Remove(keyF.Name())
@@ -222,8 +222,8 @@ func Test_ca(t *testing.T) {
    eb.Reset()
    args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
    require.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA key: "+keyF.Name())
    assert.Empty(t, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", ob.String())
    assert.Equal(t, "", eb.String())

    // test that we won't overwrite existing key file
    os.Remove(keyF.Name())
@@ -231,8 +231,8 @@ func Test_ca(t *testing.T) {
    eb.Reset()
    args = []string{"-version", "1", "-name", "test", "-duration", "100m", "-groups", "1,, 2 , ,,,3,4,5", "-out-crt", crtF.Name(), "-out-key", keyF.Name()}
    require.EqualError(t, ca(args, ob, eb, nopw), "refusing to overwrite existing CA cert: "+crtF.Name())
    assert.Empty(t, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", ob.String())
    assert.Equal(t, "", eb.String())
    os.Remove(keyF.Name())

}
@@ -37,20 +37,20 @@ func Test_keygen(t *testing.T) {

    // required args
    assertHelpError(t, keygen([]string{"-out-pub", "nope"}, ob, eb), "-out-key is required")
    assert.Empty(t, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", ob.String())
    assert.Equal(t, "", eb.String())

    assertHelpError(t, keygen([]string{"-out-key", "nope"}, ob, eb), "-out-pub is required")
    assert.Empty(t, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", ob.String())
    assert.Equal(t, "", eb.String())

    // failed key write
    ob.Reset()
    eb.Reset()
    args := []string{"-out-pub", "/do/not/write/pleasepub", "-out-key", "/do/not/write/pleasekey"}
    require.EqualError(t, keygen(args, ob, eb), "error while writing out-key: open /do/not/write/pleasekey: "+NoSuchDirError)
    assert.Empty(t, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", ob.String())
    assert.Equal(t, "", eb.String())

    // create temp key file
    keyF, err := os.CreateTemp("", "test.key")
@@ -62,8 +62,8 @@ func Test_keygen(t *testing.T) {
    eb.Reset()
    args = []string{"-out-pub", "/do/not/write/pleasepub", "-out-key", keyF.Name()}
    require.EqualError(t, keygen(args, ob, eb), "error while writing out-pub: open /do/not/write/pleasepub: "+NoSuchDirError)
    assert.Empty(t, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", ob.String())
    assert.Equal(t, "", eb.String())

    // create temp pub file
    pubF, err := os.CreateTemp("", "test.pub")
@@ -75,8 +75,8 @@ func Test_keygen(t *testing.T) {
    eb.Reset()
    args = []string{"-out-pub", pubF.Name(), "-out-key", keyF.Name()}
    require.NoError(t, keygen(args, ob, eb))
    assert.Empty(t, ob.String())
    assert.Empty(t, eb.String())
    assert.Equal(t, "", ob.String())
    assert.Equal(t, "", eb.String())

    // read cert and key files
    rb, _ := os.ReadFile(keyF.Name())
@@ -17,7 +17,7 @@ func (he *helpError) Error() string {
|
||||
return he.s
|
||||
}
|
||||
|
||||
func newHelpErrorf(s string, v ...any) error {
|
||||
func newHelpErrorf(s string, v ...interface{}) error {
|
||||
return &helpError{s: fmt.Sprintf(s, v...)}
|
||||
}
|
||||
|
||||
|
||||
@@ -43,16 +43,16 @@ func Test_printCert(t *testing.T) {
|
||||
|
||||
// no path
|
||||
err := printCert([]string{}, ob, eb)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
assertHelpError(t, err, "-path is required")
|
||||
|
||||
// no cert at path
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
err = printCert([]string{"-path", "does_not_exist"}, ob, eb)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.EqualError(t, err, "unable to read cert; open does_not_exist: "+NoSuchFileError)
|
||||
|
||||
// invalid cert at path
|
||||
@@ -64,8 +64,8 @@ func Test_printCert(t *testing.T) {
|
||||
|
||||
tf.WriteString("-----BEGIN NOPE-----")
|
||||
err = printCert([]string{"-path", tf.Name()}, ob, eb)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.EqualError(t, err, "error while unmarshaling cert: input did not contain a valid PEM encoded block")
|
||||
|
||||
// test multiple certs
|
||||
@@ -155,7 +155,7 @@ func Test_printCert(t *testing.T) {
|
||||
`,
|
||||
ob.String(),
|
||||
)
|
||||
assert.Empty(t, eb.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// test json
|
||||
ob.Reset()
|
||||
@@ -177,7 +177,7 @@ func Test_printCert(t *testing.T) {
|
||||
`,
|
||||
ob.String(),
|
||||
)
|
||||
assert.Empty(t, eb.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
}
|
||||
|
||||
// NewTestCaCert will generate a CA cert
|
||||
|
||||
@@ -38,19 +38,19 @@ func Test_verify(t *testing.T) {
|
||||
|
||||
// required args
|
||||
assertHelpError(t, verify([]string{"-ca", "derp"}, ob, eb), "-crt is required")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
assertHelpError(t, verify([]string{"-crt", "derp"}, ob, eb), "-ca is required")
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// no ca at path
|
||||
ob.Reset()
|
||||
eb.Reset()
|
||||
err := verify([]string{"-ca", "does_not_exist", "-crt", "does_not_exist"}, ob, eb)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.EqualError(t, err, "error while reading ca: open does_not_exist: "+NoSuchFileError)
|
||||
|
||||
// invalid ca at path
|
||||
@@ -62,8 +62,8 @@ func Test_verify(t *testing.T) {
|
||||
|
||||
caFile.WriteString("-----BEGIN NOPE-----")
|
||||
err = verify([]string{"-ca", caFile.Name(), "-crt", "does_not_exist"}, ob, eb)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.EqualError(t, err, "error while adding ca cert to pool: input did not contain a valid PEM encoded block")
|
||||
|
||||
// make a ca for later
|
||||
@@ -76,8 +76,8 @@ func Test_verify(t *testing.T) {
|
||||
|
||||
// no crt at path
|
||||
err = verify([]string{"-ca", caFile.Name(), "-crt", "does_not_exist"}, ob, eb)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.EqualError(t, err, "unable to read crt: open does_not_exist: "+NoSuchFileError)
|
||||
|
||||
// invalid crt at path
|
||||
@@ -89,8 +89,8 @@ func Test_verify(t *testing.T) {
|
||||
|
||||
certFile.WriteString("-----BEGIN NOPE-----")
|
||||
err = verify([]string{"-ca", caFile.Name(), "-crt", certFile.Name()}, ob, eb)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.EqualError(t, err, "error while parsing crt: input did not contain a valid PEM encoded block")
|
||||
|
||||
// unverifiable cert at path
|
||||
@@ -106,8 +106,8 @@ func Test_verify(t *testing.T) {
|
||||
certFile.Write(b)
|
||||
|
||||
err = verify([]string{"-ca", caFile.Name(), "-crt", certFile.Name()}, ob, eb)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.ErrorIs(t, err, cert.ErrSignatureMismatch)
|
||||
|
||||
// verified cert at path
|
||||
@@ -118,7 +118,7 @@ func Test_verify(t *testing.T) {
|
||||
certFile.Write(b)
|
||||
|
||||
err = verify([]string{"-ca", caFile.Name(), "-crt", certFile.Name()}, ob, eb)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Empty(t, eb.String())
|
||||
assert.Equal(t, "", ob.String())
|
||||
assert.Equal(t, "", eb.String())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
@@ -3,9 +3,6 @@ package main
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
_ "net/http/pprof"
|
||||
"os"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
@@ -61,10 +58,6 @@ func main() {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
go func() {
|
||||
log.Println(http.ListenAndServe("0.0.0.0:6060", nil))
|
||||
}()
|
||||
|
||||
if !*configTest {
|
||||
ctrl.Start()
|
||||
notifyReady(l)
|
||||
|
||||
@@ -17,14 +17,14 @@ import (
|
||||
|
||||
"dario.cat/mergo"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.yaml.in/yaml/v3"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type C struct {
|
||||
path string
|
||||
files []string
|
||||
Settings map[string]any
|
||||
oldSettings map[string]any
|
||||
Settings map[interface{}]interface{}
|
||||
oldSettings map[interface{}]interface{}
|
||||
callbacks []func(*C)
|
||||
l *logrus.Logger
|
||||
reloadLock sync.Mutex
|
||||
@@ -32,7 +32,7 @@ type C struct {
|
||||
|
||||
func NewC(l *logrus.Logger) *C {
|
||||
return &C{
|
||||
Settings: make(map[string]any),
|
||||
Settings: make(map[interface{}]interface{}),
|
||||
l: l,
|
||||
}
|
||||
}
|
||||
@@ -92,8 +92,8 @@ func (c *C) HasChanged(k string) bool {
|
||||
}
|
||||
|
||||
var (
|
||||
nv any
|
||||
ov any
|
||||
nv interface{}
|
||||
ov interface{}
|
||||
)
|
||||
|
||||
if k == "" {
|
||||
@@ -147,7 +147,7 @@ func (c *C) ReloadConfig() {
|
||||
c.reloadLock.Lock()
|
||||
defer c.reloadLock.Unlock()
|
||||
|
||||
c.oldSettings = make(map[string]any)
|
||||
c.oldSettings = make(map[interface{}]interface{})
|
||||
for k, v := range c.Settings {
|
||||
c.oldSettings[k] = v
|
||||
}
|
||||
@@ -167,7 +167,7 @@ func (c *C) ReloadConfigString(raw string) error {
|
||||
c.reloadLock.Lock()
|
||||
defer c.reloadLock.Unlock()
|
||||
|
||||
c.oldSettings = make(map[string]any)
|
||||
c.oldSettings = make(map[interface{}]interface{})
|
||||
for k, v := range c.Settings {
|
||||
c.oldSettings[k] = v
|
||||
}
|
||||
@@ -201,7 +201,7 @@ func (c *C) GetStringSlice(k string, d []string) []string {
|
||||
return d
|
||||
}
|
||||
|
||||
rv, ok := r.([]any)
|
||||
rv, ok := r.([]interface{})
|
||||
if !ok {
|
||||
return d
|
||||
}
|
||||
@@ -215,13 +215,13 @@ func (c *C) GetStringSlice(k string, d []string) []string {
|
||||
}
|
||||
|
||||
// GetMap will get the map for k or return the default d if not found or invalid
|
||||
func (c *C) GetMap(k string, d map[string]any) map[string]any {
|
||||
func (c *C) GetMap(k string, d map[interface{}]interface{}) map[interface{}]interface{} {
|
||||
r := c.Get(k)
|
||||
if r == nil {
|
||||
return d
|
||||
}
|
||||
|
||||
v, ok := r.(map[string]any)
|
||||
v, ok := r.(map[interface{}]interface{})
|
||||
if !ok {
|
||||
return d
|
||||
}
|
||||
@@ -243,7 +243,7 @@ func (c *C) GetInt(k string, d int) int {
|
||||
// GetUint32 will get the uint32 for k or return the default d if not found or invalid
|
||||
func (c *C) GetUint32(k string, d uint32) uint32 {
|
||||
r := c.GetInt(k, int(d))
|
||||
if r < 0 || uint64(r) > uint64(math.MaxUint32) {
|
||||
if uint64(r) > uint64(math.MaxUint32) {
|
||||
return d
|
||||
}
|
||||
return uint32(r)
|
||||
@@ -266,22 +266,6 @@ func (c *C) GetBool(k string, d bool) bool {
|
||||
return v
|
||||
}
|
||||
|
||||
func AsBool(v any) (value bool, ok bool) {
|
||||
switch x := v.(type) {
|
||||
case bool:
|
||||
return x, true
|
||||
case string:
|
||||
switch x {
|
||||
case "y", "yes":
|
||||
return true, true
|
||||
case "n", "no":
|
||||
return false, true
|
||||
}
|
||||
}
|
||||
|
||||
return false, false
|
||||
}
|
||||
|
||||
// GetDuration will get the duration for k or return the default d if not found or invalid
|
||||
func (c *C) GetDuration(k string, d time.Duration) time.Duration {
|
||||
r := c.GetString(k, "")
|
||||
@@ -292,7 +276,7 @@ func (c *C) GetDuration(k string, d time.Duration) time.Duration {
|
||||
return v
|
||||
}
|
||||
|
||||
func (c *C) Get(k string) any {
|
||||
func (c *C) Get(k string) interface{} {
|
||||
return c.get(k, c.Settings)
|
||||
}
|
||||
|
||||
@@ -300,10 +284,10 @@ func (c *C) IsSet(k string) bool {
|
||||
return c.get(k, c.Settings) != nil
|
||||
}
|
||||
|
||||
func (c *C) get(k string, v any) any {
|
||||
func (c *C) get(k string, v interface{}) interface{} {
|
||||
parts := strings.Split(k, ".")
|
||||
for _, p := range parts {
|
||||
m, ok := v.(map[string]any)
|
||||
m, ok := v.(map[interface{}]interface{})
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
@@ -362,7 +346,7 @@ func (c *C) addFile(path string, direct bool) error {
|
||||
}
|
||||
|
||||
func (c *C) parseRaw(b []byte) error {
|
||||
var m map[string]any
|
||||
var m map[interface{}]interface{}
|
||||
|
||||
err := yaml.Unmarshal(b, &m)
|
||||
if err != nil {
|
||||
@@ -374,7 +358,7 @@ func (c *C) parseRaw(b []byte) error {
|
||||
}
|
||||
|
||||
func (c *C) parse() error {
|
||||
var m map[string]any
|
||||
var m map[interface{}]interface{}
|
||||
|
||||
for _, path := range c.files {
|
||||
b, err := os.ReadFile(path)
|
||||
@@ -382,7 +366,7 @@ func (c *C) parse() error {
|
||||
return err
|
||||
}
|
||||
|
||||
var nm map[string]any
|
||||
var nm map[interface{}]interface{}
|
||||
err = yaml.Unmarshal(b, &nm)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
"github.com/slackhq/nebula/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.yaml.in/yaml/v3"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
func TestConfig_Load(t *testing.T) {
|
||||
@@ -19,7 +19,7 @@ func TestConfig_Load(t *testing.T) {
|
||||
// invalid yaml
|
||||
c := NewC(l)
|
||||
os.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
|
||||
require.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[string]interface {}")
|
||||
require.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}")
|
||||
|
||||
// simple multi config merge
|
||||
c = NewC(l)
|
||||
@@ -31,8 +31,8 @@ func TestConfig_Load(t *testing.T) {
|
||||
os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
|
||||
os.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644)
|
||||
require.NoError(t, c.Load(dir))
|
||||
expected := map[string]any{
|
||||
"outer": map[string]any{
|
||||
expected := map[interface{}]interface{}{
|
||||
"outer": map[interface{}]interface{}{
|
||||
"inner": "override",
|
||||
},
|
||||
"new": "hi",
|
||||
@@ -44,12 +44,12 @@ func TestConfig_Get(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
// test simple type
|
||||
c := NewC(l)
|
||||
c.Settings["firewall"] = map[string]any{"outbound": "hi"}
|
||||
c.Settings["firewall"] = map[interface{}]interface{}{"outbound": "hi"}
|
||||
assert.Equal(t, "hi", c.Get("firewall.outbound"))
|
||||
|
||||
// test complex type
|
||||
inner := []map[string]any{{"port": "1", "code": "2"}}
|
||||
c.Settings["firewall"] = map[string]any{"outbound": inner}
|
||||
inner := []map[interface{}]interface{}{{"port": "1", "code": "2"}}
|
||||
c.Settings["firewall"] = map[interface{}]interface{}{"outbound": inner}
|
||||
assert.EqualValues(t, inner, c.Get("firewall.outbound"))
|
||||
|
||||
// test missing
|
||||
@@ -59,7 +59,7 @@ func TestConfig_Get(t *testing.T) {
|
||||
func TestConfig_GetStringSlice(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
c := NewC(l)
|
||||
c.Settings["slice"] = []any{"one", "two"}
|
||||
c.Settings["slice"] = []interface{}{"one", "two"}
|
||||
assert.Equal(t, []string{"one", "two"}, c.GetStringSlice("slice", []string{}))
|
||||
}
|
||||
|
||||
@@ -101,14 +101,14 @@ func TestConfig_HasChanged(t *testing.T) {
|
||||
// Test key change
|
||||
c = NewC(l)
|
||||
c.Settings["test"] = "hi"
|
||||
c.oldSettings = map[string]any{"test": "no"}
|
||||
c.oldSettings = map[interface{}]interface{}{"test": "no"}
|
||||
assert.True(t, c.HasChanged("test"))
|
||||
assert.True(t, c.HasChanged(""))
|
||||
|
||||
// No key change
|
||||
c = NewC(l)
|
||||
c.Settings["test"] = "hi"
|
||||
c.oldSettings = map[string]any{"test": "hi"}
|
||||
c.oldSettings = map[interface{}]interface{}{"test": "hi"}
|
||||
assert.False(t, c.HasChanged("test"))
|
||||
assert.False(t, c.HasChanged(""))
|
||||
}
|
||||
@@ -184,11 +184,11 @@ firewall:
|
||||
`),
|
||||
}
|
||||
|
||||
var m map[string]any
|
||||
var m map[any]any
|
||||
|
||||
// merge the same way config.parse() merges
|
||||
for _, b := range configs {
|
||||
var nm map[string]any
|
||||
var nm map[any]any
|
||||
err := yaml.Unmarshal(b, &nm)
|
||||
require.NoError(t, err)
|
||||
|
||||
@@ -205,15 +205,15 @@ firewall:
|
||||
t.Logf("Merged Config as YAML:\n%s", mYaml)
|
||||
|
||||
// If a bug is present, some items might be replaced instead of merged like we expect
|
||||
expected := map[string]any{
|
||||
"firewall": map[string]any{
|
||||
expected := map[any]any{
|
||||
"firewall": map[any]any{
|
||||
"inbound": []any{
|
||||
map[string]any{"host": "any", "port": "any", "proto": "icmp"},
|
||||
map[string]any{"groups": []any{"server"}, "port": 443, "proto": "tcp"},
|
||||
map[string]any{"groups": []any{"webapp"}, "port": 443, "proto": "tcp"}},
|
||||
map[any]any{"host": "any", "port": "any", "proto": "icmp"},
|
||||
map[any]any{"groups": []any{"server"}, "port": 443, "proto": "tcp"},
|
||||
map[any]any{"groups": []any{"webapp"}, "port": 443, "proto": "tcp"}},
|
||||
"outbound": []any{
|
||||
map[string]any{"host": "any", "port": "any", "proto": "any"}}},
|
||||
"listen": map[string]any{
|
||||
map[any]any{"host": "any", "port": "any", "proto": "any"}}},
|
||||
"listen": map[any]any{
|
||||
"host": "0.0.0.0",
|
||||
"port": 4242,
|
||||
},
|
||||
|
||||
@@ -4,16 +4,13 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/header"
|
||||
)
|
||||
|
||||
@@ -30,124 +27,130 @@ const (
|
||||
)
|
||||
|
||||
type connectionManager struct {
|
||||
in map[uint32]struct{}
|
||||
inLock *sync.RWMutex
|
||||
|
||||
out map[uint32]struct{}
|
||||
outLock *sync.RWMutex
|
||||
|
||||
// relayUsed holds which relay localIndexs are in use
|
||||
relayUsed map[uint32]struct{}
|
||||
relayUsedLock *sync.RWMutex
|
||||
|
||||
hostMap *HostMap
|
||||
trafficTimer *LockingTimerWheel[uint32]
|
||||
intf *Interface
|
||||
punchy *Punchy
|
||||
|
||||
// Configuration settings
|
||||
hostMap *HostMap
|
||||
trafficTimer *LockingTimerWheel[uint32]
|
||||
intf *Interface
|
||||
pendingDeletion map[uint32]struct{}
|
||||
punchy *Punchy
|
||||
checkInterval time.Duration
|
||||
pendingDeletionInterval time.Duration
|
||||
inactivityTimeout atomic.Int64
|
||||
dropInactive atomic.Bool
|
||||
|
||||
metricsTxPunchy metrics.Counter
|
||||
metricsTxPunchy metrics.Counter
|
||||
|
||||
l *logrus.Logger
|
||||
}
|
||||
|
||||
func newConnectionManagerFromConfig(l *logrus.Logger, c *config.C, hm *HostMap, p *Punchy) *connectionManager {
|
||||
cm := &connectionManager{
|
||||
hostMap: hm,
|
||||
l: l,
|
||||
punchy: p,
|
||||
relayUsed: make(map[uint32]struct{}),
|
||||
relayUsedLock: &sync.RWMutex{},
|
||||
metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
|
||||
func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager {
|
||||
var max time.Duration
|
||||
if checkInterval < pendingDeletionInterval {
|
||||
max = pendingDeletionInterval
|
||||
} else {
|
||||
max = checkInterval
|
||||
}
|
||||
|
||||
cm.reload(c, true)
|
||||
c.RegisterReloadCallback(func(c *config.C) {
|
||||
cm.reload(c, false)
|
||||
})
|
||||
|
||||
return cm
|
||||
}
|
||||
|
||||
func (cm *connectionManager) reload(c *config.C, initial bool) {
|
||||
if initial {
|
||||
cm.checkInterval = time.Duration(c.GetInt("timers.connection_alive_interval", 5)) * time.Second
|
||||
cm.pendingDeletionInterval = time.Duration(c.GetInt("timers.pending_deletion_interval", 10)) * time.Second
|
||||
|
||||
// We want at least a minimum resolution of 500ms per tick so that we can hit these intervals
|
||||
// pretty close to their configured duration.
|
||||
// The inactivity duration is checked each time a hostinfo ticks through so we don't need the wheel to contain it.
|
||||
minDuration := min(time.Millisecond*500, cm.checkInterval, cm.pendingDeletionInterval)
|
||||
maxDuration := max(cm.checkInterval, cm.pendingDeletionInterval)
|
||||
cm.trafficTimer = NewLockingTimerWheel[uint32](minDuration, maxDuration)
|
||||
nc := &connectionManager{
|
||||
hostMap: intf.hostMap,
|
||||
in: make(map[uint32]struct{}),
|
||||
inLock: &sync.RWMutex{},
|
||||
out: make(map[uint32]struct{}),
|
||||
outLock: &sync.RWMutex{},
|
||||
relayUsed: make(map[uint32]struct{}),
|
||||
relayUsedLock: &sync.RWMutex{},
|
||||
trafficTimer: NewLockingTimerWheel[uint32](time.Millisecond*500, max),
|
||||
intf: intf,
|
||||
pendingDeletion: make(map[uint32]struct{}),
|
||||
checkInterval: checkInterval,
|
||||
pendingDeletionInterval: pendingDeletionInterval,
|
||||
punchy: punchy,
|
||||
metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
|
||||
l: l,
|
||||
}
|
||||
|
||||
if initial || c.HasChanged("tunnels.inactivity_timeout") {
|
||||
old := cm.getInactivityTimeout()
|
||||
cm.inactivityTimeout.Store((int64)(c.GetDuration("tunnels.inactivity_timeout", 10*time.Minute)))
|
||||
if !initial {
|
||||
cm.l.WithField("oldDuration", old).
|
||||
WithField("newDuration", cm.getInactivityTimeout()).
|
||||
Info("Inactivity timeout has changed")
|
||||
}
|
||||
}
|
||||
|
||||
if initial || c.HasChanged("tunnels.drop_inactive") {
|
||||
old := cm.dropInactive.Load()
|
||||
cm.dropInactive.Store(c.GetBool("tunnels.drop_inactive", false))
|
||||
if !initial {
|
||||
cm.l.WithField("oldBool", old).
|
||||
WithField("newBool", cm.dropInactive.Load()).
|
||||
Info("Drop inactive setting has changed")
|
||||
}
|
||||
}
|
||||
nc.Start(ctx)
|
||||
return nc
|
||||
}
|
||||
|
||||
func (cm *connectionManager) getInactivityTimeout() time.Duration {
|
||||
return (time.Duration)(cm.inactivityTimeout.Load())
|
||||
}
|
||||
|
||||
func (cm *connectionManager) In(h *HostInfo) {
|
||||
h.in.Store(true)
|
||||
}
|
||||
|
||||
func (cm *connectionManager) Out(h *HostInfo) {
|
||||
h.out.Store(true)
|
||||
}
|
||||
|
||||
func (cm *connectionManager) RelayUsed(localIndex uint32) {
|
||||
cm.relayUsedLock.RLock()
|
||||
func (n *connectionManager) In(localIndex uint32) {
|
||||
n.inLock.RLock()
|
||||
// If this already exists, return
|
||||
if _, ok := cm.relayUsed[localIndex]; ok {
|
||||
cm.relayUsedLock.RUnlock()
|
||||
if _, ok := n.in[localIndex]; ok {
|
||||
n.inLock.RUnlock()
|
||||
return
|
||||
}
|
||||
cm.relayUsedLock.RUnlock()
|
||||
cm.relayUsedLock.Lock()
|
||||
cm.relayUsed[localIndex] = struct{}{}
|
||||
cm.relayUsedLock.Unlock()
|
||||
n.inLock.RUnlock()
|
||||
n.inLock.Lock()
|
||||
n.in[localIndex] = struct{}{}
|
||||
n.inLock.Unlock()
|
||||
}
|
||||
|
||||
func (n *connectionManager) Out(localIndex uint32) {
|
||||
n.outLock.RLock()
|
||||
// If this already exists, return
|
||||
if _, ok := n.out[localIndex]; ok {
|
||||
n.outLock.RUnlock()
|
||||
return
|
||||
}
|
||||
n.outLock.RUnlock()
|
||||
n.outLock.Lock()
|
||||
n.out[localIndex] = struct{}{}
|
||||
n.outLock.Unlock()
|
||||
}
|
||||
|
||||
func (n *connectionManager) RelayUsed(localIndex uint32) {
|
||||
n.relayUsedLock.RLock()
|
||||
// If this already exists, return
|
||||
if _, ok := n.relayUsed[localIndex]; ok {
|
||||
n.relayUsedLock.RUnlock()
|
||||
return
|
||||
}
|
||||
n.relayUsedLock.RUnlock()
|
||||
n.relayUsedLock.Lock()
|
||||
n.relayUsed[localIndex] = struct{}{}
|
||||
n.relayUsedLock.Unlock()
|
||||
}
|
||||
|
||||
// getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and
|
||||
// resets the state for this local index
|
||||
func (cm *connectionManager) getAndResetTrafficCheck(h *HostInfo, now time.Time) (bool, bool) {
|
||||
in := h.in.Swap(false)
|
||||
out := h.out.Swap(false)
|
||||
if in || out {
|
||||
h.lastUsed = now
|
||||
}
|
||||
func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) {
|
||||
n.inLock.Lock()
|
||||
n.outLock.Lock()
|
||||
_, in := n.in[localIndex]
|
||||
_, out := n.out[localIndex]
|
||||
delete(n.in, localIndex)
|
||||
delete(n.out, localIndex)
|
||||
n.inLock.Unlock()
|
||||
n.outLock.Unlock()
|
||||
return in, out
|
||||
}
|
||||
|
||||
// AddTrafficWatch must be called for every new HostInfo.
|
||||
// We will continue to monitor the HostInfo until the tunnel is dropped.
|
||||
func (cm *connectionManager) AddTrafficWatch(h *HostInfo) {
|
||||
if h.out.Swap(true) == false {
|
||||
cm.trafficTimer.Add(h.localIndexId, cm.checkInterval)
|
||||
func (n *connectionManager) AddTrafficWatch(localIndex uint32) {
|
||||
// Use a write lock directly because it should be incredibly rare that we are ever already tracking this index
|
||||
n.outLock.Lock()
|
||||
if _, ok := n.out[localIndex]; ok {
|
||||
n.outLock.Unlock()
|
||||
return
|
||||
}
|
||||
n.out[localIndex] = struct{}{}
|
||||
n.trafficTimer.Add(localIndex, n.checkInterval)
|
||||
n.outLock.Unlock()
|
||||
}
|
||||
|
||||
func (cm *connectionManager) Start(ctx context.Context) {
|
||||
clockSource := time.NewTicker(cm.trafficTimer.t.tickDuration)
|
||||
func (n *connectionManager) Start(ctx context.Context) {
|
||||
go n.Run(ctx)
|
||||
}
|
||||
|
||||
func (n *connectionManager) Run(ctx context.Context) {
|
||||
//TODO: this tick should be based on the min wheel tick? Check firewall
|
||||
clockSource := time.NewTicker(500 * time.Millisecond)
|
||||
defer clockSource.Stop()
|
||||
|
||||
p := []byte("")
|
||||
@@ -160,61 +163,61 @@ func (cm *connectionManager) Start(ctx context.Context) {
|
||||
return
|
||||
|
||||
case now := <-clockSource.C:
|
||||
cm.trafficTimer.Advance(now)
|
||||
n.trafficTimer.Advance(now)
|
||||
for {
|
||||
localIndex, has := cm.trafficTimer.Purge()
|
||||
localIndex, has := n.trafficTimer.Purge()
|
||||
if !has {
|
||||
break
|
||||
}
|
||||
|
||||
cm.doTrafficCheck(localIndex, p, nb, out, now)
|
||||
n.doTrafficCheck(localIndex, p, nb, out, now)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cm *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
|
||||
decision, hostinfo, primary := cm.makeTrafficDecision(localIndex, now)
|
||||
func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
|
||||
decision, hostinfo, primary := n.makeTrafficDecision(localIndex, now)
|
||||
|
||||
switch decision {
|
||||
case deleteTunnel:
|
||||
if cm.hostMap.DeleteHostInfo(hostinfo) {
|
||||
if n.hostMap.DeleteHostInfo(hostinfo) {
|
||||
// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
|
||||
cm.intf.lightHouse.DeleteVpnAddrs(hostinfo.vpnAddrs)
|
||||
n.intf.lightHouse.DeleteVpnAddrs(hostinfo.vpnAddrs)
|
||||
}
|
||||
|
||||
case closeTunnel:
|
||||
cm.intf.sendCloseTunnel(hostinfo)
|
||||
cm.intf.closeTunnel(hostinfo)
|
||||
n.intf.sendCloseTunnel(hostinfo)
|
||||
n.intf.closeTunnel(hostinfo)
|
||||
|
||||
case swapPrimary:
|
||||
cm.swapPrimary(hostinfo, primary)
|
||||
n.swapPrimary(hostinfo, primary)
|
||||
|
||||
case migrateRelays:
|
||||
cm.migrateRelayUsed(hostinfo, primary)
|
||||
n.migrateRelayUsed(hostinfo, primary)
|
||||
|
||||
case tryRehandshake:
|
||||
cm.tryRehandshake(hostinfo)
|
||||
n.tryRehandshake(hostinfo)
|
||||
|
||||
case sendTestPacket:
|
||||
cm.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
|
||||
n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
|
||||
}
|
||||
|
||||
cm.resetRelayTrafficCheck(hostinfo)
|
||||
n.resetRelayTrafficCheck(hostinfo)
|
||||
}
|
||||
|
||||
func (cm *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
|
||||
func (n *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
|
||||
if hostinfo != nil {
|
||||
cm.relayUsedLock.Lock()
|
||||
defer cm.relayUsedLock.Unlock()
|
||||
n.relayUsedLock.Lock()
|
||||
defer n.relayUsedLock.Unlock()
|
||||
// No need to migrate any relays, delete usage info now.
|
||||
for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
|
||||
delete(cm.relayUsed, idx)
|
||||
delete(n.relayUsed, idx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
|
||||
func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
|
||||
relayFor := oldhostinfo.relayState.CopyAllRelayFor()
|
||||
|
||||
for _, r := range relayFor {
|
||||
@@ -224,51 +227,46 @@ func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo
|
||||
var relayFrom netip.Addr
|
||||
var relayTo netip.Addr
|
||||
switch {
|
||||
case ok:
|
||||
switch existing.State {
|
||||
case Established, PeerRequested, Disestablished:
|
||||
// This relay already exists in newhostinfo, then do nothing.
|
||||
continue
|
||||
case Requested:
|
||||
// The relay exists in a Requested state; re-send the request
|
||||
index = existing.LocalIndex
|
||||
switch r.Type {
|
||||
case TerminalType:
|
||||
relayFrom = cm.intf.myVpnAddrs[0]
|
||||
relayTo = existing.PeerAddr
|
||||
case ForwardingType:
|
||||
relayFrom = existing.PeerAddr
|
||||
relayTo = newhostinfo.vpnAddrs[0]
|
||||
default:
|
||||
// should never happen
|
||||
panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
|
||||
}
|
||||
case ok && existing.State == Established:
|
||||
// This relay already exists in newhostinfo, then do nothing.
|
||||
continue
|
||||
case ok && existing.State == Requested:
|
||||
// The relay exists in a Requested state; re-send the request
|
||||
index = existing.LocalIndex
|
||||
switch r.Type {
|
||||
case TerminalType:
|
||||
relayFrom = n.intf.myVpnAddrs[0]
|
||||
relayTo = existing.PeerAddr
|
||||
case ForwardingType:
|
||||
relayFrom = existing.PeerAddr
|
||||
relayTo = newhostinfo.vpnAddrs[0]
|
||||
default:
|
||||
// should never happen
|
||||
}
|
||||
case !ok:
|
||||
cm.relayUsedLock.RLock()
|
||||
if _, relayUsed := cm.relayUsed[r.LocalIndex]; !relayUsed {
|
||||
n.relayUsedLock.RLock()
|
||||
if _, relayUsed := n.relayUsed[r.LocalIndex]; !relayUsed {
|
||||
// The relay hasn't been used; don't migrate it.
|
||||
cm.relayUsedLock.RUnlock()
|
||||
n.relayUsedLock.RUnlock()
|
||||
continue
|
||||
}
|
||||
cm.relayUsedLock.RUnlock()
|
||||
n.relayUsedLock.RUnlock()
|
||||
// The relay doesn't exist at all; create some relay state and send the request.
|
||||
var err error
|
||||
index, err = AddRelay(cm.l, newhostinfo, cm.hostMap, r.PeerAddr, nil, r.Type, Requested)
|
||||
index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerAddr, nil, r.Type, Requested)
|
||||
if err != nil {
|
||||
cm.l.WithError(err).Error("failed to migrate relay to new hostinfo")
|
||||
n.l.WithError(err).Error("failed to migrate relay to new hostinfo")
|
||||
continue
|
||||
}
|
||||
switch r.Type {
|
||||
case TerminalType:
|
||||
relayFrom = cm.intf.myVpnAddrs[0]
|
||||
relayFrom = n.intf.myVpnAddrs[0]
|
||||
relayTo = r.PeerAddr
|
||||
case ForwardingType:
|
||||
relayFrom = r.PeerAddr
|
||||
relayTo = newhostinfo.vpnAddrs[0]
|
||||
default:
|
||||
// should never happen
|
||||
panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -281,12 +279,12 @@ func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo
|
||||
switch newhostinfo.GetCert().Certificate.Version() {
|
||||
case cert.Version1:
|
||||
if !relayFrom.Is4() {
|
||||
cm.l.Error("can not migrate v1 relay with a v6 network because the relay is not running a current nebula version")
|
||||
n.l.Error("can not migrate v1 relay with a v6 network because the relay is not running a current nebula version")
|
||||
continue
|
||||
}
|
||||
|
||||
if !relayTo.Is4() {
|
||||
cm.l.Error("can not migrate v1 relay with a v6 remote network because the relay is not running a current nebula version")
|
||||
n.l.Error("can not migrate v1 relay with a v6 remote network because the relay is not running a current nebula version")
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -298,16 +296,16 @@ func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo
|
||||
req.RelayFromAddr = netAddrToProtoAddr(relayFrom)
|
||||
req.RelayToAddr = netAddrToProtoAddr(relayTo)
|
||||
default:
|
||||
newhostinfo.logger(cm.l).Error("Unknown certificate version found while attempting to migrate relay")
|
||||
newhostinfo.logger(n.l).Error("Unknown certificate version found while attempting to migrate relay")
|
||||
continue
|
||||
}
|
||||
|
||||
msg, err := req.Marshal()
|
||||
if err != nil {
|
||||
cm.l.WithError(err).Error("failed to marshal Control message to migrate relay")
|
||||
n.l.WithError(err).Error("failed to marshal Control message to migrate relay")
|
||||
} else {
|
||||
cm.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
|
||||
cm.l.WithFields(logrus.Fields{
|
||||
n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
|
||||
n.l.WithFields(logrus.Fields{
|
||||
"relayFrom": req.RelayFromAddr,
|
||||
"relayTo": req.RelayToAddr,
|
||||
"initiatorRelayIndex": req.InitiatorRelayIndex,
|
||||
@@ -318,44 +316,46 @@ func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo
|
||||
}
|
||||
}
|
||||
|
||||
func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
|
||||
// Read lock the main hostmap to order decisions based on tunnels being the primary tunnel
|
||||
cm.hostMap.RLock()
|
||||
defer cm.hostMap.RUnlock()
|
||||
func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
|
||||
n.hostMap.RLock()
|
||||
defer n.hostMap.RUnlock()
|
||||
|
||||
hostinfo := cm.hostMap.Indexes[localIndex]
|
||||
hostinfo := n.hostMap.Indexes[localIndex]
|
||||
if hostinfo == nil {
|
||||
cm.l.WithField("localIndex", localIndex).Debugln("Not found in hostmap")
|
||||
n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
|
||||
delete(n.pendingDeletion, localIndex)
|
||||
return doNothing, nil, nil
|
||||
}
|
||||
|
||||
if cm.isInvalidCertificate(now, hostinfo) {
|
||||
if n.isInvalidCertificate(now, hostinfo) {
|
||||
delete(n.pendingDeletion, hostinfo.localIndexId)
|
||||
return closeTunnel, hostinfo, nil
|
||||
}
|
||||
|
||||
primary := cm.hostMap.Hosts[hostinfo.vpnAddrs[0]]
|
||||
primary := n.hostMap.Hosts[hostinfo.vpnAddrs[0]]
|
||||
mainHostInfo := true
|
||||
if primary != nil && primary != hostinfo {
|
||||
mainHostInfo = false
|
||||
}
|
||||
|
||||
// Check for traffic on this hostinfo
|
||||
inTraffic, outTraffic := cm.getAndResetTrafficCheck(hostinfo, now)
|
||||
inTraffic, outTraffic := n.getAndResetTrafficCheck(localIndex)
|
||||
|
||||
// A hostinfo is determined alive if there is incoming traffic
|
||||
if inTraffic {
|
||||
decision := doNothing
|
||||
if cm.l.Level >= logrus.DebugLevel {
|
||||
hostinfo.logger(cm.l).
|
||||
if n.l.Level >= logrus.DebugLevel {
|
||||
hostinfo.logger(n.l).
|
||||
WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
|
||||
Debug("Tunnel status")
|
||||
}
|
||||
hostinfo.pendingDeletion.Store(false)
|
||||
delete(n.pendingDeletion, hostinfo.localIndexId)
|
||||
|
||||
if mainHostInfo {
|
||||
decision = tryRehandshake
|
||||
|
||||
} else {
|
||||
if cm.shouldSwapPrimary(hostinfo) {
|
||||
if n.shouldSwapPrimary(hostinfo, primary) {
|
||||
decision = swapPrimary
|
||||
} else {
|
||||
// migrate the relays to the primary, if in use.
|
||||
@@ -363,55 +363,46 @@ func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Tim
|
||||
}
|
||||
}
|
||||
|
||||
cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)
|
||||
n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
|
||||
|
||||
if !outTraffic {
|
||||
// Send a punch packet to keep the NAT state alive
|
||||
cm.sendPunch(hostinfo)
|
||||
n.sendPunch(hostinfo)
|
||||
}
|
||||
|
||||
return decision, hostinfo, primary
|
||||
}
|
||||
|
||||
if hostinfo.pendingDeletion.Load() {
|
||||
if _, ok := n.pendingDeletion[hostinfo.localIndexId]; ok {
|
||||
// We have already sent a test packet and nothing was returned, this hostinfo is dead
|
||||
hostinfo.logger(cm.l).
|
||||
hostinfo.logger(n.l).
|
||||
WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
|
||||
Info("Tunnel status")
|
||||
|
||||
delete(n.pendingDeletion, hostinfo.localIndexId)
|
||||
return deleteTunnel, hostinfo, nil
|
||||
}
|
||||
|
||||
decision := doNothing
|
||||
if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
|
||||
if !outTraffic {
|
||||
inactiveFor, isInactive := cm.isInactive(hostinfo, now)
|
||||
if isInactive {
|
||||
// Tunnel is inactive, tear it down
|
||||
hostinfo.logger(cm.l).
|
||||
WithField("inactiveDuration", inactiveFor).
|
||||
WithField("primary", mainHostInfo).
|
||||
Info("Dropping tunnel due to inactivity")
|
||||
|
||||
return closeTunnel, hostinfo, primary
|
||||
}
|
||||
|
||||
// If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel.
|
||||
// Just maintain NAT state if configured to do so.
|
||||
cm.sendPunch(hostinfo)
|
||||
cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)
|
||||
n.sendPunch(hostinfo)
|
||||
n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
|
||||
return doNothing, nil, nil
|
||||
|
||||
}
|
||||
|
||||
if cm.punchy.GetTargetEverything() {
|
||||
if n.punchy.GetTargetEverything() {
|
||||
// This is similar to the old punchy behavior with a slight optimization.
|
||||
// We aren't receiving traffic but we are sending it, punch on all known
|
||||
// ips in case we need to re-prime NAT state
|
||||
cm.sendPunch(hostinfo)
|
||||
n.sendPunch(hostinfo)
|
||||
}
|
||||
|
||||
if cm.l.Level >= logrus.DebugLevel {
|
||||
hostinfo.logger(cm.l).
|
||||
if n.l.Level >= logrus.DebugLevel {
|
||||
hostinfo.logger(n.l).
|
||||
WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
|
||||
Debug("Tunnel status")
|
||||
}
|
||||
@@ -420,33 +411,17 @@ func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Tim
|
||||
decision = sendTestPacket
|
||||
|
||||
} else {
|
||||
if cm.l.Level >= logrus.DebugLevel {
|
||||
hostinfo.logger(cm.l).Debugf("Hostinfo sadness")
|
||||
if n.l.Level >= logrus.DebugLevel {
|
||||
hostinfo.logger(n.l).Debugf("Hostinfo sadness")
|
||||
}
|
||||
}
|
||||
|
||||
hostinfo.pendingDeletion.Store(true)
|
||||
cm.trafficTimer.Add(hostinfo.localIndexId, cm.pendingDeletionInterval)
|
||||
n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
|
||||
n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
|
||||
return decision, hostinfo, nil
|
||||
}
|
||||
|
||||
func (cm *connectionManager) isInactive(hostinfo *HostInfo, now time.Time) (time.Duration, bool) {
|
||||
if cm.dropInactive.Load() == false {
|
||||
// We aren't configured to drop inactive tunnels
|
||||
return 0, false
|
||||
}
|
||||
|
||||
inactiveDuration := now.Sub(hostinfo.lastUsed)
|
||||
if inactiveDuration < cm.getInactivityTimeout() {
|
||||
// It's not considered inactive
|
||||
return inactiveDuration, false
|
||||
}
|
||||
|
||||
// The tunnel is inactive
|
||||
return inactiveDuration, true
|
||||
}
|
||||
|
||||
func (cm *connectionManager) shouldSwapPrimary(current *HostInfo) bool {
|
||||
func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
|
||||
// The primary tunnel is the most recent handshake to complete locally and should work entirely fine.
|
||||
// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
|
||||
// Let's sort this out.
|
||||
@@ -454,127 +429,83 @@ func (cm *connectionManager) shouldSwapPrimary(current *HostInfo) bool {
|
||||
// Only one side should swap because if both swap then we may never resolve to a single tunnel.
|
||||
// vpn addr is static across all tunnels for this host pair so lets
|
||||
// use that to determine if we should consider swapping.
|
||||
if current.vpnAddrs[0].Compare(cm.intf.myVpnAddrs[0]) < 0 {
|
||||
if current.vpnAddrs[0].Compare(n.intf.myVpnAddrs[0]) < 0 {
|
||||
// Their primary vpn addr is less than mine. Do not swap.
|
||||
return false
|
||||
}
|
||||
|
||||
crt := cm.intf.pki.getCertState().getCertificate(current.ConnectionState.myCert.Version())
|
||||
if crt == nil {
|
||||
//my cert was reloaded away. We should definitely swap from this tunnel
|
||||
return true
|
||||
}
|
||||
crt := n.intf.pki.getCertState().getCertificate(current.ConnectionState.myCert.Version())
|
||||
// If this tunnel is using the latest certificate then we should swap it to primary for a bit and see if things
|
||||
// settle down.
|
||||
return bytes.Equal(current.ConnectionState.myCert.Signature(), crt.Signature())
|
||||
}
|
||||
|
||||
func (cm *connectionManager) swapPrimary(current, primary *HostInfo) {
|
||||
cm.hostMap.Lock()
|
||||
func (n *connectionManager) swapPrimary(current, primary *HostInfo) {
|
||||
n.hostMap.Lock()
|
||||
// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
|
||||
if cm.hostMap.Hosts[current.vpnAddrs[0]] == primary {
|
||||
cm.hostMap.unlockedMakePrimary(current)
|
||||
if n.hostMap.Hosts[current.vpnAddrs[0]] == primary {
|
||||
n.hostMap.unlockedMakePrimary(current)
|
||||
}
|
||||
cm.hostMap.Unlock()
|
||||
n.hostMap.Unlock()
|
||||
}
|
||||
|
||||
// isInvalidCertificate decides if we should destroy a tunnel.
|
||||
// returns true if pki.disconnect_invalid is true and the certificate is no longer valid.
|
||||
// Blocklisted certificates will skip the pki.disconnect_invalid check and return true.
|
||||
func (cm *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
|
||||
// isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and
|
||||
// the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid
|
||||
// check and return true.
|
||||
func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
|
||||
remoteCert := hostinfo.GetCert()
|
||||
if remoteCert == nil {
|
||||
return false //don't tear down tunnels for handshakes in progress
|
||||
}
|
||||
|
||||
caPool := cm.intf.pki.GetCAPool()
|
||||
err := caPool.VerifyCachedCertificate(now, remoteCert)
|
||||
if err == nil {
|
||||
return false //cert is still valid! yay!
|
||||
} else if err == cert.ErrBlockListed { //avoiding errors.Is for speed
|
||||
// Block listed certificates should always be disconnected
|
||||
hostinfo.logger(cm.l).WithError(err).
|
||||
WithField("fingerprint", remoteCert.Fingerprint).
|
||||
Info("Remote certificate is blocked, tearing down the tunnel")
|
||||
return true
|
||||
} else if cm.intf.disconnectInvalid.Load() {
|
||||
hostinfo.logger(cm.l).WithError(err).
|
||||
WithField("fingerprint", remoteCert.Fingerprint).
|
||||
Info("Remote certificate is no longer valid, tearing down the tunnel")
|
||||
return true
|
||||
} else {
|
||||
//if we reach here, the cert is no longer valid, but we're configured to keep tunnels from now-invalid certs open
|
||||
return false
|
||||
}
|
||||
|
||||
caPool := n.intf.pki.GetCAPool()
|
||||
err := caPool.VerifyCachedCertificate(now, remoteCert)
|
||||
if err == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
if !n.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
|
||||
// Block listed certificates should always be disconnected
|
||||
return false
|
||||
}
|
||||
|
||||
hostinfo.logger(n.l).WithError(err).
|
||||
WithField("fingerprint", remoteCert.Fingerprint).
|
||||
Info("Remote certificate is no longer valid, tearing down the tunnel")
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (cm *connectionManager) sendPunch(hostinfo *HostInfo) {
|
||||
if !cm.punchy.GetPunch() {
|
||||
func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
|
||||
if !n.punchy.GetPunch() {
|
||||
// Punching is disabled
|
||||
return
|
||||
}
|
||||
|
||||
if cm.intf.lightHouse.IsAnyLighthouseAddr(hostinfo.vpnAddrs) {
|
||||
// Do not punch to lighthouses, we assume our lighthouse update interval is good enough.
|
||||
// In the event the update interval is not sufficient to maintain NAT state then a publicly available lighthouse
|
||||
// would lose the ability to notify us and punchy.respond would become unreliable.
|
||||
return
|
||||
}
|
||||
|
||||
if cm.punchy.GetTargetEverything() {
|
||||
hostinfo.remotes.ForEach(cm.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
|
||||
cm.metricsTxPunchy.Inc(1)
|
||||
cm.intf.outside.WriteTo([]byte{1}, addr)
|
||||
if n.punchy.GetTargetEverything() {
|
||||
hostinfo.remotes.ForEach(n.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
|
||||
n.metricsTxPunchy.Inc(1)
|
||||
n.intf.outside.WriteTo([]byte{1}, addr)
|
||||
})
|
||||
|
||||
} else if hostinfo.remote.IsValid() {
|
||||
cm.metricsTxPunchy.Inc(1)
|
||||
cm.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
|
||||
n.metricsTxPunchy.Inc(1)
|
||||
n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
|
||||
}
|
||||
}
|
||||
|
||||
func (cm *connectionManager) tryRehandshake(hostinfo *HostInfo) {
|
||||
cs := cm.intf.pki.getCertState()
|
||||
func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
|
||||
cs := n.intf.pki.getCertState()
|
||||
curCrt := hostinfo.ConnectionState.myCert
|
||||
curCrtVersion := curCrt.Version()
|
||||
myCrt := cs.getCertificate(curCrtVersion)
|
||||
if myCrt == nil {
|
||||
cm.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
|
||||
WithField("version", curCrtVersion).
|
||||
WithField("reason", "local certificate removed").
|
||||
Info("Re-handshaking with remote")
|
||||
cm.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil)
|
||||
myCrt := cs.getCertificate(curCrt.Version())
|
||||
if curCrt.Version() >= cs.defaultVersion && bytes.Equal(curCrt.Signature(), myCrt.Signature()) == true {
|
||||
// The current tunnel is using the latest certificate and version, no need to rehandshake.
|
||||
return
|
||||
}
|
||||
peerCrt := hostinfo.ConnectionState.peerCert
|
||||
if peerCrt != nil && curCrtVersion < peerCrt.Certificate.Version() {
|
||||
// if our certificate version is less than theirs, and we have a matching version available, rehandshake?
|
||||
if cs.getCertificate(peerCrt.Certificate.Version()) != nil {
|
||||
cm.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
|
||||
WithField("version", curCrtVersion).
|
||||
WithField("peerVersion", peerCrt.Certificate.Version()).
|
||||
WithField("reason", "local certificate version lower than peer, attempting to correct").
|
||||
Info("Re-handshaking with remote")
|
||||
cm.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], func(hh *HandshakeHostInfo) {
|
||||
hh.initiatingVersionOverride = peerCrt.Certificate.Version()
|
||||
})
|
||||
return
|
||||
}
|
||||
}
|
||||
if !bytes.Equal(curCrt.Signature(), myCrt.Signature()) {
|
||||
cm.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
|
||||
WithField("reason", "local certificate is not current").
|
||||
Info("Re-handshaking with remote")
|
||||
|
||||
cm.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil)
|
||||
return
|
||||
}
|
||||
if curCrtVersion < cs.initiatingVersion {
|
||||
cm.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
|
||||
WithField("reason", "current cert version < pki.initiatingVersion").
|
||||
Info("Re-handshaking with remote")
|
||||
n.l.WithField("vpnAddrs", hostinfo.vpnAddrs).
|
||||
WithField("reason", "local certificate is not current").
|
||||
Info("Re-handshaking with remote")
|
||||
|
||||
cm.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil)
|
||||
return
|
||||
}
|
||||
n.intf.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], nil)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package nebula
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ed25519"
|
||||
"crypto/rand"
|
||||
"net/netip"
|
||||
@@ -22,7 +23,7 @@ func newTestLighthouse() *LightHouse {
|
||||
addrMap: map[netip.Addr]*RemoteList{},
|
||||
queryChan: make(chan netip.Addr, 10),
|
||||
}
|
||||
lighthouses := []netip.Addr{}
|
||||
lighthouses := map[netip.Addr]struct{}{}
|
||||
staticList := map[netip.Addr]struct{}{}
|
||||
|
||||
lh.lighthouses.Store(&lighthouses)
|
||||
@@ -43,10 +44,10 @@ func Test_NewConnectionManagerTest(t *testing.T) {
|
||||
hostMap.preferredRanges.Store(&preferredRanges)
|
||||
|
||||
cs := &CertState{
|
||||
initiatingVersion: cert.Version1,
|
||||
privateKey: []byte{},
|
||||
v1Cert: &dummyCert{version: cert.Version1},
|
||||
v1HandshakeBytes: []byte{},
|
||||
defaultVersion: cert.Version1,
|
||||
privateKey: []byte{},
|
||||
v1Cert: &dummyCert{version: cert.Version1},
|
||||
v1HandshakeBytes: []byte{},
|
||||
}
|
||||
|
||||
lh := newTestLighthouse()
|
||||
@@ -63,10 +64,10 @@ func Test_NewConnectionManagerTest(t *testing.T) {
|
||||
ifce.pki.cs.Store(cs)
|
||||
|
||||
// Create manager
|
||||
conf := config.NewC(l)
|
||||
punchy := NewPunchyFromConfig(l, conf)
|
||||
nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
|
||||
nc.intf = ifce
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
punchy := NewPunchyFromConfig(l, config.NewC(l))
|
||||
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
|
||||
p := []byte("")
|
||||
nb := make([]byte, 12, 12)
|
||||
out := make([]byte, mtu)
|
||||
@@ -84,33 +85,32 @@ func Test_NewConnectionManagerTest(t *testing.T) {
|
||||
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
||||
|
||||
// We saw traffic out to vpnIp
|
||||
nc.Out(hostinfo)
|
||||
nc.In(hostinfo)
|
||||
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||
nc.Out(hostinfo.localIndexId)
|
||||
nc.In(hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
assert.True(t, hostinfo.out.Load())
|
||||
assert.True(t, hostinfo.in.Load())
|
||||
assert.Contains(t, nc.out, hostinfo.localIndexId)
|
||||
|
||||
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
|
||||
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||
assert.False(t, hostinfo.out.Load())
|
||||
assert.False(t, hostinfo.in.Load())
|
||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||
|
||||
// Do another traffic check tick, this host should be pending deletion now
|
||||
nc.Out(hostinfo)
|
||||
assert.True(t, hostinfo.out.Load())
|
||||
nc.Out(hostinfo.localIndexId)
|
||||
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||
assert.True(t, hostinfo.pendingDeletion.Load())
|
||||
assert.False(t, hostinfo.out.Load())
|
||||
assert.False(t, hostinfo.in.Load())
|
||||
assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
|
||||
// Do a final traffic check tick, the host should now be removed
|
||||
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs)
|
||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
}
|
||||
|
||||
@@ -126,10 +126,10 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
||||
hostMap.preferredRanges.Store(&preferredRanges)
|
||||
|
||||
cs := &CertState{
|
||||
initiatingVersion: cert.Version1,
|
||||
privateKey: []byte{},
|
||||
v1Cert: &dummyCert{version: cert.Version1},
|
||||
v1HandshakeBytes: []byte{},
|
||||
defaultVersion: cert.Version1,
|
||||
privateKey: []byte{},
|
||||
v1Cert: &dummyCert{version: cert.Version1},
|
||||
v1HandshakeBytes: []byte{},
|
||||
}
|
||||
|
||||
lh := newTestLighthouse()
|
||||
@@ -146,10 +146,10 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
||||
ifce.pki.cs.Store(cs)
|
||||
|
||||
// Create manager
|
||||
conf := config.NewC(l)
|
||||
punchy := NewPunchyFromConfig(l, conf)
|
||||
nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
|
||||
nc.intf = ifce
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
punchy := NewPunchyFromConfig(l, config.NewC(l))
|
||||
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
|
||||
p := []byte("")
|
||||
nb := make([]byte, 12, 12)
|
||||
out := make([]byte, mtu)
|
||||
@@ -167,129 +167,33 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
||||
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
||||
|
||||
// We saw traffic out to vpnIp
|
||||
nc.Out(hostinfo)
|
||||
nc.In(hostinfo)
|
||||
assert.True(t, hostinfo.in.Load())
|
||||
assert.True(t, hostinfo.out.Load())
|
||||
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||
nc.Out(hostinfo.localIndexId)
|
||||
nc.In(hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.vpnAddrs[0])
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
|
||||
// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
|
||||
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||
assert.False(t, hostinfo.out.Load())
|
||||
assert.False(t, hostinfo.in.Load())
|
||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||
|
||||
// Do another traffic check tick, this host should be pending deletion now
|
||||
nc.Out(hostinfo)
|
||||
nc.Out(hostinfo.localIndexId)
|
||||
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||
assert.True(t, hostinfo.pendingDeletion.Load())
|
||||
assert.False(t, hostinfo.out.Load())
|
||||
assert.False(t, hostinfo.in.Load())
|
||||
assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
|
||||
// We saw traffic, should no longer be pending deletion
|
||||
nc.In(hostinfo)
|
||||
nc.In(hostinfo.localIndexId)
|
||||
nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
|
||||
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||
assert.False(t, hostinfo.out.Load())
|
||||
assert.False(t, hostinfo.in.Load())
|
||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
}
|
||||
|
||||
func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
localrange := netip.MustParsePrefix("10.1.1.1/24")
|
||||
vpnAddrs := []netip.Addr{netip.MustParseAddr("172.1.1.2")}
|
||||
preferredRanges := []netip.Prefix{localrange}
|
||||
|
||||
// Very incomplete mock objects
|
||||
hostMap := newHostMap(l)
|
||||
hostMap.preferredRanges.Store(&preferredRanges)
|
||||
|
||||
cs := &CertState{
|
||||
initiatingVersion: cert.Version1,
|
||||
privateKey: []byte{},
|
||||
v1Cert: &dummyCert{version: cert.Version1},
|
||||
v1HandshakeBytes: []byte{},
|
||||
}
|
||||
|
||||
lh := newTestLighthouse()
|
||||
ifce := &Interface{
|
||||
hostMap: hostMap,
|
||||
inside: &test.NoopTun{},
|
||||
outside: &udp.NoopConn{},
|
||||
firewall: &Firewall{},
|
||||
lightHouse: lh,
|
||||
pki: &PKI{},
|
||||
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
|
||||
l: l,
|
||||
}
|
||||
ifce.pki.cs.Store(cs)
|
||||
|
||||
// Create manager
|
||||
conf := config.NewC(l)
|
||||
conf.Settings["tunnels"] = map[string]any{
|
||||
"drop_inactive": true,
|
||||
}
|
||||
punchy := NewPunchyFromConfig(l, conf)
|
||||
nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
|
||||
assert.True(t, nc.dropInactive.Load())
|
||||
nc.intf = ifce
|
||||
|
||||
// Add an ip we have established a connection w/ to hostmap
|
||||
hostinfo := &HostInfo{
|
||||
vpnAddrs: vpnAddrs,
|
||||
localIndexId: 1099,
|
||||
remoteIndexId: 9901,
|
||||
}
|
||||
hostinfo.ConnectionState = &ConnectionState{
|
||||
myCert: &dummyCert{version: cert.Version1},
|
||||
H: &noise.HandshakeState{},
|
||||
}
|
||||
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
||||
|
||||
// Do a traffic check tick, in and out should be cleared but should not be pending deletion
|
||||
nc.Out(hostinfo)
|
||||
nc.In(hostinfo)
|
||||
assert.True(t, hostinfo.out.Load())
|
||||
assert.True(t, hostinfo.in.Load())
|
||||
|
||||
now := time.Now()
|
||||
decision, _, _ := nc.makeTrafficDecision(hostinfo.localIndexId, now)
|
||||
assert.Equal(t, tryRehandshake, decision)
|
||||
assert.Equal(t, now, hostinfo.lastUsed)
|
||||
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||
assert.False(t, hostinfo.out.Load())
|
||||
assert.False(t, hostinfo.in.Load())
|
||||
|
||||
decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*5))
|
||||
assert.Equal(t, doNothing, decision)
|
||||
assert.Equal(t, now, hostinfo.lastUsed)
|
||||
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||
assert.False(t, hostinfo.out.Load())
|
||||
assert.False(t, hostinfo.in.Load())
|
||||
|
||||
// Do another traffic check tick, should still not be pending deletion
|
||||
decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*10))
|
||||
assert.Equal(t, doNothing, decision)
|
||||
assert.Equal(t, now, hostinfo.lastUsed)
|
||||
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||
assert.False(t, hostinfo.out.Load())
|
||||
assert.False(t, hostinfo.in.Load())
|
||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
|
||||
// Finally advance beyond the inactivity timeout
|
||||
decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Minute*10))
|
||||
assert.Equal(t, closeTunnel, decision)
|
||||
assert.Equal(t, now, hostinfo.lastUsed)
|
||||
assert.False(t, hostinfo.pendingDeletion.Load())
|
||||
assert.False(t, hostinfo.out.Load())
|
||||
assert.False(t, hostinfo.in.Load())
|
||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.out, hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.in, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
|
||||
assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnAddrs[0])
|
||||
}
|
||||
@@ -360,10 +264,10 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
|
||||
ifce.disconnectInvalid.Store(true)
|
||||
|
||||
// Create manager
|
||||
conf := config.NewC(l)
|
||||
punchy := NewPunchyFromConfig(l, conf)
|
||||
nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
|
||||
nc.intf = ifce
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
punchy := NewPunchyFromConfig(l, config.NewC(l))
|
||||
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
|
||||
ifce.connectionManager = nc
|
||||
|
||||
hostinfo := &HostInfo{
|
||||
@@ -446,10 +350,6 @@ func (d *dummyCert) PublicKey() []byte {
|
||||
return d.publicKey
|
||||
}
|
||||
|
||||
func (d *dummyCert) MarshalPublicKeyPEM() []byte {
|
||||
return cert.MarshalPublicKeyToPEM(d.curve, d.publicKey)
|
||||
}
|
||||
|
||||
func (d *dummyCert) Signature() []byte {
|
||||
return d.signature
|
||||
}
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
"github.com/slackhq/nebula/noiseutil"
|
||||
)
|
||||
|
||||
const ReplayWindow = 4096
|
||||
const ReplayWindow = 1024
|
||||
|
||||
type ConnectionState struct {
|
||||
eKey *NebulaCipherState
|
||||
@@ -27,7 +27,7 @@ type ConnectionState struct {
|
||||
writeLock sync.Mutex
|
||||
}
|
||||
|
||||
func NewConnectionState(l *logrus.Logger, cs *CertState, crt cert.Certificate, initiator bool, pattern noise.HandshakePattern) (*ConnectionState, error) {
|
||||
func NewConnectionState(l *logrus.Logger, cs *CertState, crt cert.Certificate, initiator bool, pattern noise.HandshakePattern, psk []byte) (*ConnectionState, error) {
|
||||
var dhFunc noise.DHFunc
|
||||
switch crt.Curve() {
|
||||
case cert.Curve_CURVE25519:
|
||||
@@ -56,13 +56,12 @@ func NewConnectionState(l *logrus.Logger, cs *CertState, crt cert.Certificate, i
|
||||
b.Update(l, 0)
|
||||
|
||||
hs, err := noise.NewHandshakeState(noise.Config{
|
||||
CipherSuite: ncs,
|
||||
Random: rand.Reader,
|
||||
Pattern: pattern,
|
||||
Initiator: initiator,
|
||||
StaticKeypair: static,
|
||||
//NOTE: These should come from CertState (pki.go) when we finally implement it
|
||||
PresharedKey: []byte{},
|
||||
CipherSuite: ncs,
|
||||
Random: rand.Reader,
|
||||
Pattern: pattern,
|
||||
Initiator: initiator,
|
||||
StaticKeypair: static,
|
||||
PresharedKey: psk,
|
||||
PresharedKeyPlacement: 0,
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
control.go (23 lines changed)
@@ -26,15 +26,14 @@ type controlHostLister interface {
|
||||
}
|
||||
|
||||
type Control struct {
|
||||
f *Interface
|
||||
l *logrus.Logger
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
sshStart func()
|
||||
statsStart func()
|
||||
dnsStart func()
|
||||
lighthouseStart func()
|
||||
connectionManagerStart func(context.Context)
|
||||
f *Interface
|
||||
l *logrus.Logger
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
sshStart func()
|
||||
statsStart func()
|
||||
dnsStart func()
|
||||
lighthouseStart func()
|
||||
}
|
||||
|
||||
type ControlHostInfo struct {
|
||||
@@ -64,9 +63,6 @@ func (c *Control) Start() {
|
||||
if c.dnsStart != nil {
|
||||
go c.dnsStart()
|
||||
}
|
||||
if c.connectionManagerStart != nil {
|
||||
go c.connectionManagerStart(c.ctx)
|
||||
}
|
||||
if c.lighthouseStart != nil {
|
||||
c.lighthouseStart()
|
||||
}
|
||||
@@ -135,7 +131,8 @@ func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
|
||||
|
||||
// GetCertByVpnIp returns the authenticated certificate of the given vpn IP, or nil if not found
|
||||
func (c *Control) GetCertByVpnIp(vpnIp netip.Addr) cert.Certificate {
|
||||
if c.f.myVpnAddrsTable.Contains(vpnIp) {
|
||||
_, found := c.f.myVpnAddrsTable.Lookup(vpnIp)
|
||||
if found {
|
||||
// Only returning the default certificate since it's impossible
|
||||
// for any other host but ourselves to have more than 1
|
||||
return c.f.pki.getCertState().GetDefaultCertificate().Copy()
|
||||
|
||||
@@ -53,7 +53,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
||||
localIndexId: 201,
|
||||
vpnAddrs: []netip.Addr{vpnIp},
|
||||
relayState: RelayState{
|
||||
relays: nil,
|
||||
relays: map[netip.Addr]struct{}{},
|
||||
relayForByAddr: map[netip.Addr]*Relay{},
|
||||
relayForByIdx: map[uint32]*Relay{},
|
||||
},
|
||||
@@ -72,7 +72,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
||||
localIndexId: 201,
|
||||
vpnAddrs: []netip.Addr{vpnIp2},
|
||||
relayState: RelayState{
|
||||
relays: nil,
|
||||
relays: map[netip.Addr]struct{}{},
|
||||
relayForByAddr: map[netip.Addr]*Relay{},
|
||||
relayForByIdx: map[uint32]*Relay{},
|
||||
},
|
||||
@@ -101,7 +101,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
||||
|
||||
// Make sure we don't have any unexpected fields
|
||||
assertFields(t, []string{"VpnAddrs", "LocalIndex", "RemoteIndex", "RemoteAddrs", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
|
||||
assert.Equal(t, &expectedInfo, thi)
|
||||
assert.EqualValues(t, &expectedInfo, thi)
|
||||
test.AssertDeepCopyEqual(t, &expectedInfo, thi)
|
||||
|
||||
// Make sure we don't panic if the host info doesn't have a cert yet
|
||||
@@ -110,7 +110,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
func assertFields(t *testing.T, expected []string, actualStruct any) {
|
||||
func assertFields(t *testing.T, expected []string, actualStruct interface{}) {
|
||||
val := reflect.ValueOf(actualStruct).Elem()
|
||||
fields := make([]string, val.NumField())
|
||||
for i := 0; i < val.NumField(); i++ {
|
||||
|
||||
@@ -174,10 +174,6 @@ func (c *Control) GetHostmap() *HostMap {
|
||||
return c.f.hostMap
|
||||
}
|
||||
|
||||
func (c *Control) GetF() *Interface {
|
||||
return c.f
|
||||
}
|
||||
|
||||
func (c *Control) GetCertState() *CertState {
|
||||
return c.f.pki.getCertState()
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ type dnsRecords struct {
|
||||
dnsMap4 map[string]netip.Addr
|
||||
dnsMap6 map[string]netip.Addr
|
||||
hostMap *HostMap
|
||||
myVpnAddrsTable *bart.Lite
|
||||
myVpnAddrsTable *bart.Table[struct{}]
|
||||
}
|
||||
|
||||
func newDnsRecords(l *logrus.Logger, cs *CertState, hostMap *HostMap) *dnsRecords {
|
||||
@@ -112,8 +112,8 @@ func (d *dnsRecords) isSelfNebulaOrLocalhost(addr string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
//if we found it in this table, it's good
|
||||
return d.myVpnAddrsTable.Contains(b)
|
||||
_, found := d.myVpnAddrsTable.Lookup(b)
|
||||
return found //if we found it in this table, it's good
|
||||
}
|
||||
|
||||
func (d *dnsRecords) parseQuery(m *dns.Msg, w dns.ResponseWriter) {
|
||||
|
||||
@@ -38,24 +38,24 @@ func TestParsequery(t *testing.T) {
|
||||
func Test_getDnsServerAddr(t *testing.T) {
|
||||
c := config.NewC(nil)
|
||||
|
||||
c.Settings["lighthouse"] = map[string]any{
|
||||
"dns": map[string]any{
|
||||
c.Settings["lighthouse"] = map[interface{}]interface{}{
|
||||
"dns": map[interface{}]interface{}{
|
||||
"host": "0.0.0.0",
|
||||
"port": "1",
|
||||
},
|
||||
}
|
||||
assert.Equal(t, "0.0.0.0:1", getDnsServerAddr(c))
|
||||
|
||||
c.Settings["lighthouse"] = map[string]any{
|
||||
"dns": map[string]any{
|
||||
c.Settings["lighthouse"] = map[interface{}]interface{}{
|
||||
"dns": map[interface{}]interface{}{
|
||||
"host": "::",
|
||||
"port": "1",
|
||||
},
|
||||
}
|
||||
assert.Equal(t, "[::]:1", getDnsServerAddr(c))
|
||||
|
||||
c.Settings["lighthouse"] = map[string]any{
|
||||
"dns": map[string]any{
|
||||
c.Settings["lighthouse"] = map[interface{}]interface{}{
|
||||
"dns": map[interface{}]interface{}{
|
||||
"host": "[::]",
|
||||
"port": "1",
|
||||
},
|
||||
@@ -63,8 +63,8 @@ func Test_getDnsServerAddr(t *testing.T) {
|
||||
assert.Equal(t, "[::]:1", getDnsServerAddr(c))
|
||||
|
||||
// Make sure whitespace doesn't mess us up
|
||||
c.Settings["lighthouse"] = map[string]any{
|
||||
"dns": map[string]any{
|
||||
c.Settings["lighthouse"] = map[interface{}]interface{}{
|
||||
"dns": map[interface{}]interface{}{
|
||||
"host": "[::] ",
|
||||
"port": "1",
|
||||
},
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
"github.com/slackhq/nebula/udp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.yaml.in/yaml/v3"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
func BenchmarkHotPath(b *testing.B) {
|
||||
@@ -97,41 +97,6 @@ func TestGoodHandshake(t *testing.T) {
|
||||
theirControl.Stop()
|
||||
}
|
||||
|
||||
func TestGoodHandshakeNoOverlap(t *testing.T) {
|
||||
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version2, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(cert.Version2, ca, caKey, "me", "10.128.0.1/24", nil)
|
||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(cert.Version2, ca, caKey, "them", "2001::69/24", nil) //look ma, cross-stack!
|
||||
|
||||
// Put their info in our lighthouse
|
||||
myControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
|
||||
|
||||
// Start the servers
|
||||
myControl.Start()
|
||||
theirControl.Start()
|
||||
|
||||
empty := []byte{}
|
||||
t.Log("do something to cause a handshake")
|
||||
myControl.GetF().SendMessageToVpnAddr(header.Test, header.MessageNone, theirVpnIpNet[0].Addr(), empty, empty, empty)
|
||||
|
||||
t.Log("Have them consume my stage 0 packet. They have a tunnel now")
|
||||
theirControl.InjectUDPPacket(myControl.GetFromUDP(true))
|
||||
|
||||
t.Log("Get their stage 1 packet")
|
||||
stage1Packet := theirControl.GetFromUDP(true)
|
||||
|
||||
t.Log("Have me consume their stage 1 packet. I have a tunnel now")
|
||||
myControl.InjectUDPPacket(stage1Packet)
|
||||
|
||||
t.Log("Wait until we see a test packet come through to make sure we give the tunnel time to complete")
|
||||
myControl.WaitForType(header.Test, 0, theirControl)
|
||||
|
||||
t.Log("Make sure our host infos are correct")
|
||||
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet, theirVpnIpNet, myControl, theirControl)
|
||||
|
||||
myControl.Stop()
|
||||
theirControl.Stop()
|
||||
}
|
||||
|
||||
func TestWrongResponderHandshake(t *testing.T) {
|
||||
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version1, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
|
||||
@@ -499,35 +464,6 @@ func TestRelays(t *testing.T) {
|
||||
r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)
|
||||
}
|
||||
|
||||
func TestRelaysDontCareAboutIps(t *testing.T) {
|
||||
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version2, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
myControl, myVpnIpNet, _, _ := newSimpleServer(cert.Version2, ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
|
||||
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(cert.Version2, ca, caKey, "relay ", "2001::9999/24", m{"relay": m{"am_relay": true}})
|
||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(cert.Version2, ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
|
||||
|
||||
// Teach me how to get to the relay and that they can be reached via the relay
|
||||
myControl.InjectLightHouseAddr(relayVpnIpNet[0].Addr(), relayUdpAddr)
|
||||
myControl.InjectRelays(theirVpnIpNet[0].Addr(), []netip.Addr{relayVpnIpNet[0].Addr()})
|
||||
relayControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
|
||||
|
||||
// Build a router so we don't have to reason about who gets which packet
|
||||
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||
defer r.RenderFlow()
|
||||
|
||||
// Start the servers
|
||||
myControl.Start()
|
||||
relayControl.Start()
|
||||
theirControl.Start()
|
||||
|
||||
t.Log("Trigger a handshake from me to them via the relay")
|
||||
myControl.InjectTunUDPPacket(theirVpnIpNet[0].Addr(), 80, myVpnIpNet[0].Addr(), 80, []byte("Hi from me"))
|
||||
|
||||
p := r.RouteForAllUntilTxTun(theirControl)
|
||||
r.Log("Assert the tunnel works")
|
||||
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), 80, 80)
|
||||
r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)
|
||||
}
|
||||
|
||||
func TestReestablishRelays(t *testing.T) {
|
||||
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version1, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
myControl, myVpnIpNet, _, _ := newSimpleServer(cert.Version1, ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
|
||||
@@ -570,7 +506,7 @@ func TestReestablishRelays(t *testing.T) {
|
||||
curIndexes := len(myControl.GetHostmap().Indexes)
|
||||
for curIndexes >= start {
|
||||
curIndexes = len(myControl.GetHostmap().Indexes)
|
||||
r.Logf("Wait for the dead index to go away:start=%v indexes, current=%v indexes", start, curIndexes)
|
||||
r.Logf("Wait for the dead index to go away:start=%v indexes, currnet=%v indexes", start, curIndexes)
|
||||
myControl.InjectTunUDPPacket(theirVpnIpNet[0].Addr(), 80, myVpnIpNet[0].Addr(), 80, []byte("Hi from me should fail"))
|
||||
|
||||
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
|
||||
@@ -1055,7 +991,7 @@ func TestRehandshaking(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
var theirNewConfig m
|
||||
require.NoError(t, yaml.Unmarshal(rc, &theirNewConfig))
|
||||
theirFirewall := theirNewConfig["firewall"].(map[string]any)
|
||||
theirFirewall := theirNewConfig["firewall"].(map[interface{}]interface{})
|
||||
theirFirewall["inbound"] = []m{{
|
||||
"proto": "any",
|
||||
"port": "any",
|
||||
@@ -1116,9 +1052,6 @@ func TestRehandshakingLoser(t *testing.T) {
|
||||
t.Log("Stand up a tunnel between me and them")
|
||||
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
|
||||
|
||||
myControl.GetHostInfoByVpnAddr(theirVpnIpNet[0].Addr(), false)
|
||||
theirControl.GetHostInfoByVpnAddr(myVpnIpNet[0].Addr(), false)
|
||||
|
||||
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)
|
||||
|
||||
r.Log("Renew their certificate and spin until mine sees it")
|
||||
@@ -1154,7 +1087,7 @@ func TestRehandshakingLoser(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
var myNewConfig m
|
||||
require.NoError(t, yaml.Unmarshal(rc, &myNewConfig))
|
||||
theirFirewall := myNewConfig["firewall"].(map[string]any)
|
||||
theirFirewall := myNewConfig["firewall"].(map[interface{}]interface{})
|
||||
theirFirewall["inbound"] = []m{{
|
||||
"proto": "any",
|
||||
"port": "any",
|
||||
@@ -1292,108 +1225,134 @@ func TestV2NonPrimaryWithLighthouse(t *testing.T) {
|
||||
theirControl.Stop()
|
||||
}
|
||||
|
||||
func TestV2NonPrimaryWithOffNetLighthouse(t *testing.T) {
|
||||
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version2, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
lhControl, lhVpnIpNet, lhUdpAddr, _ := newSimpleServer(cert.Version2, ca, caKey, "lh ", "2001::1/64", m{"lighthouse": m{"am_lighthouse": true}})
|
||||
|
||||
o := m{
|
||||
"static_host_map": m{
|
||||
lhVpnIpNet[0].Addr().String(): []string{lhUdpAddr.String()},
|
||||
func TestPSK(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
myPskMode nebula.PskMode
|
||||
theirPskMode nebula.PskMode
|
||||
}{
|
||||
// All accepting
|
||||
{
|
||||
name: "both accepting",
|
||||
myPskMode: nebula.PskAccepting,
|
||||
theirPskMode: nebula.PskAccepting,
|
||||
},
|
||||
"lighthouse": m{
|
||||
"hosts": []string{lhVpnIpNet[0].Addr().String()},
|
||||
"local_allow_list": m{
|
||||
// Try and block our lighthouse updates from using the actual addresses assigned to this computer
|
||||
// If we start discovering addresses the test router doesn't know about then test traffic can't flow
|
||||
"10.0.0.0/24": true,
|
||||
"::/0": false,
|
||||
},
|
||||
|
||||
// accepting and sending both ways
|
||||
{
|
||||
name: "accepting to sending",
|
||||
myPskMode: nebula.PskAccepting,
|
||||
theirPskMode: nebula.PskSending,
|
||||
},
|
||||
{
|
||||
name: "sending to accepting",
|
||||
myPskMode: nebula.PskSending,
|
||||
theirPskMode: nebula.PskAccepting,
|
||||
},
|
||||
|
||||
// All sending
|
||||
{
|
||||
name: "sending to sending",
|
||||
myPskMode: nebula.PskSending,
|
||||
theirPskMode: nebula.PskSending,
|
||||
},
|
||||
|
||||
// enforced and sending both ways
|
||||
{
|
||||
name: "enforced to sending",
|
||||
myPskMode: nebula.PskEnforced,
|
||||
theirPskMode: nebula.PskSending,
|
||||
},
|
||||
{
|
||||
name: "sending to enforced",
|
||||
myPskMode: nebula.PskSending,
|
||||
theirPskMode: nebula.PskEnforced,
|
||||
},
|
||||
|
||||
// All enforced
|
||||
{
|
||||
name: "both enforced",
|
||||
myPskMode: nebula.PskEnforced,
|
||||
theirPskMode: nebula.PskEnforced,
|
||||
},
|
||||
|
||||
// Enforced can technically handshake with an accepting node, but it is bad to be in this state
|
||||
{
|
||||
name: "enforced to accepting",
|
||||
myPskMode: nebula.PskEnforced,
|
||||
theirPskMode: nebula.PskAccepting,
|
||||
},
|
||||
}
|
||||
myControl, myVpnIpNet, _, _ := newSimpleServer(cert.Version2, ca, caKey, "me ", "10.128.0.2/24, ff::2/64", o)
|
||||
theirControl, theirVpnIpNet, _, _ := newSimpleServer(cert.Version2, ca, caKey, "them", "10.128.0.3/24, ff::3/64", o)
|
||||
|
||||
// Build a router so we don't have to reason about who gets which packet
|
||||
r := router.NewR(t, lhControl, myControl, theirControl)
|
||||
defer r.RenderFlow()
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
var myPskSettings, theirPskSettings m
|
||||
|
||||
// Start the servers
|
||||
lhControl.Start()
|
||||
myControl.Start()
|
||||
theirControl.Start()
|
||||
switch test.myPskMode {
|
||||
case nebula.PskAccepting:
|
||||
myPskSettings = m{"psk": &m{"mode": "accepting", "keys": []string{"garbage0", "this is a key"}}}
|
||||
case nebula.PskSending:
|
||||
myPskSettings = m{"psk": &m{"mode": "sending", "keys": []string{"this is a key", "garbage1"}}}
|
||||
case nebula.PskEnforced:
|
||||
myPskSettings = m{"psk": &m{"mode": "enforced", "keys": []string{"this is a key", "garbage2"}}}
|
||||
}
|
||||
|
||||
t.Log("Stand up an ipv6 tunnel between me and them")
|
||||
assert.True(t, myVpnIpNet[1].Addr().Is6())
|
||||
assert.True(t, theirVpnIpNet[1].Addr().Is6())
|
||||
assertTunnel(t, myVpnIpNet[1].Addr(), theirVpnIpNet[1].Addr(), myControl, theirControl, r)
|
||||
switch test.theirPskMode {
|
||||
case nebula.PskAccepting:
|
||||
theirPskSettings = m{"psk": &m{"mode": "accepting", "keys": []string{"garbage3", "this is a key"}}}
|
||||
case nebula.PskSending:
|
||||
theirPskSettings = m{"psk": &m{"mode": "sending", "keys": []string{"this is a key", "garbage4"}}}
|
||||
case nebula.PskEnforced:
|
||||
theirPskSettings = m{"psk": &m{"mode": "enforced", "keys": []string{"this is a key", "garbage5"}}}
|
||||
}
|
||||
|
||||
lhControl.Stop()
|
||||
myControl.Stop()
|
||||
theirControl.Stop()
|
||||
}
|
||||
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version2, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, nil)
|
||||
myControl, myVpnIp, myUdpAddr, _ := newSimpleServer(cert.Version2, ca, caKey, "me", "10.0.0.1/24", myPskSettings)
|
||||
theirControl, theirVpnIp, theirUdpAddr, _ := newSimpleServer(cert.Version2, ca, caKey, "them", "10.0.0.2/24", theirPskSettings)
|
||||
|
||||
func TestGoodHandshakeUnsafeDest(t *testing.T) {
|
||||
unsafePrefix := "192.168.6.0/24"
|
||||
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version2, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServerWithUdpAndUnsafeNetworks(cert.Version2, ca, caKey, "spooky", "10.128.0.2/24", netip.MustParseAddrPort("10.64.0.2:4242"), unsafePrefix, nil)
|
||||
route := m{"route": unsafePrefix, "via": theirVpnIpNet[0].Addr().String()}
|
||||
myCfg := m{
|
||||
"tun": m{
|
||||
"unsafe_routes": []m{route},
|
||||
},
|
||||
myControl.InjectLightHouseAddr(theirVpnIp[0].Addr(), theirUdpAddr)
|
||||
r := router.NewR(t, myControl, theirControl)
|
||||
|
||||
// Start the servers
|
||||
myControl.Start()
|
||||
theirControl.Start()
|
||||
|
||||
t.Log("Route until we see our cached packet flow")
|
||||
myControl.InjectTunUDPPacket(theirVpnIp[0].Addr(), 80, myVpnIp[0].Addr(), 80, []byte("Hi from me"))
|
||||
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
|
||||
h := &header.H{}
|
||||
err := h.Parse(p.Data)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// If this is the stage 1 handshake packet and I am configured to send with a psk, my cert name should
|
||||
// not appear. It would likely be more obvious to unmarshal the payload and check but this works fine for now
|
||||
if test.myPskMode == nebula.PskEnforced || test.myPskMode == nebula.PskSending {
|
||||
if h.Type == 0 && h.MessageCounter == 1 {
|
||||
assert.NotContains(t, string(p.Data), "test me")
|
||||
}
|
||||
}
|
||||
|
||||
if p.To == theirUdpAddr && h.Type == 1 {
|
||||
return router.RouteAndExit
|
||||
}
|
||||
|
||||
return router.KeepRouting
|
||||
})
|
||||
|
||||
t.Log("My cached packet should be received by them")
|
||||
myCachedPacket := theirControl.GetFromTun(true)
|
||||
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIp[0].Addr(), theirVpnIp[0].Addr(), 80, 80)
|
||||
|
||||
t.Log("Test the tunnel with them")
|
||||
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIp, theirVpnIp, myControl, theirControl)
|
||||
assertTunnel(t, myVpnIp[0].Addr(), theirVpnIp[0].Addr(), myControl, theirControl, r)
|
||||
|
||||
myControl.Stop()
|
||||
theirControl.Stop()
|
||||
//TODO: assert hostmaps
|
||||
})
|
||||
}
|
||||
myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(cert.Version2, ca, caKey, "me", "10.128.0.1/24", myCfg)
|
||||
t.Logf("my config %v", myConfig)
|
||||
// Put their info in our lighthouse
|
||||
myControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
|
||||
|
||||
spookyDest := netip.MustParseAddr("192.168.6.4")
|
||||
|
||||
// Start the servers
|
||||
myControl.Start()
|
||||
theirControl.Start()
|
||||
|
||||
t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side")
|
||||
myControl.InjectTunUDPPacket(spookyDest, 80, myVpnIpNet[0].Addr(), 80, []byte("Hi from me"))
|
||||
|
||||
t.Log("Have them consume my stage 0 packet. They have a tunnel now")
|
||||
theirControl.InjectUDPPacket(myControl.GetFromUDP(true))
|
||||
|
||||
t.Log("Get their stage 1 packet so that we can play with it")
|
||||
stage1Packet := theirControl.GetFromUDP(true)
|
||||
|
||||
t.Log("I consume a garbage packet with a proper nebula header for our tunnel")
|
||||
// this should log a statement and get ignored, allowing the real handshake packet to complete the tunnel
|
||||
badPacket := stage1Packet.Copy()
|
||||
badPacket.Data = badPacket.Data[:len(badPacket.Data)-header.Len]
|
||||
myControl.InjectUDPPacket(badPacket)
|
||||
|
||||
t.Log("Have me consume their real stage 1 packet. I have a tunnel now")
|
||||
myControl.InjectUDPPacket(stage1Packet)
|
||||
|
||||
t.Log("Wait until we see my cached packet come through")
|
||||
myControl.WaitForType(1, 0, theirControl)
|
||||
|
||||
t.Log("Make sure our host infos are correct")
|
||||
assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet, theirVpnIpNet, myControl, theirControl)
|
||||
|
||||
t.Log("Get that cached packet and make sure it looks right")
|
||||
myCachedPacket := theirControl.GetFromTun(true)
|
||||
assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet[0].Addr(), spookyDest, 80, 80)
|
||||
|
||||
//reply
|
||||
theirControl.InjectTunUDPPacket(myVpnIpNet[0].Addr(), 80, spookyDest, 80, []byte("Hi from the spookyman"))
|
||||
//wait for reply
|
||||
theirControl.WaitForType(1, 0, myControl)
|
||||
theirCachedPacket := myControl.GetFromTun(true)
|
||||
assertUdpPacket(t, []byte("Hi from the spookyman"), theirCachedPacket, spookyDest, myVpnIpNet[0].Addr(), 80, 80)
|
||||
|
||||
t.Log("Do a bidirectional tunnel test")
|
||||
r := router.NewR(t, myControl, theirControl)
|
||||
defer r.RenderFlow()
|
||||
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
|
||||
|
||||
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||
myControl.Stop()
|
||||
theirControl.Stop()
|
||||
}
|
||||
|
||||
@@ -22,14 +22,15 @@ import (
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/e2e/router"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.yaml.in/yaml/v3"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
type m = map[string]any
|
||||
type m map[string]interface{}
|
||||
|
||||
// newSimpleServer creates a nebula instance with many assumptions
|
||||
func newSimpleServer(v cert.Version, caCrt cert.Certificate, caKey []byte, name string, sVpnNetworks string, overrides m) (*nebula.Control, []netip.Prefix, netip.AddrPort, *config.C) {
|
||||
l := NewTestLogger()
|
||||
|
||||
var vpnNetworks []netip.Prefix
|
||||
for _, sn := range strings.Split(sVpnNetworks, ",") {
|
||||
vpnIpNet, err := netip.ParsePrefix(strings.TrimSpace(sn))
|
||||
@@ -55,54 +56,7 @@ func newSimpleServer(v cert.Version, caCrt cert.Certificate, caKey []byte, name
|
||||
budpIp[3] = 239
|
||||
udpAddr = netip.AddrPortFrom(netip.AddrFrom16(budpIp), 4242)
|
||||
}
|
||||
return newSimpleServerWithUdp(v, caCrt, caKey, name, sVpnNetworks, udpAddr, overrides)
|
||||
}
|
||||
|
||||
func newSimpleServerWithUdp(v cert.Version, caCrt cert.Certificate, caKey []byte, name string, sVpnNetworks string, udpAddr netip.AddrPort, overrides m) (*nebula.Control, []netip.Prefix, netip.AddrPort, *config.C) {
|
||||
return newSimpleServerWithUdpAndUnsafeNetworks(v, caCrt, caKey, name, sVpnNetworks, udpAddr, "", overrides)
|
||||
}
|
||||
|
||||
func newSimpleServerWithUdpAndUnsafeNetworks(v cert.Version, caCrt cert.Certificate, caKey []byte, name string, sVpnNetworks string, udpAddr netip.AddrPort, sUnsafeNetworks string, overrides m) (*nebula.Control, []netip.Prefix, netip.AddrPort, *config.C) {
|
||||
l := NewTestLogger()
|
||||
|
||||
var vpnNetworks []netip.Prefix
|
||||
for _, sn := range strings.Split(sVpnNetworks, ",") {
|
||||
vpnIpNet, err := netip.ParsePrefix(strings.TrimSpace(sn))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
vpnNetworks = append(vpnNetworks, vpnIpNet)
|
||||
}
|
||||
|
||||
if len(vpnNetworks) == 0 {
|
||||
panic("no vpn networks")
|
||||
}
|
||||
|
||||
firewallInbound := []m{{
|
||||
"proto": "any",
|
||||
"port": "any",
|
||||
"host": "any",
|
||||
}}
|
||||
|
||||
var unsafeNetworks []netip.Prefix
|
||||
if sUnsafeNetworks != "" {
|
||||
firewallInbound = []m{{
|
||||
"proto": "any",
|
||||
"port": "any",
|
||||
"host": "any",
|
||||
"local_cidr": "0.0.0.0/0",
|
||||
}}
|
||||
|
||||
for _, sn := range strings.Split(sUnsafeNetworks, ",") {
|
||||
x, err := netip.ParsePrefix(strings.TrimSpace(sn))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
unsafeNetworks = append(unsafeNetworks, x)
|
||||
}
|
||||
}
|
||||
|
||||
_, _, myPrivKey, myPEM := cert_test.NewTestCert(v, cert.Curve_CURVE25519, caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnNetworks, unsafeNetworks, []string{})
|
||||
_, _, myPrivKey, myPEM := cert_test.NewTestCert(v, cert.Curve_CURVE25519, caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnNetworks, nil, []string{})
|
||||
|
||||
caB, err := caCrt.MarshalPEM()
|
||||
if err != nil {
|
||||
@@ -122,7 +76,11 @@ func newSimpleServerWithUdpAndUnsafeNetworks(v cert.Version, caCrt cert.Certific
|
||||
"port": "any",
|
||||
"host": "any",
|
||||
}},
|
||||
"inbound": firewallInbound,
|
||||
"inbound": []m{{
|
||||
"proto": "any",
|
||||
"port": "any",
|
||||
"host": "any",
|
||||
}},
|
||||
},
|
||||
//"handshakes": m{
|
||||
// "try_interval": "1s",
|
||||
@@ -171,109 +129,6 @@ func newSimpleServerWithUdpAndUnsafeNetworks(v cert.Version, caCrt cert.Certific
|
||||
return control, vpnNetworks, udpAddr, c
|
||||
}
|
||||
|
||||
// newServer creates a nebula instance with fewer assumptions
|
||||
func newServer(caCrt []cert.Certificate, certs []cert.Certificate, key []byte, overrides m) (*nebula.Control, []netip.Prefix, netip.AddrPort, *config.C) {
|
||||
l := NewTestLogger()
|
||||
|
||||
vpnNetworks := certs[len(certs)-1].Networks()
|
||||
|
||||
var udpAddr netip.AddrPort
|
||||
if vpnNetworks[0].Addr().Is4() {
|
||||
budpIp := vpnNetworks[0].Addr().As4()
|
||||
budpIp[1] -= 128
|
||||
udpAddr = netip.AddrPortFrom(netip.AddrFrom4(budpIp), 4242)
|
||||
} else {
|
||||
budpIp := vpnNetworks[0].Addr().As16()
|
||||
// beef for funsies
|
||||
budpIp[2] = 190
|
||||
budpIp[3] = 239
|
||||
udpAddr = netip.AddrPortFrom(netip.AddrFrom16(budpIp), 4242)
|
||||
}
|
||||
|
||||
caStr := ""
|
||||
for _, ca := range caCrt {
|
||||
x, err := ca.MarshalPEM()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
caStr += string(x)
|
||||
}
|
||||
certStr := ""
|
||||
for _, c := range certs {
|
||||
x, err := c.MarshalPEM()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
certStr += string(x)
|
||||
}
|
||||
|
||||
mc := m{
|
||||
"pki": m{
|
||||
"ca": caStr,
|
||||
"cert": certStr,
|
||||
"key": string(key),
|
||||
},
|
||||
//"tun": m{"disabled": true},
|
||||
"firewall": m{
|
||||
"outbound": []m{{
|
||||
"proto": "any",
|
||||
"port": "any",
|
||||
"host": "any",
|
||||
}},
|
||||
"inbound": []m{{
|
||||
"proto": "any",
|
||||
"port": "any",
|
||||
"host": "any",
|
||||
}},
|
||||
},
|
||||
//"handshakes": m{
|
||||
// "try_interval": "1s",
|
||||
//},
|
||||
"listen": m{
|
||||
"host": udpAddr.Addr().String(),
|
||||
"port": udpAddr.Port(),
|
||||
},
|
||||
"logging": m{
|
||||
"timestamp_format": fmt.Sprintf("%v 15:04:05.000000", certs[0].Name()),
|
||||
"level": l.Level.String(),
|
||||
},
|
||||
"timers": m{
|
||||
"pending_deletion_interval": 2,
|
||||
"connection_alive_interval": 2,
|
||||
},
|
||||
}
|
||||
|
||||
if overrides != nil {
|
||||
final := m{}
|
||||
err := mergo.Merge(&final, overrides, mergo.WithAppendSlice)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
err = mergo.Merge(&final, mc, mergo.WithAppendSlice)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
mc = final
|
||||
}
|
||||
|
||||
cb, err := yaml.Marshal(mc)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
c := config.NewC(l)
|
||||
cStr := string(cb)
|
||||
c.LoadString(cStr)
|
||||
|
||||
control, err := nebula.Main(c, false, "e2e-test", l, nil)
|
||||
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return control, vpnNetworks, udpAddr, c
|
||||
}
|
||||
|
||||
type doneCb func()
|
||||
|
||||
func deadline(t *testing.T, seconds time.Duration) doneCb {
|
||||
@@ -308,10 +163,10 @@ func assertHostInfoPair(t *testing.T, addrA, addrB netip.AddrPort, vpnNetsA, vpn
|
||||
// Get both host infos
|
||||
//TODO: CERT-V2 we may want to loop over each vpnAddr and assert all the things
|
||||
hBinA := controlA.GetHostInfoByVpnAddr(vpnNetsB[0].Addr(), false)
|
||||
require.NotNil(t, hBinA, "Host B was not found by vpnAddr in controlA")
|
||||
assert.NotNil(t, hBinA, "Host B was not found by vpnAddr in controlA")
|
||||
|
||||
hAinB := controlB.GetHostInfoByVpnAddr(vpnNetsA[0].Addr(), false)
|
||||
require.NotNil(t, hAinB, "Host A was not found by vpnAddr in controlB")
|
||||
assert.NotNil(t, hAinB, "Host A was not found by vpnAddr in controlB")
|
||||
|
||||
// Check that both vpn and real addr are correct
|
||||
assert.EqualValues(t, getAddrs(vpnNetsB), hBinA.VpnAddrs, "Host B VpnIp is wrong in control A")
|
||||
|
||||
@@ -111,10 +111,6 @@ type ExitFunc func(packet *udp.Packet, receiver *nebula.Control) ExitType
|
||||
func NewR(t testing.TB, controls ...*nebula.Control) *R {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
if err := os.MkdirAll("mermaid", 0755); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
r := &R{
|
||||
controls: make(map[netip.AddrPort]*nebula.Control),
|
||||
vpnControls: make(map[netip.Addr]*nebula.Control),
|
||||
@@ -194,6 +190,9 @@ func (r *R) renderFlow() {
|
||||
return
|
||||
}
|
||||
|
||||
if err := os.MkdirAll(filepath.Dir(r.fn), 0755); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
f, err := os.OpenFile(r.fn, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0644)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -700,7 +699,6 @@ func (r *R) FlushAll() {
|
||||
r.Unlock()
|
||||
panic("Can't FlushAll for host: " + p.To.String())
|
||||
}
|
||||
receiver.InjectUDPPacket(p)
|
||||
r.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,367 +0,0 @@
|
||||
//go:build e2e_testing
|
||||
// +build e2e_testing
|
||||
|
||||
package e2e
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/slackhq/nebula/cert_test"
|
||||
"github.com/slackhq/nebula/e2e/router"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
func TestDropInactiveTunnels(t *testing.T) {
|
||||
// The goal of this test is to ensure the shortest inactivity timeout will close the tunnel on both sides
|
||||
// under ideal conditions
|
||||
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version1, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(cert.Version1, ca, caKey, "me", "10.128.0.1/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "5s"}})
|
||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(cert.Version1, ca, caKey, "them", "10.128.0.2/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "10m"}})
|
||||
|
||||
// Share our underlay information
|
||||
myControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
|
||||
theirControl.InjectLightHouseAddr(myVpnIpNet[0].Addr(), myUdpAddr)
|
||||
|
||||
// Start the servers
|
||||
myControl.Start()
|
||||
theirControl.Start()
|
||||
|
||||
r := router.NewR(t, myControl, theirControl)
|
||||
|
||||
r.Log("Assert the tunnel between me and them works")
|
||||
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
|
||||
|
||||
r.Log("Go inactive and wait for the tunnels to get dropped")
|
||||
waitStart := time.Now()
|
||||
for {
|
||||
myIndexes := len(myControl.GetHostmap().Indexes)
|
||||
theirIndexes := len(theirControl.GetHostmap().Indexes)
|
||||
if myIndexes == 0 && theirIndexes == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
since := time.Since(waitStart)
|
||||
r.Logf("my tunnels: %v; their tunnels: %v; duration: %v", myIndexes, theirIndexes, since)
|
||||
if since > time.Second*30 {
|
||||
t.Fatal("Tunnel should have been declared inactive after 5 seconds and before 30 seconds")
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
r.FlushAll()
|
||||
}
|
||||
|
||||
r.Logf("Inactive tunnels were dropped within %v", time.Since(waitStart))
|
||||
myControl.Stop()
|
||||
theirControl.Stop()
|
||||
}
|
||||
|
||||
func TestCertUpgrade(t *testing.T) {
|
||||
// The goal of this test is to ensure a host that reloads from a v1-only certificate to a v2 certificate
// keeps its tunnel up and that the peer observes the new certificate version
|
||||
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version1, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
caB, err := ca.MarshalPEM()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ca2, _, caKey2, _ := cert_test.NewTestCaCert(cert.Version2, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
|
||||
ca2B, err := ca2.MarshalPEM()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
caStr := fmt.Sprintf("%s\n%s", caB, ca2B)
|
||||
|
||||
myCert, _, myPrivKey, _ := cert_test.NewTestCert(cert.Version1, cert.Curve_CURVE25519, ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{netip.MustParsePrefix("10.128.0.1/24")}, nil, []string{})
|
||||
_, myCert2Pem := cert_test.NewTestCertDifferentVersion(myCert, cert.Version2, ca2, caKey2)
|
||||
|
||||
theirCert, _, theirPrivKey, _ := cert_test.NewTestCert(cert.Version1, cert.Curve_CURVE25519, ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{netip.MustParsePrefix("10.128.0.2/24")}, nil, []string{})
|
||||
theirCert2, _ := cert_test.NewTestCertDifferentVersion(theirCert, cert.Version2, ca2, caKey2)
|
||||
|
||||
myControl, myVpnIpNet, myUdpAddr, myC := newServer([]cert.Certificate{ca, ca2}, []cert.Certificate{myCert}, myPrivKey, m{})
|
||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newServer([]cert.Certificate{ca, ca2}, []cert.Certificate{theirCert, theirCert2}, theirPrivKey, m{})
|
||||
|
||||
// Share our underlay information
|
||||
myControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
|
||||
theirControl.InjectLightHouseAddr(myVpnIpNet[0].Addr(), myUdpAddr)
|
||||
|
||||
// Start the servers
|
||||
myControl.Start()
|
||||
theirControl.Start()
|
||||
|
||||
r := router.NewR(t, myControl, theirControl)
|
||||
defer r.RenderFlow()
|
||||
|
||||
r.Log("Assert the tunnel between me and them works")
|
||||
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
|
||||
r.Log("yay")
|
||||
//todo ???
|
||||
time.Sleep(1 * time.Second)
|
||||
r.FlushAll()
|
||||
|
||||
mc := m{
|
||||
"pki": m{
|
||||
"ca": caStr,
|
||||
"cert": string(myCert2Pem),
|
||||
"key": string(myPrivKey),
|
||||
},
|
||||
//"tun": m{"disabled": true},
|
||||
"firewall": myC.Settings["firewall"],
|
||||
//"handshakes": m{
|
||||
// "try_interval": "1s",
|
||||
//},
|
||||
"listen": myC.Settings["listen"],
|
||||
"logging": myC.Settings["logging"],
|
||||
"timers": myC.Settings["timers"],
|
||||
}
|
||||
|
||||
cb, err := yaml.Marshal(mc)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
r.Logf("reload new v2-only config")
|
||||
err = myC.ReloadConfigString(string(cb))
|
||||
assert.NoError(t, err)
|
||||
r.Log("yay, spin until they see it")
|
||||
waitStart := time.Now()
|
||||
for {
|
||||
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
|
||||
c := theirControl.GetHostInfoByVpnAddr(myVpnIpNet[0].Addr(), false)
|
||||
if c == nil {
|
||||
r.Log("nil")
|
||||
} else {
|
||||
version := c.Cert.Version()
|
||||
r.Logf("version %d", version)
|
||||
if version == cert.Version2 {
|
||||
break
|
||||
}
|
||||
}
|
||||
since := time.Since(waitStart)
|
||||
if since > time.Second*10 {
|
||||
t.Fatal("Cert should be new by now")
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||
|
||||
myControl.Stop()
|
||||
theirControl.Stop()
|
||||
}
|
||||
|
||||
func TestCertDowngrade(t *testing.T) {
|
||||
// The goal of this test is to ensure a host that reloads from a v2 certificate back to a v1 certificate
// keeps its tunnel up and that the peer observes the older certificate version
|
||||
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version1, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
caB, err := ca.MarshalPEM()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
ca2, _, caKey2, _ := cert_test.NewTestCaCert(cert.Version2, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
|
||||
ca2B, err := ca2.MarshalPEM()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
caStr := fmt.Sprintf("%s\n%s", caB, ca2B)
|
||||
|
||||
myCert, _, myPrivKey, myCertPem := cert_test.NewTestCert(cert.Version1, cert.Curve_CURVE25519, ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{netip.MustParsePrefix("10.128.0.1/24")}, nil, []string{})
|
||||
myCert2, _ := cert_test.NewTestCertDifferentVersion(myCert, cert.Version2, ca2, caKey2)
|
||||
|
||||
theirCert, _, theirPrivKey, _ := cert_test.NewTestCert(cert.Version1, cert.Curve_CURVE25519, ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{netip.MustParsePrefix("10.128.0.2/24")}, nil, []string{})
|
||||
theirCert2, _ := cert_test.NewTestCertDifferentVersion(theirCert, cert.Version2, ca2, caKey2)
|
||||
|
||||
myControl, myVpnIpNet, myUdpAddr, myC := newServer([]cert.Certificate{ca, ca2}, []cert.Certificate{myCert2}, myPrivKey, m{})
|
||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newServer([]cert.Certificate{ca, ca2}, []cert.Certificate{theirCert, theirCert2}, theirPrivKey, m{})
|
||||
|
||||
// Share our underlay information
|
||||
myControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
|
||||
theirControl.InjectLightHouseAddr(myVpnIpNet[0].Addr(), myUdpAddr)
|
||||
|
||||
// Start the servers
|
||||
myControl.Start()
|
||||
theirControl.Start()
|
||||
|
||||
r := router.NewR(t, myControl, theirControl)
|
||||
defer r.RenderFlow()
|
||||
|
||||
r.Log("Assert the tunnel between me and them works")
|
||||
//assertTunnel(t, theirVpnIpNet[0].Addr(), myVpnIpNet[0].Addr(), theirControl, myControl, r)
|
||||
//r.Log("yay")
|
||||
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
|
||||
r.Log("yay")
|
||||
//todo ???
|
||||
time.Sleep(1 * time.Second)
|
||||
r.FlushAll()
|
||||
|
||||
mc := m{
|
||||
"pki": m{
|
||||
"ca": caStr,
|
||||
"cert": string(myCertPem),
|
||||
"key": string(myPrivKey),
|
||||
},
|
||||
"firewall": myC.Settings["firewall"],
|
||||
"listen": myC.Settings["listen"],
|
||||
"logging": myC.Settings["logging"],
|
||||
"timers": myC.Settings["timers"],
|
||||
}
|
||||
|
||||
cb, err := yaml.Marshal(mc)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
r.Logf("reload new v1-only config")
|
||||
err = myC.ReloadConfigString(string(cb))
|
||||
assert.NoError(t, err)
|
||||
r.Log("yay, spin until they see it")
|
||||
waitStart := time.Now()
|
||||
for {
|
||||
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
|
||||
c := theirControl.GetHostInfoByVpnAddr(myVpnIpNet[0].Addr(), false)
|
||||
c2 := myControl.GetHostInfoByVpnAddr(theirVpnIpNet[0].Addr(), false)
|
||||
if c == nil || c2 == nil {
|
||||
r.Log("nil")
|
||||
} else {
|
||||
version := c.Cert.Version()
|
||||
theirVersion := c2.Cert.Version()
|
||||
r.Logf("version %d,%d", version, theirVersion)
|
||||
if version == cert.Version1 {
|
||||
break
|
||||
}
|
||||
}
|
||||
since := time.Since(waitStart)
|
||||
if since > time.Second*5 {
|
||||
r.Log("it is unusual that the cert is not new yet, but not a failure yet")
|
||||
}
|
||||
if since > time.Second*10 {
|
||||
r.Log("wtf")
|
||||
t.Fatal("Cert should be back to v1 by now")
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||
|
||||
myControl.Stop()
|
||||
theirControl.Stop()
|
||||
}
|
||||
|
||||
func TestCertMismatchCorrection(t *testing.T) {
|
||||
// The goal of this test is to ensure two hosts that handshake with mismatched certificate versions
// converge on the same certificate version over the existing tunnel
|
||||
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version1, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
ca2, _, caKey2, _ := cert_test.NewTestCaCert(cert.Version2, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
|
||||
myCert, _, myPrivKey, _ := cert_test.NewTestCert(cert.Version1, cert.Curve_CURVE25519, ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{netip.MustParsePrefix("10.128.0.1/24")}, nil, []string{})
|
||||
myCert2, _ := cert_test.NewTestCertDifferentVersion(myCert, cert.Version2, ca2, caKey2)
|
||||
|
||||
theirCert, _, theirPrivKey, _ := cert_test.NewTestCert(cert.Version1, cert.Curve_CURVE25519, ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), []netip.Prefix{netip.MustParsePrefix("10.128.0.2/24")}, nil, []string{})
|
||||
theirCert2, _ := cert_test.NewTestCertDifferentVersion(theirCert, cert.Version2, ca2, caKey2)
|
||||
|
||||
myControl, myVpnIpNet, myUdpAddr, _ := newServer([]cert.Certificate{ca, ca2}, []cert.Certificate{myCert2}, myPrivKey, m{})
|
||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newServer([]cert.Certificate{ca, ca2}, []cert.Certificate{theirCert, theirCert2}, theirPrivKey, m{})
|
||||
|
||||
// Share our underlay information
|
||||
myControl.InjectLightHouseAddr(theirVpnIpNet[0].Addr(), theirUdpAddr)
|
||||
theirControl.InjectLightHouseAddr(myVpnIpNet[0].Addr(), myUdpAddr)
|
||||
|
||||
// Start the servers
|
||||
myControl.Start()
|
||||
theirControl.Start()
|
||||
|
||||
r := router.NewR(t, myControl, theirControl)
|
||||
defer r.RenderFlow()
|
||||
|
||||
r.Log("Assert the tunnel between me and them works")
|
||||
//assertTunnel(t, theirVpnIpNet[0].Addr(), myVpnIpNet[0].Addr(), theirControl, myControl, r)
|
||||
//r.Log("yay")
|
||||
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
|
||||
r.Log("yay")
|
||||
//todo ???
|
||||
time.Sleep(1 * time.Second)
|
||||
r.FlushAll()
|
||||
|
||||
waitStart := time.Now()
|
||||
for {
|
||||
assertTunnel(t, myVpnIpNet[0].Addr(), theirVpnIpNet[0].Addr(), myControl, theirControl, r)
|
||||
c := theirControl.GetHostInfoByVpnAddr(myVpnIpNet[0].Addr(), false)
|
||||
c2 := myControl.GetHostInfoByVpnAddr(theirVpnIpNet[0].Addr(), false)
|
||||
if c == nil || c2 == nil {
|
||||
r.Log("nil")
|
||||
} else {
|
||||
version := c.Cert.Version()
|
||||
theirVersion := c2.Cert.Version()
|
||||
r.Logf("version %d,%d", version, theirVersion)
|
||||
if version == theirVersion {
|
||||
break
|
||||
}
|
||||
}
|
||||
since := time.Since(waitStart)
|
||||
if since > time.Second*5 {
|
||||
r.Log("wtf")
|
||||
}
|
||||
if since > time.Second*10 {
|
||||
r.Log("wtf")
|
||||
t.Fatal("Cert versions should match by now")
|
||||
}
|
||||
time.Sleep(time.Second)
|
||||
}
|
||||
|
||||
r.RenderHostmaps("Final hostmaps", myControl, theirControl)
|
||||
|
||||
myControl.Stop()
|
||||
theirControl.Stop()
|
||||
}
|
||||
|
||||
func TestCrossStackRelaysWork(t *testing.T) {
|
||||
ca, _, caKey, _ := cert_test.NewTestCaCert(cert.Version2, cert.Curve_CURVE25519, time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
myControl, myVpnIpNet, _, _ := newSimpleServer(cert.Version2, ca, caKey, "me ", "10.128.0.1/24,fc00::1/64", m{"relay": m{"use_relays": true}})
|
||||
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(cert.Version2, ca, caKey, "relay ", "10.128.0.128/24,fc00::128/64", m{"relay": m{"am_relay": true}})
|
||||
theirUdp := netip.MustParseAddrPort("10.0.0.2:4242")
|
||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServerWithUdp(cert.Version2, ca, caKey, "them ", "fc00::2/64", theirUdp, m{"relay": m{"use_relays": true}})
|
||||
|
||||
//myVpnV4 := myVpnIpNet[0]
|
||||
myVpnV6 := myVpnIpNet[1]
|
||||
relayVpnV4 := relayVpnIpNet[0]
|
||||
relayVpnV6 := relayVpnIpNet[1]
|
||||
theirVpnV6 := theirVpnIpNet[0]
|
||||
|
||||
// Teach me how to get to the relay and that they can be reached via the relay
|
||||
myControl.InjectLightHouseAddr(relayVpnV4.Addr(), relayUdpAddr)
|
||||
myControl.InjectLightHouseAddr(relayVpnV6.Addr(), relayUdpAddr)
|
||||
myControl.InjectRelays(theirVpnV6.Addr(), []netip.Addr{relayVpnV6.Addr()})
|
||||
relayControl.InjectLightHouseAddr(theirVpnV6.Addr(), theirUdpAddr)
|
||||
|
||||
// Build a router so we don't have to reason about who gets which packet
|
||||
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||
defer r.RenderFlow()
|
||||
|
||||
// Start the servers
|
||||
myControl.Start()
|
||||
relayControl.Start()
|
||||
theirControl.Start()
|
||||
|
||||
t.Log("Trigger a handshake from me to them via the relay")
|
||||
myControl.InjectTunUDPPacket(theirVpnV6.Addr(), 80, myVpnV6.Addr(), 80, []byte("Hi from me"))
|
||||
|
||||
p := r.RouteForAllUntilTxTun(theirControl)
|
||||
r.Log("Assert the tunnel works")
|
||||
assertUdpPacket(t, []byte("Hi from me"), p, myVpnV6.Addr(), theirVpnV6.Addr(), 80, 80)
|
||||
|
||||
t.Log("reply?")
|
||||
theirControl.InjectTunUDPPacket(myVpnV6.Addr(), 80, theirVpnV6.Addr(), 80, []byte("Hi from them"))
|
||||
p = r.RouteForAllUntilTxTun(myControl)
|
||||
assertUdpPacket(t, []byte("Hi from them"), p, theirVpnV6.Addr(), myVpnV6.Addr(), 80, 80)
|
||||
|
||||
r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)
|
||||
//t.Log("finish up")
|
||||
//myControl.Stop()
|
||||
//theirControl.Stop()
|
||||
//relayControl.Stop()
|
||||
}
|
||||
@@ -13,11 +13,43 @@ pki:
|
||||
# disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid.
|
||||
#disconnect_invalid: true
|
||||
|
||||
# initiating_version controls which certificate version is used when initiating handshakes.
|
||||
# default_version controls which certificate version is used in handshakes.
|
||||
# This setting only applies if both a v1 and a v2 certificate are configured, in which case it will default to `1`.
|
||||
# Once all hosts in the mesh are configured with both a v1 and v2 certificate then this should be changed to `2`.
|
||||
# After all hosts in the mesh are using a v2 certificate then v1 certificates are no longer needed.
|
||||
# initiating_version: 1
|
||||
# default_version: 1
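For a host that carries both certificate versions, the e2e helpers in this diff express the same choice as a plain config map before it is marshaled to YAML. A minimal sketch in that style, assuming caStr, certV1PEM, certV2PEM, and keyPEM hold the relevant PEM strings (the variable names are placeholders, not part of this change):

certBoth := m{
	"pki": m{
		"ca":                 caStr,                 // v1 and v2 CA PEMs concatenated
		"cert":               certV1PEM + certV2PEM, // both certificate versions for this host
		"key":                keyPEM,
		"initiating_version": 2, // start handshakes with the v2 certificate once the whole mesh carries both
	},
}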
|
||||
|
||||
# psk can be used to mask the contents of handshakes.
|
||||
psk:
|
||||
# `mode` defines how the pre-shared keys can be used in a handshake.
|
||||
# `accepting` (the default) will initiate handshakes using an empty key and will try to use any keys provided when
|
||||
# receiving handshakes, including an empty key.
|
||||
# `sending` will initiate handshakes with the first key provided and will try to use any keys provided when
|
||||
# receiving handshakes, including an empty key.
|
||||
# `enforced` will initiate handshakes with the first psk key provided and will try to use any keys provided when
|
||||
# responding to handshakes. An empty key will not be allowed.
|
||||
#
|
||||
# To change a mesh from not using a psk to enforcing psk:
|
||||
# 1. Leave `mode` as `accepting` and configure `psk.keys` to match on all nodes in the mesh and reload.
|
||||
# 2. Change `mode` to `sending` on all nodes in the mesh and reload.
|
||||
# 3. Change `mode` to `enforced` on all nodes in the mesh and reload.
|
||||
#mode: accepting
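The TestPSK cases elsewhere in this diff drive these modes through config overrides in the e2e test's map form. A minimal sketch of a sending-mode host with a fallback key kept around for rotation (the key strings are placeholders):

myPskSettings := m{
	"psk": m{
		"mode": "sending",
		"keys": []string{
			"this is the new primary key", // used for every outbound handshake
			"this is the old key",         // still accepted on inbound handshakes during rotation
		},
	},
}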
|
||||
|
||||
# The keys provided are sent through hkdf to ensure the shared secret used in the noise protocol is the
|
||||
# correct byte length.
|
||||
#
|
||||
# Only the first key is used for outbound handshakes but all keys provided will be tried in the order specified, on
|
||||
# incoming handshakes. This is to allow for psk rotation.
|
||||
#
|
||||
# To rotate a primary key:
|
||||
# 1. Put the new key in the 2nd slot on every node in the mesh and reload.
|
||||
# 2. Move the key from the 2nd slot to the 1st slot, the old primary key is now in the 2nd slot, reload.
|
||||
# 3. Remove the old primary key once it is no longer in use on every node in the mesh and reload.
|
||||
#keys:
|
||||
# - shared secret string, this one is used in all outbound handshakes # This is the primary key used when sending handshakes
|
||||
# - this is a fallback key, received handshakes can use this
|
||||
# - another fallback, received handshakes can use this one too
|
||||
# - "\x68\x65\x6c\x6c\x6f\x20\x66\x72\x69\x65\x6e\x64\x73" # for raw bytes if you desire
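A minimal sketch of the hkdf step described above, assuming each configured key string is stretched to the 32 bytes the noise pre-shared key slot expects (the derivePsk name and the info label are illustrative, not taken from this change):

import (
	"crypto/sha256"
	"io"

	"golang.org/x/crypto/hkdf"
)

// derivePsk stretches an operator-provided key string into a fixed 32-byte
// pre-shared key suitable for the noise handshake configuration.
func derivePsk(key string) ([]byte, error) {
	out := make([]byte, 32)
	r := hkdf.New(sha256.New, []byte(key), nil, []byte("nebula psk"))
	if _, err := io.ReadFull(r, out); err != nil {
		return nil, err
	}
	return out, nil
}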
|
||||
|
||||
# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
|
||||
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
|
||||
@@ -239,28 +271,7 @@ tun:
|
||||
|
||||
# Unsafe routes allows you to route traffic over nebula to non-nebula nodes
|
||||
# Unsafe routes should be avoided unless you have hosts/services that cannot run nebula
|
||||
# Supports weighted ECMP if you define a list of gateways, this can be used for load balancing or redundancy to hosts outside of nebula
|
||||
# NOTES:
|
||||
# * You will only see a single gateway in the routing table if you are not on linux
|
||||
# * If a gateway is not reachable through the overlay another gateway will be selected to send the traffic through, ignoring weights
|
||||
#
|
||||
# unsafe_routes:
|
||||
# # Multiple gateways without defining a weight defaults to a weight of 1, this will balance traffic equally between the three gateways
|
||||
# - route: 192.168.87.0/24
|
||||
# via:
|
||||
# - gateway: 10.0.0.1
|
||||
# - gateway: 10.0.0.2
|
||||
# - gateway: 10.0.0.3
|
||||
# # Multiple gateways with a weight, this will balance traffic accordingly
|
||||
# - route: 192.168.87.0/24
|
||||
# via:
|
||||
# - gateway: 10.0.0.1
|
||||
# weight: 10
|
||||
# - gateway: 10.0.0.2
|
||||
# weight: 5
|
||||
#
|
||||
# NOTE: The nebula certificate of the "via" node(s) *MUST* have the "route" defined as a subnet in its certificate
|
||||
# `via`: single node or list of gateways to use for this route
|
||||
# NOTE: The nebula certificate of the "via" node *MUST* have the "route" defined as a subnet in its certificate
|
||||
# `mtu`: will default to tun mtu if this option is not specified
|
||||
# `metric`: will default to 0 if this option is not specified
|
||||
# `install`: will default to true, controls whether this route is installed in the system's routing table.
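TestGoodHandshakeUnsafeDest earlier in this diff builds exactly this structure programmatically; a compact sketch of a single-gateway unsafe route in that map form, with placeholder addresses:

route := m{
	"route": "192.168.6.0/24", // network reachable behind the via host
	"via":   "10.128.0.2",     // nebula address whose certificate lists the route as a subnet
	"mtu":   1300,             // optional, defaults to the tun mtu
}
myCfg := m{
	"tun": m{
		"unsafe_routes": []m{route},
	},
}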
|
||||
@@ -275,10 +286,6 @@ tun:
|
||||
# On linux only, set to true to manage unsafe routes directly on the system route table with gateway routes instead of
|
||||
# in nebula configuration files. Default false, not reloadable.
|
||||
#use_system_route_table: false
|
||||
# Buffer size for reading routes updates. 0 means default system buffer size. (/proc/sys/net/core/rmem_default).
|
||||
# If using massive routes updates, for example BGP, you may need to increase this value to avoid packet loss.
|
||||
# SO_RCVBUFFORCE is used to avoid having to raise the system wide max
|
||||
#use_system_route_table_buffer_size: 0
|
||||
|
||||
# Configure logging level
|
||||
logging:
|
||||
@@ -338,19 +345,6 @@ logging:
|
||||
# after receiving the response for lighthouse queries
|
||||
#trigger_buffer: 64
|
||||
|
||||
# Tunnel manager settings
|
||||
#tunnels:
|
||||
# drop_inactive controls whether inactive tunnels are maintained or dropped after the inactivity_timeout period has
|
||||
# elapsed.
|
||||
# In general, it is a good idea to enable this setting. It will be enabled by default in a future release.
|
||||
# This setting is reloadable
|
||||
#drop_inactive: false
|
||||
|
||||
# inactivity_timeout controls how long a tunnel MUST NOT see any inbound or outbound traffic before being considered
|
||||
# inactive and eligible to be dropped.
|
||||
# This setting is reloadable
|
||||
#inactivity_timeout: 10m
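TestDropInactiveTunnels earlier in this diff exercises exactly this pair of settings; the override it hands to newSimpleServer doubles as a compact example:

overrides := m{
	"tunnels": m{
		"drop_inactive":      true, // reloadable
		"inactivity_timeout": "5s", // deliberately short so the e2e test finishes quickly
	},
}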
|
||||
|
||||
# Nebula security group configuration
firewall:
# Action to take when a packet is not allowed by the firewall rules.
@@ -362,11 +356,11 @@ firewall:
outbound_action: drop
inbound_action: drop

# THIS FLAG IS DEPRECATED AND WILL BE REMOVED IN A FUTURE RELEASE. (Defaults to false.)
# This setting only affects nebula hosts exposing unsafe_routes. When set to false, each inbound rule must contain a
# `local_cidr` if the intention is to allow traffic to flow to an unsafe route. When set to true, every firewall rule
# will apply to all configured unsafe_routes regardless of the actual destination of the packet, unless `local_cidr`
# is explicitly defined. This is usually not the desired behavior and should be avoided!
# Controls the default value for local_cidr. Default is true; this will be deprecated after v1.9 and defaulted to false.
# This setting only affects nebula hosts with subnets encoded in their certificate. A nebula host acting as an
# unsafe router with `default_local_cidr_any: true` will expose its unsafe routes to every inbound rule regardless
# of the actual destination for the packet. Setting this to false requires each inbound rule to contain a `local_cidr`
# if the intention is to allow traffic to flow to an unsafe route.
#default_local_cidr_any: false

conntrack:
@@ -384,9 +378,11 @@ firewall:
# group: `any` or a literal group name, i.e. `default-group`
# groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
# cidr: a remote CIDR, `0.0.0.0/0` is any ipv4 and `::/0` is any ipv6.
# local_cidr: a local CIDR, `0.0.0.0/0` is any ipv4 and `::/0` is any ipv6. This can be used to filter destinations when using unsafe_routes.
# By default, this is set to only the VPN (overlay) networks assigned via the certificate networks field unless `default_local_cidr_any` is set to true.
# If there are unsafe_routes present in this config file, `local_cidr` should be set appropriately for the intended use case.
# local_cidr: a local CIDR, `0.0.0.0/0` is any ipv4 and `::/0` is any ipv6. This could be used to filter destinations when using unsafe_routes.
# If no unsafe networks are present in the certificate(s) or `default_local_cidr_any` is true then the default is any ipv4 or ipv6 network.
# Otherwise the default is any vpn network assigned via the certificate.
# `default_local_cidr_any` defaults to false and is deprecated; it will be removed in a future release.
# If there are unsafe routes present, it's best to set `local_cidr` to whatever best fits the situation.
# ca_name: An issuing CA name
# ca_sha: An issuing CA shasum

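To make the rule fields above concrete, here is a small Go sketch that builds one inbound rule programmatically, mirroring the config.C pattern used by the test changes later in this diff. The port, groups, and local_cidr values are illustrative only, and whether the nested maps are map[string]any or map[interface{}]interface{} depends on which side of this diff you are on.

package main

import (
    "fmt"

    "github.com/sirupsen/logrus"
    "github.com/slackhq/nebula/config"
)

func main() {
    conf := config.NewC(logrus.New())
    // One inbound rule: groups are AND'd, and local_cidr narrows which local
    // destination the rule applies to (mainly relevant when exposing unsafe_routes).
    conf.Settings["firewall"] = map[string]any{
        "inbound": []any{
            map[string]any{
                "port":       "443",
                "proto":      "tcp",
                "groups":     []string{"web", "prod"},
                "local_cidr": "192.168.87.0/24",
            },
        },
    }
    fmt.Println(conf.Settings["firewall"])
}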
@@ -5,12 +5,8 @@ import (
"fmt"
"log"
"net"
"os"

"github.com/sirupsen/logrus"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/overlay"
"github.com/slackhq/nebula/service"
)

@@ -63,16 +59,7 @@ pki:
if err := cfg.LoadString(configStr); err != nil {
return err
}

logger := logrus.New()
logger.Out = os.Stdout

ctrl, err := nebula.Main(&cfg, false, "custom-app", logger, overlay.NewUserDeviceFromConfig)
if err != nil {
return err
}

svc, err := service.New(ctrl)
svc, err := service.New(&cfg)
if err != nil {
return err
}

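The hunk above switches the embedding example between two constructors: one side builds a *nebula.Control with nebula.Main and hands it to service.New, the other passes the parsed config to service.New directly. A minimal sketch of the config-direct form follows; constructing cfg with config.NewC is an assumption for illustration (the hunk only shows LoadString onward, and it passes &cfg because its cfg is a config.C value rather than a pointer).

package main

import (
    "log"

    "github.com/sirupsen/logrus"
    "github.com/slackhq/nebula/config"
    "github.com/slackhq/nebula/service"
)

// configStr would hold the YAML configuration; elided here.
const configStr = "..."

func main() {
    cfg := config.NewC(logrus.New()) // assumed setup, not shown in the hunk
    if err := cfg.LoadString(configStr); err != nil {
        log.Fatal(err)
    }

    // Config-direct form: no separate nebula.Main / *nebula.Control step.
    svc, err := service.New(cfg)
    if err != nil {
        log.Fatal(err)
    }
    _ = svc
}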
94 firewall.go
@@ -53,7 +53,7 @@ type Firewall struct {
|
||||
|
||||
// routableNetworks describes the vpn addresses as well as any unsafe networks issued to us in the certificate.
|
||||
// The vpn addresses are a full bit match while the unsafe networks only match the prefix
|
||||
routableNetworks *bart.Lite
|
||||
routableNetworks *bart.Table[struct{}]
|
||||
|
||||
// assignedNetworks is a list of vpn networks assigned to us in the certificate.
|
||||
assignedNetworks []netip.Prefix
|
||||
@@ -125,7 +125,7 @@ type firewallPort map[int32]*FirewallCA
|
||||
|
||||
type firewallLocalCIDR struct {
|
||||
Any bool
|
||||
LocalCIDR *bart.Lite
|
||||
LocalCIDR *bart.Table[struct{}]
|
||||
}
|
||||
|
||||
// NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts.
|
||||
@@ -148,17 +148,17 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
|
||||
tmax = defaultTimeout
|
||||
}
|
||||
|
||||
routableNetworks := new(bart.Lite)
|
||||
routableNetworks := new(bart.Table[struct{}])
|
||||
var assignedNetworks []netip.Prefix
|
||||
for _, network := range c.Networks() {
|
||||
nprefix := netip.PrefixFrom(network.Addr(), network.Addr().BitLen())
|
||||
routableNetworks.Insert(nprefix)
|
||||
routableNetworks.Insert(nprefix, struct{}{})
|
||||
assignedNetworks = append(assignedNetworks, network)
|
||||
}
|
||||
|
||||
hasUnsafeNetworks := false
|
||||
for _, n := range c.UnsafeNetworks() {
|
||||
routableNetworks.Insert(n)
|
||||
routableNetworks.Insert(n, struct{}{})
|
||||
hasUnsafeNetworks = true
|
||||
}
|
||||
|
||||
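Both sides of the hunk above use routableNetworks purely as a set of prefixes; only the container type differs. The sketch below contrasts the two idioms using only the calls that appear in this diff (Insert/Contains on bart.Lite, Insert/Lookup on bart.Table[struct{}]); which of the two compiles depends on the gaissmai/bart version pinned in go.mod, so treat it as illustrative rather than buildable against one fixed version.

package main

import (
    "fmt"
    "net/netip"

    "github.com/gaissmai/bart"
)

func main() {
    pfx := netip.MustParsePrefix("10.0.0.0/8")
    addr := netip.MustParseAddr("10.1.2.3")

    // Set-style API: no payload value.
    lite := new(bart.Lite)
    lite.Insert(pfx)
    fmt.Println(lite.Contains(addr)) // true

    // Generic routing table used as a set: empty struct payload.
    table := new(bart.Table[struct{}])
    table.Insert(pfx, struct{}{})
    _, ok := table.Lookup(addr)
    fmt.Println(ok) // true
}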
@@ -331,7 +331,7 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
|
||||
return nil
|
||||
}
|
||||
|
||||
rs, ok := r.([]any)
|
||||
rs, ok := r.([]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("%s failed to parse, should be an array of rules", table)
|
||||
}
|
||||
@@ -417,49 +417,36 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
|
||||
return nil
|
||||
}
|
||||
|
||||
var ErrUnknownNetworkType = errors.New("unknown network type")
|
||||
var ErrPeerRejected = errors.New("remote address is not within a network that we handle")
|
||||
var ErrInvalidRemoteIP = errors.New("remote address is not in remote certificate networks")
|
||||
var ErrInvalidLocalIP = errors.New("local address is not in list of handled local addresses")
|
||||
var ErrInvalidRemoteIP = errors.New("remote IP is not in remote certificate subnets")
|
||||
var ErrInvalidLocalIP = errors.New("local IP is not in list of handled local IPs")
|
||||
var ErrNoMatchingRule = errors.New("no matching rule in firewall table")
|
||||
|
||||
// Drop returns an error if the packet should be dropped, explaining why. It
|
||||
// returns nil if the packet should not be dropped.
|
||||
func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.CAPool, localCache firewall.ConntrackCache, now time.Time) error {
|
||||
func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.CAPool, localCache firewall.ConntrackCache) error {
|
||||
// Check if we spoke to this tuple, if we did then allow this packet
|
||||
if f.inConns(fp, h, caPool, localCache, now) {
|
||||
if f.inConns(fp, h, caPool, localCache) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Make sure remote address matches nebula certificate, and determine how to treat it
|
||||
if h.networks == nil {
|
||||
// Make sure remote address matches nebula certificate
|
||||
if h.networks != nil {
|
||||
_, ok := h.networks.Lookup(fp.RemoteAddr)
|
||||
if !ok {
|
||||
f.metrics(incoming).droppedRemoteAddr.Inc(1)
|
||||
return ErrInvalidRemoteIP
|
||||
}
|
||||
} else {
|
||||
// Simple case: Certificate has one address and no unsafe networks
|
||||
if h.vpnAddrs[0] != fp.RemoteAddr {
|
||||
f.metrics(incoming).droppedRemoteAddr.Inc(1)
|
||||
return ErrInvalidRemoteIP
|
||||
}
|
||||
} else {
|
||||
nwType, ok := h.networks.Lookup(fp.RemoteAddr)
|
||||
if !ok {
|
||||
f.metrics(incoming).droppedRemoteAddr.Inc(1)
|
||||
return ErrInvalidRemoteIP
|
||||
}
|
||||
switch nwType {
|
||||
case NetworkTypeVPN:
|
||||
break // nothing special
|
||||
case NetworkTypeVPNPeer:
|
||||
f.metrics(incoming).droppedRemoteAddr.Inc(1)
|
||||
return ErrPeerRejected // reject for now, one day this may have different FW rules
|
||||
case NetworkTypeUnsafe:
|
||||
break // nothing special, one day this may have different FW rules
|
||||
default:
|
||||
f.metrics(incoming).droppedRemoteAddr.Inc(1)
|
||||
return ErrUnknownNetworkType //should never happen
|
||||
}
|
||||
}
|
||||
|
||||
// Make sure we are supposed to be handling this local ip address
|
||||
if !f.routableNetworks.Contains(fp.LocalAddr) {
|
||||
_, ok := f.routableNetworks.Lookup(fp.LocalAddr)
|
||||
if !ok {
|
||||
f.metrics(incoming).droppedLocalAddr.Inc(1)
|
||||
return ErrInvalidLocalIP
|
||||
}
|
||||
@@ -476,7 +463,7 @@ func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *
|
||||
}
|
||||
|
||||
// We always want to conntrack since it is a faster operation
|
||||
f.addConn(fp, incoming, now)
|
||||
f.addConn(fp, incoming)
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -505,7 +492,7 @@ func (f *Firewall) EmitStats() {
|
||||
metrics.GetOrRegisterGauge("firewall.rules.hash", nil).Update(int64(f.GetRuleHashFNV()))
|
||||
}
|
||||
|
||||
func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.CAPool, localCache firewall.ConntrackCache, now time.Time) bool {
|
||||
func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.CAPool, localCache firewall.ConntrackCache) bool {
|
||||
if localCache != nil {
|
||||
if _, ok := localCache[fp]; ok {
|
||||
return true
|
||||
@@ -517,7 +504,7 @@ func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.CAPool,
|
||||
// Purge every time we test
|
||||
ep, has := conntrack.TimerWheel.Purge()
|
||||
if has {
|
||||
f.evict(ep, now)
|
||||
f.evict(ep)
|
||||
}
|
||||
|
||||
c, ok := conntrack.Conns[fp]
|
||||
@@ -564,11 +551,11 @@ func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.CAPool,
|
||||
|
||||
switch fp.Protocol {
|
||||
case firewall.ProtoTCP:
|
||||
c.Expires = now.Add(f.TCPTimeout)
|
||||
c.Expires = time.Now().Add(f.TCPTimeout)
|
||||
case firewall.ProtoUDP:
|
||||
c.Expires = now.Add(f.UDPTimeout)
|
||||
c.Expires = time.Now().Add(f.UDPTimeout)
|
||||
default:
|
||||
c.Expires = now.Add(f.DefaultTimeout)
|
||||
c.Expires = time.Now().Add(f.DefaultTimeout)
|
||||
}
|
||||
|
||||
conntrack.Unlock()
|
||||
@@ -580,7 +567,7 @@ func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.CAPool,
|
||||
return true
|
||||
}
|
||||
|
||||
func (f *Firewall) addConn(fp firewall.Packet, incoming bool, now time.Time) {
|
||||
func (f *Firewall) addConn(fp firewall.Packet, incoming bool) {
|
||||
var timeout time.Duration
|
||||
c := &conn{}
|
||||
|
||||
@@ -596,7 +583,7 @@ func (f *Firewall) addConn(fp firewall.Packet, incoming bool, now time.Time) {
|
||||
conntrack := f.Conntrack
|
||||
conntrack.Lock()
|
||||
if _, ok := conntrack.Conns[fp]; !ok {
|
||||
conntrack.TimerWheel.Advance(now)
|
||||
conntrack.TimerWheel.Advance(time.Now())
|
||||
conntrack.TimerWheel.Add(fp, timeout)
|
||||
}
|
||||
|
||||
@@ -604,14 +591,14 @@ func (f *Firewall) addConn(fp firewall.Packet, incoming bool, now time.Time) {
|
||||
// firewall reload
|
||||
c.incoming = incoming
|
||||
c.rulesVersion = f.rulesVersion
|
||||
c.Expires = now.Add(timeout)
|
||||
c.Expires = time.Now().Add(timeout)
|
||||
conntrack.Conns[fp] = c
|
||||
conntrack.Unlock()
|
||||
}
|
||||
|
||||
// Evict checks if a conntrack entry has expired, if so it is removed, if not it is re-added to the wheel
|
||||
// Caller must own the connMutex lock!
|
||||
func (f *Firewall) evict(p firewall.Packet, now time.Time) {
|
||||
func (f *Firewall) evict(p firewall.Packet) {
|
||||
// Are we still tracking this conn?
|
||||
conntrack := f.Conntrack
|
||||
t, ok := conntrack.Conns[p]
|
||||
@@ -619,11 +606,11 @@ func (f *Firewall) evict(p firewall.Packet, now time.Time) {
|
||||
return
|
||||
}
|
||||
|
||||
newT := t.Expires.Sub(now)
|
||||
newT := t.Expires.Sub(time.Now())
|
||||
|
||||
// Timeout is in the future, re-add the timer
|
||||
if newT > 0 {
|
||||
conntrack.TimerWheel.Advance(now)
|
||||
conntrack.TimerWheel.Advance(time.Now())
|
||||
conntrack.TimerWheel.Add(p, newT)
|
||||
return
|
||||
}
|
||||
@@ -765,7 +752,7 @@ func (fc *FirewallCA) match(p firewall.Packet, c *cert.CachedCertificate, caPool
|
||||
func (fr *FirewallRule) addRule(f *Firewall, groups []string, host string, ip, localCIDR netip.Prefix) error {
|
||||
flc := func() *firewallLocalCIDR {
|
||||
return &firewallLocalCIDR{
|
||||
LocalCIDR: new(bart.Lite),
|
||||
LocalCIDR: new(bart.Table[struct{}]),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -892,7 +879,7 @@ func (flc *firewallLocalCIDR) addRule(f *Firewall, localIp netip.Prefix) error {
|
||||
}
|
||||
|
||||
for _, network := range f.assignedNetworks {
|
||||
flc.LocalCIDR.Insert(network)
|
||||
flc.LocalCIDR.Insert(network, struct{}{})
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -901,7 +888,7 @@ func (flc *firewallLocalCIDR) addRule(f *Firewall, localIp netip.Prefix) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
flc.LocalCIDR.Insert(localIp)
|
||||
flc.LocalCIDR.Insert(localIp, struct{}{})
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -914,7 +901,8 @@ func (flc *firewallLocalCIDR) match(p firewall.Packet, c *cert.CachedCertificate
|
||||
return true
|
||||
}
|
||||
|
||||
return flc.LocalCIDR.Contains(p.LocalAddr)
|
||||
_, ok := flc.LocalCIDR.Lookup(p.LocalAddr)
|
||||
return ok
|
||||
}
|
||||
|
||||
type rule struct {
|
||||
@@ -930,15 +918,15 @@ type rule struct {
|
||||
CASha string
|
||||
}
|
||||
|
||||
func convertRule(l *logrus.Logger, p any, table string, i int) (rule, error) {
|
||||
func convertRule(l *logrus.Logger, p interface{}, table string, i int) (rule, error) {
|
||||
r := rule{}
|
||||
|
||||
m, ok := p.(map[string]any)
|
||||
m, ok := p.(map[interface{}]interface{})
|
||||
if !ok {
|
||||
return r, errors.New("could not parse rule")
|
||||
}
|
||||
|
||||
toString := func(k string, m map[string]any) string {
|
||||
toString := func(k string, m map[interface{}]interface{}) string {
|
||||
v, ok := m[k]
|
||||
if !ok {
|
||||
return ""
|
||||
@@ -956,7 +944,7 @@ func convertRule(l *logrus.Logger, p any, table string, i int) (rule, error) {
|
||||
r.CASha = toString("ca_sha", m)
|
||||
|
||||
// Make sure group isn't an array
|
||||
if v, ok := m["group"].([]any); ok {
|
||||
if v, ok := m["group"].([]interface{}); ok {
|
||||
if len(v) > 1 {
|
||||
return r, errors.New("group should contain a single value, an array with more than one entry was provided")
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"net/netip"
|
||||
)
|
||||
|
||||
type m = map[string]any
|
||||
type m map[string]interface{}
|
||||
|
||||
const (
|
||||
ProtoAny = 0 // When we want to handle HOPOPT (0) we can change this, if ever
|
||||
|
||||
492 firewall_test.go
@@ -8,8 +8,6 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/firewall"
|
||||
@@ -70,9 +68,6 @@ func TestFirewall_AddRule(t *testing.T) {
|
||||
ti, err := netip.ParsePrefix("1.2.3.4/32")
|
||||
require.NoError(t, err)
|
||||
|
||||
ti6, err := netip.ParsePrefix("fd12::34/128")
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||
// An empty rule is any
|
||||
assert.True(t, fw.InRules.TCP[1].Any.Any.Any)
|
||||
@@ -97,24 +92,12 @@ func TestFirewall_AddRule(t *testing.T) {
|
||||
_, ok := fw.OutRules.AnyProto[1].Any.CIDR.Get(ti)
|
||||
assert.True(t, ok)
|
||||
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti6, netip.Prefix{}, "", ""))
|
||||
assert.Nil(t, fw.OutRules.AnyProto[1].Any.Any)
|
||||
_, ok = fw.OutRules.AnyProto[1].Any.CIDR.Get(ti6)
|
||||
assert.True(t, ok)
|
||||
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, ti, "", ""))
|
||||
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.Any)
|
||||
_, ok = fw.OutRules.AnyProto[1].Any.Any.LocalCIDR.Get(ti)
|
||||
assert.True(t, ok)
|
||||
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, ti6, "", ""))
|
||||
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.Any)
|
||||
_, ok = fw.OutRules.AnyProto[1].Any.Any.LocalCIDR.Get(ti6)
|
||||
assert.True(t, ok)
|
||||
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "ca-name", ""))
|
||||
assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")
|
||||
@@ -134,13 +117,6 @@ func TestFirewall_AddRule(t *testing.T) {
|
||||
require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, netip.Prefix{}, "", ""))
|
||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
|
||||
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
anyIp6, err := netip.ParsePrefix("::/0")
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp6, netip.Prefix{}, "", ""))
|
||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
|
||||
|
||||
// Test error conditions
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
require.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||
@@ -151,8 +127,7 @@ func TestFirewall_Drop(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
ob := &bytes.Buffer{}
|
||||
l.SetOutput(ob)
|
||||
myVpnNetworksTable := new(bart.Lite)
|
||||
myVpnNetworksTable.Insert(netip.MustParsePrefix("1.1.1.1/8"))
|
||||
|
||||
p := firewall.Packet{
|
||||
LocalAddr: netip.MustParseAddr("1.2.3.4"),
|
||||
RemoteAddr: netip.MustParseAddr("1.2.3.4"),
|
||||
@@ -177,7 +152,7 @@ func TestFirewall_Drop(t *testing.T) {
|
||||
},
|
||||
vpnAddrs: []netip.Addr{netip.MustParseAddr("1.2.3.4")},
|
||||
}
|
||||
h.buildNetworks(myVpnNetworksTable, &c)
|
||||
h.buildNetworks(c.networks, c.unsafeNetworks)
|
||||
|
||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||
@@ -224,85 +199,6 @@ func TestFirewall_Drop(t *testing.T) {
|
||||
require.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
||||
}
|
||||
|
||||
func TestFirewall_DropV6(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
ob := &bytes.Buffer{}
|
||||
l.SetOutput(ob)
|
||||
|
||||
myVpnNetworksTable := new(bart.Lite)
|
||||
myVpnNetworksTable.Insert(netip.MustParsePrefix("fd00::/7"))
|
||||
|
||||
p := firewall.Packet{
|
||||
LocalAddr: netip.MustParseAddr("fd12::34"),
|
||||
RemoteAddr: netip.MustParseAddr("fd12::34"),
|
||||
LocalPort: 10,
|
||||
RemotePort: 90,
|
||||
Protocol: firewall.ProtoUDP,
|
||||
Fragment: false,
|
||||
}
|
||||
|
||||
c := dummyCert{
|
||||
name: "host1",
|
||||
networks: []netip.Prefix{netip.MustParsePrefix("fd12::34/120")},
|
||||
groups: []string{"default-group"},
|
||||
issuer: "signer-shasum",
|
||||
}
|
||||
h := HostInfo{
|
||||
ConnectionState: &ConnectionState{
|
||||
peerCert: &cert.CachedCertificate{
|
||||
Certificate: &c,
|
||||
InvertedGroups: map[string]struct{}{"default-group": {}},
|
||||
},
|
||||
},
|
||||
vpnAddrs: []netip.Addr{netip.MustParseAddr("fd12::34")},
|
||||
}
|
||||
h.buildNetworks(myVpnNetworksTable, &c)
|
||||
|
||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||
cp := cert.NewCAPool()
|
||||
|
||||
// Drop outbound
|
||||
assert.Equal(t, ErrNoMatchingRule, fw.Drop(p, false, &h, cp, nil))
|
||||
// Allow inbound
|
||||
resetConntrack(fw)
|
||||
require.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
||||
// Allow outbound because conntrack
|
||||
require.NoError(t, fw.Drop(p, false, &h, cp, nil))
|
||||
|
||||
// test remote mismatch
|
||||
oldRemote := p.RemoteAddr
|
||||
p.RemoteAddr = netip.MustParseAddr("fd12::56")
|
||||
assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrInvalidRemoteIP)
|
||||
p.RemoteAddr = oldRemote
|
||||
|
||||
// ensure signer doesn't get in the way of group checks
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
|
||||
assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
|
||||
|
||||
// test caSha doesn't drop on match
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
|
||||
require.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
||||
|
||||
// ensure ca name doesn't get in the way of group checks
|
||||
cp.CAs["signer-shasum"] = &cert.CachedCertificate{Certificate: &dummyCert{name: "ca-good"}}
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
|
||||
assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
|
||||
|
||||
// test caName doesn't drop on match
|
||||
cp.CAs["signer-shasum"] = &cert.CachedCertificate{Certificate: &dummyCert{name: "ca-good"}}
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
|
||||
require.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
||||
}
|
||||
|
||||
func BenchmarkFirewallTable_match(b *testing.B) {
|
||||
f := &Firewall{}
|
||||
ft := FirewallTable{
|
||||
@@ -312,10 +208,6 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
||||
pfix := netip.MustParsePrefix("172.1.1.1/32")
|
||||
_ = ft.TCP.addRule(f, 10, 10, []string{"good-group"}, "good-host", pfix, netip.Prefix{}, "", "")
|
||||
_ = ft.TCP.addRule(f, 100, 100, []string{"good-group"}, "good-host", netip.Prefix{}, pfix, "", "")
|
||||
|
||||
pfix6 := netip.MustParsePrefix("fd11::11/128")
|
||||
_ = ft.TCP.addRule(f, 10, 10, []string{"good-group"}, "good-host", pfix6, netip.Prefix{}, "", "")
|
||||
_ = ft.TCP.addRule(f, 100, 100, []string{"good-group"}, "good-host", netip.Prefix{}, pfix6, "", "")
|
||||
cp := cert.NewCAPool()
|
||||
|
||||
b.Run("fail on proto", func(b *testing.B) {
|
||||
@@ -347,15 +239,6 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
||||
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: ip.Addr()}, true, c, cp))
|
||||
}
|
||||
})
|
||||
b.Run("pass proto, port, fail on local CIDRv6", func(b *testing.B) {
|
||||
c := &cert.CachedCertificate{
|
||||
Certificate: &dummyCert{},
|
||||
}
|
||||
ip := netip.MustParsePrefix("fd99::99/128")
|
||||
for n := 0; n < b.N; n++ {
|
||||
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: ip.Addr()}, true, c, cp))
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("pass proto, port, any local CIDR, fail all group, name, and cidr", func(b *testing.B) {
|
||||
c := &cert.CachedCertificate{
|
||||
@@ -369,18 +252,6 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
||||
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
|
||||
}
|
||||
})
|
||||
b.Run("pass proto, port, any local CIDRv6, fail all group, name, and cidr", func(b *testing.B) {
|
||||
c := &cert.CachedCertificate{
|
||||
Certificate: &dummyCert{
|
||||
name: "nope",
|
||||
networks: []netip.Prefix{netip.MustParsePrefix("fd99::99/128")},
|
||||
},
|
||||
InvertedGroups: map[string]struct{}{"nope": {}},
|
||||
}
|
||||
for n := 0; n < b.N; n++ {
|
||||
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("pass proto, port, specific local CIDR, fail all group, name, and cidr", func(b *testing.B) {
|
||||
c := &cert.CachedCertificate{
|
||||
@@ -394,18 +265,6 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
||||
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: pfix.Addr()}, true, c, cp))
|
||||
}
|
||||
})
|
||||
b.Run("pass proto, port, specific local CIDRv6, fail all group, name, and cidr", func(b *testing.B) {
|
||||
c := &cert.CachedCertificate{
|
||||
Certificate: &dummyCert{
|
||||
name: "nope",
|
||||
networks: []netip.Prefix{netip.MustParsePrefix("fd99::99/128")},
|
||||
},
|
||||
InvertedGroups: map[string]struct{}{"nope": {}},
|
||||
}
|
||||
for n := 0; n < b.N; n++ {
|
||||
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: pfix6.Addr()}, true, c, cp))
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("pass on group on any local cidr", func(b *testing.B) {
|
||||
c := &cert.CachedCertificate{
|
||||
@@ -430,17 +289,6 @@ func BenchmarkFirewallTable_match(b *testing.B) {
|
||||
assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: pfix.Addr()}, true, c, cp))
|
||||
}
|
||||
})
|
||||
b.Run("pass on group on specific local cidr6", func(b *testing.B) {
|
||||
c := &cert.CachedCertificate{
|
||||
Certificate: &dummyCert{
|
||||
name: "nope",
|
||||
},
|
||||
InvertedGroups: map[string]struct{}{"good-group": {}},
|
||||
}
|
||||
for n := 0; n < b.N; n++ {
|
||||
assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalAddr: pfix6.Addr()}, true, c, cp))
|
||||
}
|
||||
})
|
||||
|
||||
b.Run("pass on name", func(b *testing.B) {
|
||||
c := &cert.CachedCertificate{
|
||||
@@ -459,8 +307,6 @@ func TestFirewall_Drop2(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
ob := &bytes.Buffer{}
|
||||
l.SetOutput(ob)
|
||||
myVpnNetworksTable := new(bart.Lite)
|
||||
myVpnNetworksTable.Insert(netip.MustParsePrefix("1.1.1.1/8"))
|
||||
|
||||
p := firewall.Packet{
|
||||
LocalAddr: netip.MustParseAddr("1.2.3.4"),
|
||||
@@ -486,7 +332,7 @@ func TestFirewall_Drop2(t *testing.T) {
|
||||
},
|
||||
vpnAddrs: []netip.Addr{network.Addr()},
|
||||
}
|
||||
h.buildNetworks(myVpnNetworksTable, c.Certificate)
|
||||
h.buildNetworks(c.Certificate.Networks(), c.Certificate.UnsafeNetworks())
|
||||
|
||||
c1 := cert.CachedCertificate{
|
||||
Certificate: &dummyCert{
|
||||
@@ -501,7 +347,7 @@ func TestFirewall_Drop2(t *testing.T) {
|
||||
peerCert: &c1,
|
||||
},
|
||||
}
|
||||
h1.buildNetworks(myVpnNetworksTable, c1.Certificate)
|
||||
h1.buildNetworks(c1.Certificate.Networks(), c1.Certificate.UnsafeNetworks())
|
||||
|
||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c.Certificate)
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||
@@ -518,8 +364,6 @@ func TestFirewall_Drop3(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
ob := &bytes.Buffer{}
|
||||
l.SetOutput(ob)
|
||||
myVpnNetworksTable := new(bart.Lite)
|
||||
myVpnNetworksTable.Insert(netip.MustParsePrefix("1.1.1.1/8"))
|
||||
|
||||
p := firewall.Packet{
|
||||
LocalAddr: netip.MustParseAddr("1.2.3.4"),
|
||||
@@ -551,7 +395,7 @@ func TestFirewall_Drop3(t *testing.T) {
|
||||
},
|
||||
vpnAddrs: []netip.Addr{network.Addr()},
|
||||
}
|
||||
h1.buildNetworks(myVpnNetworksTable, c1.Certificate)
|
||||
h1.buildNetworks(c1.Certificate.Networks(), c1.Certificate.UnsafeNetworks())
|
||||
|
||||
c2 := cert.CachedCertificate{
|
||||
Certificate: &dummyCert{
|
||||
@@ -566,7 +410,7 @@ func TestFirewall_Drop3(t *testing.T) {
|
||||
},
|
||||
vpnAddrs: []netip.Addr{network.Addr()},
|
||||
}
|
||||
h2.buildNetworks(myVpnNetworksTable, c2.Certificate)
|
||||
h2.buildNetworks(c2.Certificate.Networks(), c2.Certificate.UnsafeNetworks())
|
||||
|
||||
c3 := cert.CachedCertificate{
|
||||
Certificate: &dummyCert{
|
||||
@@ -581,7 +425,7 @@ func TestFirewall_Drop3(t *testing.T) {
|
||||
},
|
||||
vpnAddrs: []netip.Addr{network.Addr()},
|
||||
}
|
||||
h3.buildNetworks(myVpnNetworksTable, c3.Certificate)
|
||||
h3.buildNetworks(c3.Certificate.Networks(), c3.Certificate.UnsafeNetworks())
|
||||
|
||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c.Certificate)
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||
@@ -603,50 +447,10 @@ func TestFirewall_Drop3(t *testing.T) {
|
||||
require.NoError(t, fw.Drop(p, true, &h1, cp, nil))
|
||||
}
|
||||
|
||||
func TestFirewall_Drop3V6(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
ob := &bytes.Buffer{}
|
||||
l.SetOutput(ob)
|
||||
myVpnNetworksTable := new(bart.Lite)
|
||||
myVpnNetworksTable.Insert(netip.MustParsePrefix("fd00::/7"))
|
||||
|
||||
p := firewall.Packet{
|
||||
LocalAddr: netip.MustParseAddr("fd12::34"),
|
||||
RemoteAddr: netip.MustParseAddr("fd12::34"),
|
||||
LocalPort: 1,
|
||||
RemotePort: 1,
|
||||
Protocol: firewall.ProtoUDP,
|
||||
Fragment: false,
|
||||
}
|
||||
|
||||
network := netip.MustParsePrefix("fd12::34/120")
|
||||
c := cert.CachedCertificate{
|
||||
Certificate: &dummyCert{
|
||||
name: "host-owner",
|
||||
networks: []netip.Prefix{network},
|
||||
},
|
||||
}
|
||||
h := HostInfo{
|
||||
ConnectionState: &ConnectionState{
|
||||
peerCert: &c,
|
||||
},
|
||||
vpnAddrs: []netip.Addr{network.Addr()},
|
||||
}
|
||||
h.buildNetworks(myVpnNetworksTable, c.Certificate)
|
||||
|
||||
// Test a remote address match
|
||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c.Certificate)
|
||||
cp := cert.NewCAPool()
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", netip.MustParsePrefix("fd12::34/120"), netip.Prefix{}, "", ""))
|
||||
require.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
||||
}
|
||||
|
||||
func TestFirewall_DropConntrackReload(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
ob := &bytes.Buffer{}
|
||||
l.SetOutput(ob)
|
||||
myVpnNetworksTable := new(bart.Lite)
|
||||
myVpnNetworksTable.Insert(netip.MustParsePrefix("1.1.1.1/8"))
|
||||
|
||||
p := firewall.Packet{
|
||||
LocalAddr: netip.MustParseAddr("1.2.3.4"),
|
||||
@@ -673,7 +477,7 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
|
||||
},
|
||||
vpnAddrs: []netip.Addr{network.Addr()},
|
||||
}
|
||||
h.buildNetworks(myVpnNetworksTable, c.Certificate)
|
||||
h.buildNetworks(c.Certificate.Networks(), c.Certificate.UnsafeNetworks())
|
||||
|
||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c.Certificate)
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||
@@ -706,52 +510,6 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
|
||||
assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrNoMatchingRule)
|
||||
}
|
||||
|
||||
func TestFirewall_DropIPSpoofing(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
ob := &bytes.Buffer{}
|
||||
l.SetOutput(ob)
|
||||
myVpnNetworksTable := new(bart.Lite)
|
||||
myVpnNetworksTable.Insert(netip.MustParsePrefix("192.0.2.1/24"))
|
||||
|
||||
c := cert.CachedCertificate{
|
||||
Certificate: &dummyCert{
|
||||
name: "host-owner",
|
||||
networks: []netip.Prefix{netip.MustParsePrefix("192.0.2.1/24")},
|
||||
},
|
||||
}
|
||||
|
||||
c1 := cert.CachedCertificate{
|
||||
Certificate: &dummyCert{
|
||||
name: "host",
|
||||
networks: []netip.Prefix{netip.MustParsePrefix("192.0.2.2/24")},
|
||||
unsafeNetworks: []netip.Prefix{netip.MustParsePrefix("198.51.100.0/24")},
|
||||
},
|
||||
}
|
||||
h1 := HostInfo{
|
||||
ConnectionState: &ConnectionState{
|
||||
peerCert: &c1,
|
||||
},
|
||||
vpnAddrs: []netip.Addr{c1.Certificate.Networks()[0].Addr()},
|
||||
}
|
||||
h1.buildNetworks(myVpnNetworksTable, c1.Certificate)
|
||||
|
||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, c.Certificate)
|
||||
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||
cp := cert.NewCAPool()
|
||||
|
||||
// Packet spoofed by `c1`. Note that the remote addr is not a valid one.
|
||||
p := firewall.Packet{
|
||||
LocalAddr: netip.MustParseAddr("192.0.2.1"),
|
||||
RemoteAddr: netip.MustParseAddr("192.0.2.3"),
|
||||
LocalPort: 1,
|
||||
RemotePort: 1,
|
||||
Protocol: firewall.ProtoUDP,
|
||||
Fragment: false,
|
||||
}
|
||||
assert.Equal(t, fw.Drop(p, true, &h1, cp, nil), ErrInvalidRemoteIP)
|
||||
}
|
||||
|
||||
func BenchmarkLookup(b *testing.B) {
|
||||
ml := func(m map[string]struct{}, a [][]string) {
|
||||
for n := 0; n < b.N; n++ {
|
||||
@@ -873,53 +631,53 @@ func TestNewFirewallFromConfig(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
conf := config.NewC(l)
|
||||
conf.Settings["firewall"] = map[string]any{"outbound": "asdf"}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": "asdf"}
|
||||
_, err = NewFirewallFromConfig(l, cs, conf)
|
||||
require.EqualError(t, err, "firewall.outbound failed to parse, should be an array of rules")
|
||||
|
||||
// Test both port and code
|
||||
conf = config.NewC(l)
|
||||
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"port": "1", "code": "2"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "code": "2"}}}
|
||||
_, err = NewFirewallFromConfig(l, cs, conf)
|
||||
require.EqualError(t, err, "firewall.outbound rule #0; only one of port or code should be provided")
|
||||
|
||||
// Test missing host, group, cidr, ca_name and ca_sha
|
||||
conf = config.NewC(l)
|
||||
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{}}}
|
||||
_, err = NewFirewallFromConfig(l, cs, conf)
|
||||
require.EqualError(t, err, "firewall.outbound rule #0; at least one of host, group, cidr, local_cidr, ca_name, or ca_sha must be provided")
|
||||
|
||||
// Test code/port error
|
||||
conf = config.NewC(l)
|
||||
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"code": "a", "host": "testh"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "a", "host": "testh"}}}
|
||||
_, err = NewFirewallFromConfig(l, cs, conf)
|
||||
require.EqualError(t, err, "firewall.outbound rule #0; code was not a number; `a`")
|
||||
|
||||
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"port": "a", "host": "testh"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "a", "host": "testh"}}}
|
||||
_, err = NewFirewallFromConfig(l, cs, conf)
|
||||
require.EqualError(t, err, "firewall.outbound rule #0; port was not a number; `a`")
|
||||
|
||||
// Test proto error
|
||||
conf = config.NewC(l)
|
||||
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"code": "1", "host": "testh"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "host": "testh"}}}
|
||||
_, err = NewFirewallFromConfig(l, cs, conf)
|
||||
require.EqualError(t, err, "firewall.outbound rule #0; proto was not understood; ``")
|
||||
|
||||
// Test cidr parse error
|
||||
conf = config.NewC(l)
|
||||
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"code": "1", "cidr": "testh", "proto": "any"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}}
|
||||
_, err = NewFirewallFromConfig(l, cs, conf)
|
||||
require.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; netip.ParsePrefix(\"testh\"): no '/'")
|
||||
|
||||
// Test local_cidr parse error
|
||||
conf = config.NewC(l)
|
||||
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"code": "1", "local_cidr": "testh", "proto": "any"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "local_cidr": "testh", "proto": "any"}}}
|
||||
_, err = NewFirewallFromConfig(l, cs, conf)
|
||||
require.EqualError(t, err, "firewall.outbound rule #0; local_cidr did not parse; netip.ParsePrefix(\"testh\"): no '/'")
|
||||
|
||||
// Test both group and groups
|
||||
conf = config.NewC(l)
|
||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a", "groups": []string{"b", "c"}}}}
|
||||
_, err = NewFirewallFromConfig(l, cs, conf)
|
||||
require.EqualError(t, err, "firewall.inbound rule #0; only one of group or groups should be defined, both provided")
|
||||
}
|
||||
@@ -929,28 +687,28 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
|
||||
// Test adding tcp rule
|
||||
conf := config.NewC(l)
|
||||
mf := &mockFirewall{}
|
||||
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"port": "1", "proto": "tcp", "host": "a"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}}
|
||||
require.NoError(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
||||
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||
|
||||
// Test adding udp rule
|
||||
conf = config.NewC(l)
|
||||
mf = &mockFirewall{}
|
||||
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"port": "1", "proto": "udp", "host": "a"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}}
|
||||
require.NoError(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
||||
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||
|
||||
// Test adding icmp rule
|
||||
conf = config.NewC(l)
|
||||
mf = &mockFirewall{}
|
||||
conf.Settings["firewall"] = map[string]any{"outbound": []any{map[string]any{"port": "1", "proto": "icmp", "host": "a"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}}
|
||||
require.NoError(t, AddFirewallRulesFromConfig(l, false, conf, mf))
|
||||
assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||
|
||||
// Test adding any rule
|
||||
conf = config.NewC(l)
|
||||
mf = &mockFirewall{}
|
||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "host": "a"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
|
||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||
|
||||
@@ -958,64 +716,49 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
|
||||
cidr := netip.MustParsePrefix("10.0.0.0/8")
|
||||
conf = config.NewC(l)
|
||||
mf = &mockFirewall{}
|
||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "cidr": cidr.String()}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "cidr": cidr.String()}}}
|
||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr, localIp: netip.Prefix{}}, mf.lastCall)
|
||||
|
||||
// Test adding rule with local_cidr
|
||||
conf = config.NewC(l)
|
||||
mf = &mockFirewall{}
|
||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "local_cidr": cidr.String()}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "local_cidr": cidr.String()}}}
|
||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: cidr}, mf.lastCall)
|
||||
|
||||
// Test adding rule with cidr ipv6
|
||||
cidr6 := netip.MustParsePrefix("fd00::/8")
|
||||
conf = config.NewC(l)
|
||||
mf = &mockFirewall{}
|
||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "cidr": cidr6.String()}}}
|
||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr6, localIp: netip.Prefix{}}, mf.lastCall)
|
||||
|
||||
// Test adding rule with local_cidr ipv6
|
||||
conf = config.NewC(l)
|
||||
mf = &mockFirewall{}
|
||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "local_cidr": cidr6.String()}}}
|
||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: cidr6}, mf.lastCall)
|
||||
|
||||
// Test adding rule with ca_sha
|
||||
conf = config.NewC(l)
|
||||
mf = &mockFirewall{}
|
||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
|
||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: netip.Prefix{}, caSha: "12312313123"}, mf.lastCall)
|
||||
|
||||
// Test adding rule with ca_name
|
||||
conf = config.NewC(l)
|
||||
mf = &mockFirewall{}
|
||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "ca_name": "root01"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}}
|
||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: netip.Prefix{}, caName: "root01"}, mf.lastCall)
|
||||
|
||||
// Test single group
|
||||
conf = config.NewC(l)
|
||||
mf = &mockFirewall{}
|
||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "group": "a"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}}
|
||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||
|
||||
// Test single groups
|
||||
conf = config.NewC(l)
|
||||
mf = &mockFirewall{}
|
||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "groups": "a"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}}
|
||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||
|
||||
// Test multiple AND groups
|
||||
conf = config.NewC(l)
|
||||
mf = &mockFirewall{}
|
||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
|
||||
require.NoError(t, AddFirewallRulesFromConfig(l, true, conf, mf))
|
||||
assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
|
||||
|
||||
@@ -1023,7 +766,7 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
|
||||
conf = config.NewC(l)
|
||||
mf = &mockFirewall{}
|
||||
mf.nextCallReturn = errors.New("test error")
|
||||
conf.Settings["firewall"] = map[string]any{"inbound": []any{map[string]any{"port": "1", "proto": "any", "host": "a"}}}
|
||||
conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
|
||||
require.EqualError(t, AddFirewallRulesFromConfig(l, true, conf, mf), "firewall.inbound rule #0; `test error`")
|
||||
}
|
||||
|
||||
@@ -1033,8 +776,8 @@ func TestFirewall_convertRule(t *testing.T) {
|
||||
l.SetOutput(ob)
|
||||
|
||||
// Ensure group array of 1 is converted and a warning is printed
|
||||
c := map[string]any{
|
||||
"group": []any{"group1"},
|
||||
c := map[interface{}]interface{}{
|
||||
"group": []interface{}{"group1"},
|
||||
}
|
||||
|
||||
r, err := convertRule(l, c, "test", 1)
|
||||
@@ -1044,17 +787,17 @@ func TestFirewall_convertRule(t *testing.T) {
|
||||
|
||||
// Ensure group array of > 1 is errord
|
||||
ob.Reset()
|
||||
c = map[string]any{
|
||||
"group": []any{"group1", "group2"},
|
||||
c = map[interface{}]interface{}{
|
||||
"group": []interface{}{"group1", "group2"},
|
||||
}
|
||||
|
||||
r, err = convertRule(l, c, "test", 1)
|
||||
assert.Empty(t, ob.String())
|
||||
assert.Equal(t, "", ob.String())
|
||||
require.Error(t, err, "group should contain a single value, an array with more than one entry was provided")
|
||||
|
||||
// Make sure a well formed group is alright
|
||||
ob.Reset()
|
||||
c = map[string]any{
|
||||
c = map[interface{}]interface{}{
|
||||
"group": "group1",
|
||||
}
|
||||
|
||||
@@ -1063,171 +806,6 @@ func TestFirewall_convertRule(t *testing.T) {
|
||||
assert.Equal(t, "group1", r.Group)
|
||||
}
|
||||
|
||||
type testcase struct {
|
||||
h *HostInfo
|
||||
p firewall.Packet
|
||||
c cert.Certificate
|
||||
err error
|
||||
}
|
||||
|
||||
func (c *testcase) Test(t *testing.T, fw *Firewall) {
|
||||
t.Helper()
|
||||
cp := cert.NewCAPool()
|
||||
resetConntrack(fw)
|
||||
err := fw.Drop(c.p, true, c.h, cp, nil)
|
||||
if c.err == nil {
|
||||
require.NoError(t, err, "failed to not drop remote address %s", c.p.RemoteAddr)
|
||||
} else {
|
||||
require.ErrorIs(t, c.err, err, "failed to drop remote address %s", c.p.RemoteAddr)
|
||||
}
|
||||
}
|
||||
|
||||
func buildTestCase(setup testsetup, err error, theirPrefixes ...netip.Prefix) testcase {
|
||||
c1 := dummyCert{
|
||||
name: "host1",
|
||||
networks: theirPrefixes,
|
||||
groups: []string{"default-group"},
|
||||
issuer: "signer-shasum",
|
||||
}
|
||||
h := HostInfo{
|
||||
ConnectionState: &ConnectionState{
|
||||
peerCert: &cert.CachedCertificate{
|
||||
Certificate: &c1,
|
||||
InvertedGroups: map[string]struct{}{"default-group": {}},
|
||||
},
|
||||
},
|
||||
vpnAddrs: make([]netip.Addr, len(theirPrefixes)),
|
||||
}
|
||||
for i := range theirPrefixes {
|
||||
h.vpnAddrs[i] = theirPrefixes[i].Addr()
|
||||
}
|
||||
h.buildNetworks(setup.myVpnNetworksTable, &c1)
|
||||
p := firewall.Packet{
|
||||
LocalAddr: setup.c.Networks()[0].Addr(), //todo?
|
||||
RemoteAddr: theirPrefixes[0].Addr(),
|
||||
LocalPort: 10,
|
||||
RemotePort: 90,
|
||||
Protocol: firewall.ProtoUDP,
|
||||
Fragment: false,
|
||||
}
|
||||
return testcase{
|
||||
h: &h,
|
||||
p: p,
|
||||
c: &c1,
|
||||
err: err,
|
||||
}
|
||||
}
|
||||
|
||||
type testsetup struct {
|
||||
c dummyCert
|
||||
myVpnNetworksTable *bart.Lite
|
||||
fw *Firewall
|
||||
}
|
||||
|
||||
func newSetup(t *testing.T, l *logrus.Logger, myPrefixes ...netip.Prefix) testsetup {
|
||||
c := dummyCert{
|
||||
name: "me",
|
||||
networks: myPrefixes,
|
||||
groups: []string{"default-group"},
|
||||
issuer: "signer-shasum",
|
||||
}
|
||||
|
||||
return newSetupFromCert(t, l, c)
|
||||
}
|
||||
|
||||
func newSetupFromCert(t *testing.T, l *logrus.Logger, c dummyCert) testsetup {
|
||||
myVpnNetworksTable := new(bart.Lite)
|
||||
for _, prefix := range c.Networks() {
|
||||
myVpnNetworksTable.Insert(prefix)
|
||||
}
|
||||
fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||
require.NoError(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
|
||||
|
||||
return testsetup{
|
||||
c: c,
|
||||
fw: fw,
|
||||
myVpnNetworksTable: myVpnNetworksTable,
|
||||
}
|
||||
}
|
||||
|
||||
func TestFirewall_Drop_EnforceIPMatch(t *testing.T) {
|
||||
t.Parallel()
|
||||
l := test.NewLogger()
|
||||
ob := &bytes.Buffer{}
|
||||
l.SetOutput(ob)
|
||||
|
||||
myPrefix := netip.MustParsePrefix("1.1.1.1/8")
|
||||
// for now, it's okay that these are all "incoming", the logic this test tries to check doesn't care about in/out
|
||||
t.Run("allow inbound all matching", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
setup := newSetup(t, l, myPrefix)
|
||||
tc := buildTestCase(setup, nil, netip.MustParsePrefix("1.2.3.4/24"))
|
||||
tc.Test(t, setup.fw)
|
||||
})
|
||||
t.Run("allow inbound local matching", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
setup := newSetup(t, l, myPrefix)
|
||||
tc := buildTestCase(setup, ErrInvalidLocalIP, netip.MustParsePrefix("1.2.3.4/24"))
|
||||
tc.p.LocalAddr = netip.MustParseAddr("1.2.3.8")
|
||||
tc.Test(t, setup.fw)
|
||||
})
|
||||
t.Run("block inbound remote mismatched", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
setup := newSetup(t, l, myPrefix)
|
||||
tc := buildTestCase(setup, ErrInvalidRemoteIP, netip.MustParsePrefix("1.2.3.4/24"))
|
||||
tc.p.RemoteAddr = netip.MustParseAddr("9.9.9.9")
|
||||
tc.Test(t, setup.fw)
|
||||
})
|
||||
t.Run("Block a vpn peer packet", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
setup := newSetup(t, l, myPrefix)
|
||||
tc := buildTestCase(setup, ErrPeerRejected, netip.MustParsePrefix("2.2.2.2/24"))
|
||||
tc.Test(t, setup.fw)
|
||||
})
|
||||
twoPrefixes := []netip.Prefix{
|
||||
netip.MustParsePrefix("1.2.3.4/24"), netip.MustParsePrefix("2.2.2.2/24"),
|
||||
}
|
||||
t.Run("allow inbound one matching", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
setup := newSetup(t, l, myPrefix)
|
||||
tc := buildTestCase(setup, nil, twoPrefixes...)
|
||||
tc.Test(t, setup.fw)
|
||||
})
|
||||
t.Run("block inbound multimismatch", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
setup := newSetup(t, l, myPrefix)
|
||||
tc := buildTestCase(setup, ErrInvalidRemoteIP, twoPrefixes...)
|
||||
tc.p.RemoteAddr = netip.MustParseAddr("9.9.9.9")
|
||||
tc.Test(t, setup.fw)
|
||||
})
|
||||
t.Run("allow inbound 2nd one matching", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
setup2 := newSetup(t, l, netip.MustParsePrefix("2.2.2.1/24"))
|
||||
tc := buildTestCase(setup2, nil, twoPrefixes...)
|
||||
tc.p.RemoteAddr = twoPrefixes[1].Addr()
|
||||
tc.Test(t, setup2.fw)
|
||||
})
|
||||
t.Run("allow inbound unsafe route", func(t *testing.T) {
|
||||
t.Parallel()
|
||||
unsafePrefix := netip.MustParsePrefix("192.168.0.0/24")
|
||||
c := dummyCert{
|
||||
name: "me",
|
||||
networks: []netip.Prefix{myPrefix},
|
||||
unsafeNetworks: []netip.Prefix{unsafePrefix},
|
||||
groups: []string{"default-group"},
|
||||
issuer: "signer-shasum",
|
||||
}
|
||||
unsafeSetup := newSetupFromCert(t, l, c)
|
||||
tc := buildTestCase(unsafeSetup, nil, twoPrefixes...)
|
||||
tc.p.LocalAddr = netip.MustParseAddr("192.168.0.3")
|
||||
tc.err = ErrNoMatchingRule
|
||||
tc.Test(t, unsafeSetup.fw) //should hit firewall and bounce off
|
||||
require.NoError(t, unsafeSetup.fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, unsafePrefix, "", ""))
|
||||
tc.err = nil
|
||||
tc.Test(t, unsafeSetup.fw) //should pass
|
||||
})
|
||||
}
|
||||
|
||||
type addRuleCall struct {
|
||||
incoming bool
|
||||
proto uint8
|
||||
|
||||
50 go.mod
@@ -1,39 +1,40 @@
|
||||
module github.com/slackhq/nebula
|
||||
|
||||
go 1.25
|
||||
go 1.23.6
|
||||
|
||||
toolchain go1.23.7
|
||||
|
||||
require (
|
||||
dario.cat/mergo v1.0.2
|
||||
dario.cat/mergo v1.0.1
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
|
||||
github.com/armon/go-radix v1.0.0
|
||||
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
|
||||
github.com/flynn/noise v1.1.0
|
||||
github.com/gaissmai/bart v0.25.0
|
||||
github.com/gaissmai/bart v0.18.1
|
||||
github.com/gogo/protobuf v1.3.2
|
||||
github.com/google/gopacket v1.1.19
|
||||
github.com/kardianos/service v1.2.4
|
||||
github.com/miekg/dns v1.1.68
|
||||
github.com/kardianos/service v1.2.2
|
||||
github.com/miekg/dns v1.1.63
|
||||
github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b
|
||||
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
|
||||
github.com/prometheus/client_golang v1.23.2
|
||||
github.com/prometheus/client_golang v1.21.1
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
|
||||
github.com/stefanberger/go-pkcs11uri v0.0.0-20230803200340-78284954bff6
|
||||
github.com/stretchr/testify v1.11.1
|
||||
github.com/vishvananda/netlink v1.3.1
|
||||
go.yaml.in/yaml/v3 v3.0.4
|
||||
golang.org/x/crypto v0.44.0
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/vishvananda/netlink v1.3.0
|
||||
golang.org/x/crypto v0.36.0
|
||||
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
|
||||
golang.org/x/net v0.46.0
|
||||
golang.org/x/sync v0.18.0
|
||||
golang.org/x/sys v0.38.0
|
||||
golang.org/x/term v0.37.0
|
||||
golang.org/x/net v0.37.0
|
||||
golang.org/x/sync v0.12.0
|
||||
golang.org/x/sys v0.31.0
|
||||
golang.org/x/term v0.30.0
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
|
||||
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3
|
||||
google.golang.org/protobuf v1.36.10
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
google.golang.org/protobuf v1.36.5
|
||||
gopkg.in/yaml.v2 v2.4.0
|
||||
gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe
|
||||
)
|
||||
|
||||
@@ -42,14 +43,15 @@ require (
|
||||
github.com/cespare/xxhash/v2 v2.3.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/google/btree v1.1.2 // indirect
|
||||
github.com/klauspost/compress v1.17.11 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.2 // indirect
|
||||
github.com/prometheus/common v0.66.1 // indirect
|
||||
github.com/prometheus/procfs v0.16.1 // indirect
|
||||
github.com/vishvananda/netns v0.0.5 // indirect
|
||||
go.yaml.in/yaml/v2 v2.4.2 // indirect
|
||||
golang.org/x/mod v0.24.0 // indirect
|
||||
golang.org/x/time v0.7.0 // indirect
|
||||
golang.org/x/tools v0.33.0 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.62.0 // indirect
|
||||
github.com/prometheus/procfs v0.15.1 // indirect
|
||||
github.com/vishvananda/netns v0.0.4 // indirect
|
||||
golang.org/x/mod v0.18.0 // indirect
|
||||
golang.org/x/time v0.5.0 // indirect
|
||||
golang.org/x/tools v0.22.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
)
|
||||
|
||||
97
go.sum
97
go.sum
@@ -1,6 +1,6 @@
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
||||
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
|
||||
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
|
||||
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
@@ -24,8 +24,8 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
|
||||
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
|
||||
github.com/gaissmai/bart v0.25.0 h1:eqiokVPqM3F94vJ0bTHXHtH91S8zkKL+bKh+BsGOsJM=
|
||||
github.com/gaissmai/bart v0.25.0/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c=
|
||||
github.com/gaissmai/bart v0.18.1 h1:bX2j560JC1MJpoEDevBGvXL5OZ1mkls320Vl8Igb5QQ=
|
||||
github.com/gaissmai/bart v0.18.1/go.mod h1:JJzMAhNF5Rjo4SF4jWBrANuJfqY+FvsFhW7t1UZJ+XY=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
@@ -53,8 +53,8 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
|
||||
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||
@@ -64,12 +64,12 @@ github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kardianos/service v1.2.4 h1:XNlGtZOYNx2u91urOdg/Kfmc+gfmuIo1Dd3rEi2OgBk=
|
||||
github.com/kardianos/service v1.2.4/go.mod h1:E4V9ufUuY82F7Ztlu1eN9VXWIQxg8NoLQlmFe0MtrXc=
|
||||
github.com/kardianos/service v1.2.2 h1:ZvePhAHfvo0A7Mftk/tEzqEZ7Q4lgnR8sGz4xu1YX60=
|
||||
github.com/kardianos/service v1.2.2/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM=
|
||||
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
@@ -83,8 +83,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
|
||||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
|
||||
github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
|
||||
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
|
||||
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
|
||||
github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b h1:J/AzCvg5z0Hn1rqZUJjpbzALUmkKX0Zwbc/i4fw7Sfk=
|
||||
github.com/miekg/pkcs11 v1.1.2-0.20231115102856-9078ad6b9d4b/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -106,24 +106,24 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=
|
||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||
github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk=
|
||||
github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||
github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E=
|
||||
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.66.1 h1:h5E0h5/Y8niHc5DlaLlWLArTQI7tMrsfQjHV+d9ZoGs=
|
||||
github.com/prometheus/common v0.66.1/go.mod h1:gcaUsgf3KfRSwHY4dIMXLPV0K/Wg1oZ8+SbZk/HH/dA=
|
||||
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
|
||||
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.16.1 h1:hZ15bTNuirocR6u0JZ6BAHHmwS1p8B4P6MRqxtzMyRg=
|
||||
github.com/prometheus/procfs v0.16.1/go.mod h1:teAbpZRB1iIAJYREa1LsoWUXykVXA1KlTmWl8x/U+Is=
|
||||
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
|
||||
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
|
||||
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
@@ -143,35 +143,29 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
|
||||
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
|
||||
github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0=
|
||||
github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4=
|
||||
github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
|
||||
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
|
||||
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/vishvananda/netlink v1.3.0 h1:X7l42GfcV4S6E4vHTsw48qbrV+9PVojNfIhZcwQdrZk=
|
||||
github.com/vishvananda/netlink v1.3.0/go.mod h1:i6NetklAujEcC6fK0JPjT8qSwWyO0HLn4UKG+hGqeJs=
|
||||
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
|
||||
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||
go.yaml.in/yaml/v2 v2.4.2 h1:DzmwEr2rDGHl7lsFgAHxmNz/1NlQ7xLIrlN2h5d1eGI=
|
||||
go.yaml.in/yaml/v2 v2.4.2/go.mod h1:081UH+NErpNdqlCXm3TtEran0rJZGxAYx9hb/ELlsPU=
|
||||
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
|
||||
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
|
||||
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
|
||||
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
|
||||
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
|
||||
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY=
|
||||
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
|
||||
golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
|
||||
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
|
||||
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -182,8 +176,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
|
||||
golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
|
||||
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
|
||||
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -191,8 +185,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
|
||||
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
|
||||
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
@@ -203,29 +197,30 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
|
||||
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
|
||||
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
|
||||
golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
|
||||
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
|
||||
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
|
||||
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.33.0 h1:4qz2S3zmRxbGIhDIAgjxvFutSvH5EfnsYrRBj0UI0bc=
|
||||
golang.org/x/tools v0.33.0/go.mod h1:CIJMaWEY88juyUfo7UbgPqbC8rU2OqfAV1h2Qp0oMYI=
|
||||
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
|
||||
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
@@ -244,8 +239,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
||||
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
|
||||
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@@ -256,6 +251,8 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
|
||||
254
handshake_ix.go
254
handshake_ix.go
@@ -2,6 +2,7 @@ package nebula

import (
"net/netip"
"slices"
"time"

"github.com/flynn/noise"
@@ -22,17 +23,13 @@ func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
return false
}

// If we're connecting to a v6 address we must use a v2 cert
cs := f.pki.getCertState()
v := cs.initiatingVersion
if hh.initiatingVersionOverride != cert.VersionPre1 {
v = hh.initiatingVersionOverride
} else if v < cert.Version2 {
// If we're connecting to a v6 address we should encourage use of a V2 cert
for _, a := range hh.hostinfo.vpnAddrs {
if a.Is6() {
v = cert.Version2
break
}
v := cs.defaultVersion
for _, a := range hh.hostinfo.vpnAddrs {
if a.Is6() {
v = cert.Version2
break
}
}

@@ -51,10 +48,9 @@ func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).
WithField("certVersion", v).
Error("Unable to handshake with host because no certificate handshake bytes is available")
return false
}

ci, err := NewConnectionState(f.l, cs, crt, true, noise.HandshakeIX)
ci, err := NewConnectionState(f.l, cs, crt, true, noise.HandshakeIX, cs.psk.primary)
if err != nil {
f.l.WithError(err).WithField("vpnAddrs", hh.hostinfo.vpnAddrs).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).
@@ -75,8 +71,7 @@ func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {

hsBytes, err := hs.Marshal()
if err != nil {
f.l.WithError(err).WithField("vpnAddrs", hh.hostinfo.vpnAddrs).
WithField("certVersion", v).
f.l.WithError(err).WithField("vpnAddrs", hh.hostinfo.vpnAddrs).WithField("certVersion", v).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
return false
}
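The stage 0 hunk above differs between the two commits in how the initiating certificate version is chosen: one side honors an explicit per-handshake override and only then upgrades to a v2 cert when a peer address is IPv6, the other always starts from the default version and upgrades on IPv6. A minimal standalone sketch of the override-then-upgrade ordering, using simplified stand-ins (Version, chooseVersion) rather than the repository's cert API:

package main

import (
	"fmt"
	"net/netip"
)

// Version is a simplified stand-in for cert.Version; illustrative only.
type Version int

const (
	VersionPre1 Version = iota
	Version1
	Version2
)

// chooseVersion mirrors the ordering in the hunk above: an explicit
// override wins, otherwise upgrade to v2 when any peer address is
// IPv6, since the hunk's comment notes a v6 destination calls for a v2 cert.
func chooseVersion(initiating, override Version, peerAddrs []netip.Addr) Version {
	if override != VersionPre1 {
		return override
	}
	v := initiating
	if v < Version2 {
		for _, a := range peerAddrs {
			if a.Is6() {
				return Version2
			}
		}
	}
	return v
}

func main() {
	peers := []netip.Addr{netip.MustParseAddr("fd00::1")}
	fmt.Println(chooseVersion(Version1, VersionPre1, peers)) // prints 2
}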
@@ -105,39 +100,57 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
if crt == nil {
f.l.WithField("udpAddr", addr).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).
WithField("certVersion", cs.initiatingVersion).
WithField("certVersion", cs.defaultVersion).
Error("Unable to handshake with host because no certificate is available")
return
}

ci, err := NewConnectionState(f.l, cs, crt, false, noise.HandshakeIX)
if err != nil {
f.l.WithError(err).WithField("udpAddr", addr).
var (
err error
ci *ConnectionState
msg []byte
)

hs := &NebulaHandshake{}

for _, psk := range cs.psk.keys {
ci, err = NewConnectionState(f.l, cs, crt, false, noise.HandshakeIX, psk)
if err != nil {
//TODO: should we bother logging this? If we have multiple psks and the error is unrelated it will be verbose.
f.l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Error("Failed to create connection state")
continue
}

msg, _, _, err = ci.H.ReadMessage(nil, packet[header.Len:])
if err != nil {
// Calls to ReadMessage with an incorrect psk should fail, try the next one if we have one
continue
}

// Sometimes ReadMessage returns fine with a nil psk even if the handshake is using a psk, ensure our protobuf
// comes out clean as well
err = hs.Unmarshal(msg)
if err == nil {
// There was no error, we can continue with this handshake
break
}

// The unmarshal failed, try the next psk if we have one
}

// We finished with an error, log it and get out
if err != nil || hs.Details == nil {
// We aren't logging the error here because we can't be sure of the failure when using psk
f.l.WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Error("Failed to create connection state")
Error("Was unable to decrypt the handshake")
return
}

// Mark packet 1 as seen so it doesn't show up as missed
ci.window.Update(f.l, 1)

msg, _, _, err := ci.H.ReadMessage(nil, packet[header.Len:])
if err != nil {
f.l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Error("Failed to call noise.ReadMessage")
return
}

hs := &NebulaHandshake{}
err = hs.Unmarshal(msg)
if err != nil || hs.Details == nil {
f.l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Error("Failed unmarshal handshake message")
return
}

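In the new stage 1 path each configured pre-shared key is tried in turn: a connection state is built with the candidate key, a noise ReadMessage is attempted, and the key is only accepted when the decrypted payload also unmarshals as a valid handshake protobuf, since ReadMessage alone can succeed with the wrong key. A hedged sketch of that trial pattern, with placeholder decrypt and parse functions standing in for the real noise and protobuf calls:

package main

import (
	"errors"
	"fmt"
)

// tryKeys returns the first plaintext that both decrypts and parses
// cleanly. decrypt and parse are placeholders for noise ReadMessage
// and protobuf Unmarshal; they are not the repository's API.
func tryKeys(keys [][]byte, packet []byte,
	decrypt func(key, pkt []byte) ([]byte, error),
	parse func(msg []byte) error) ([]byte, error) {

	var lastErr error
	for _, key := range keys {
		msg, err := decrypt(key, packet)
		if err != nil {
			lastErr = err // wrong key: decryption fails, try the next one
			continue
		}
		if err = parse(msg); err != nil {
			lastErr = err // decryption can pass with a wrong key; parsing is the second check
			continue
		}
		return msg, nil
	}
	return nil, fmt.Errorf("no usable psk: %w", lastErr)
}

func main() {
	decrypt := func(key, pkt []byte) ([]byte, error) {
		if string(key) != "good" {
			return nil, errors.New("decrypt failed")
		}
		return pkt, nil
	}
	parse := func(msg []byte) error { return nil }

	msg, err := tryKeys([][]byte{[]byte("bad"), []byte("good")}, []byte("pkt"), decrypt, parse)
	fmt.Println(string(msg), err)
}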
rc, err := cert.Recombine(cert.Version(hs.Details.CertVersion), hs.Details.Cert, ci.H.PeerStatic(), ci.Curve())
|
||||
if err != nil {
|
||||
f.l.WithError(err).WithField("udpAddr", addr).
|
||||
@@ -148,8 +161,8 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
|
||||
remoteCert, err := f.pki.GetCAPool().VerifyCertificate(time.Now(), rc)
|
||||
if err != nil {
|
||||
fp, fperr := rc.Fingerprint()
|
||||
if fperr != nil {
|
||||
fp, err := rc.Fingerprint()
|
||||
if err != nil {
|
||||
fp = "<error generating certificate fingerprint>"
|
||||
}
|
||||
|
||||
@@ -168,19 +181,16 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
|
||||
if remoteCert.Certificate.Version() != ci.myCert.Version() {
|
||||
// We started off using the wrong certificate version, lets see if we can match the version that was sent to us
|
||||
myCertOtherVersion := cs.getCertificate(remoteCert.Certificate.Version())
|
||||
if myCertOtherVersion == nil {
|
||||
if f.l.Level >= logrus.DebugLevel {
|
||||
f.l.WithError(err).WithFields(m{
|
||||
"udpAddr": addr,
|
||||
"handshake": m{"stage": 1, "style": "ix_psk0"},
|
||||
"cert": remoteCert,
|
||||
}).Debug("Might be unable to handshake with host due to missing certificate version")
|
||||
}
|
||||
} else {
|
||||
// Record the certificate we are actually using
|
||||
ci.myCert = myCertOtherVersion
|
||||
rc := cs.getCertificate(remoteCert.Certificate.Version())
|
||||
if rc == nil {
|
||||
f.l.WithError(err).WithField("udpAddr", addr).
|
||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert).
|
||||
Info("Unable to handshake with host due to missing certificate version")
|
||||
return
|
||||
}
|
||||
|
||||
// Record the certificate we are actually using
|
||||
ci.myCert = rc
|
||||
}
|
||||
|
||||
if len(remoteCert.Certificate.Networks()) == 0 {
|
||||
@@ -191,28 +201,40 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
return
|
||||
}
|
||||
|
||||
var vpnAddrs []netip.Addr
|
||||
var filteredNetworks []netip.Prefix
|
||||
certName := remoteCert.Certificate.Name()
|
||||
certVersion := remoteCert.Certificate.Version()
|
||||
fingerprint := remoteCert.Fingerprint
|
||||
issuer := remoteCert.Certificate.Issuer()
|
||||
vpnNetworks := remoteCert.Certificate.Networks()
|
||||
|
||||
anyVpnAddrsInCommon := false
|
||||
vpnAddrs := make([]netip.Addr, len(vpnNetworks))
|
||||
for i, network := range vpnNetworks {
|
||||
if f.myVpnAddrsTable.Contains(network.Addr()) {
|
||||
f.l.WithField("vpnNetworks", vpnNetworks).WithField("udpAddr", addr).
|
||||
for _, network := range remoteCert.Certificate.Networks() {
|
||||
vpnAddr := network.Addr()
|
||||
_, found := f.myVpnAddrsTable.Lookup(vpnAddr)
|
||||
if found {
|
||||
f.l.WithField("vpnAddr", vpnAddr).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("certVersion", certVersion).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Refusing to handshake with myself")
|
||||
return
|
||||
}
|
||||
vpnAddrs[i] = network.Addr()
|
||||
if f.myVpnNetworksTable.Contains(network.Addr()) {
|
||||
anyVpnAddrsInCommon = true
|
||||
|
||||
// vpnAddrs outside our vpn networks are of no use to us, filter them out
|
||||
if _, ok := f.myVpnNetworksTable.Lookup(vpnAddr); !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
filteredNetworks = append(filteredNetworks, network)
|
||||
vpnAddrs = append(vpnAddrs, vpnAddr)
|
||||
}
|
||||
|
||||
if len(vpnAddrs) == 0 {
|
||||
f.l.WithError(err).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("No usable vpn addresses from host, refusing handshake")
|
||||
return
|
||||
}
|
||||
|
||||
if addr.IsValid() {
|
||||
@@ -228,7 +250,6 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
if err != nil {
|
||||
f.l.WithError(err).WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("certVersion", certVersion).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to generate index")
|
||||
@@ -243,36 +264,30 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
HandshakePacket: make(map[uint8][]byte, 0),
|
||||
lastHandshakeTime: hs.Details.Time,
|
||||
relayState: RelayState{
|
||||
relays: nil,
|
||||
relays: map[netip.Addr]struct{}{},
|
||||
relayForByAddr: map[netip.Addr]*Relay{},
|
||||
relayForByIdx: map[uint32]*Relay{},
|
||||
},
|
||||
}
|
||||
|
||||
msgRxL := f.l.WithFields(m{
|
||||
"vpnAddrs": vpnAddrs,
|
||||
"udpAddr": addr,
|
||||
"certName": certName,
|
||||
"certVersion": certVersion,
|
||||
"fingerprint": fingerprint,
|
||||
"issuer": issuer,
|
||||
"initiatorIndex": hs.Details.InitiatorIndex,
|
||||
"responderIndex": hs.Details.ResponderIndex,
|
||||
"remoteIndex": h.RemoteIndex,
|
||||
"handshake": m{"stage": 1, "style": "ix_psk0"},
|
||||
})
|
||||
|
||||
if anyVpnAddrsInCommon {
|
||||
msgRxL.Info("Handshake message received")
|
||||
} else {
|
||||
//todo warn if not lighthouse or relay?
|
||||
msgRxL.Info("Handshake message received, but no vpnNetworks in common.")
|
||||
}
|
||||
f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||
Info("Handshake message received")
|
||||
|
||||
hs.Details.ResponderIndex = myIndex
|
||||
hs.Details.Cert = cs.getHandshakeBytes(ci.myCert.Version())
|
||||
if hs.Details.Cert == nil {
|
||||
msgRxL.WithField("myCertVersion", ci.myCert.Version()).
|
||||
f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||
WithField("certVersion", ci.myCert.Version()).
|
||||
Error("Unable to handshake with host because no certificate handshake bytes is available")
|
||||
return
|
||||
}
|
||||
@@ -285,7 +300,6 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
if err != nil {
|
||||
f.l.WithError(err).WithField("vpnAddrs", hostinfo.vpnAddrs).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("certVersion", certVersion).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
|
||||
@@ -297,7 +311,6 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
if err != nil {
|
||||
f.l.WithError(err).WithField("vpnAddrs", hostinfo.vpnAddrs).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("certVersion", certVersion).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
|
||||
@@ -305,7 +318,6 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
} else if dKey == nil || eKey == nil {
|
||||
f.l.WithField("vpnAddrs", hostinfo.vpnAddrs).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("certVersion", certVersion).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).Error("Noise did not arrive at a key")
|
||||
@@ -330,7 +342,7 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
|
||||
hostinfo.remotes = f.lightHouse.QueryCache(vpnAddrs)
|
||||
hostinfo.SetRemote(addr)
|
||||
hostinfo.buildNetworks(f.myVpnNetworksTable, remoteCert.Certificate)
|
||||
hostinfo.buildNetworks(filteredNetworks, remoteCert.Certificate.UnsafeNetworks())
|
||||
|
||||
existing, err := f.handshakeManager.CheckAndComplete(hostinfo, 0, f)
|
||||
if err != nil {
|
||||
@@ -373,7 +385,6 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
// This means there was an existing tunnel and this handshake was older than the one we are currently based on
|
||||
f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("certVersion", certVersion).
|
||||
WithField("oldHandshakeTime", existing.lastHandshakeTime).
|
||||
WithField("newHandshakeTime", hostinfo.lastHandshakeTime).
|
||||
WithField("fingerprint", fingerprint).
|
||||
@@ -389,7 +400,6 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
// This means we failed to insert because of collision on localIndexId. Just let the next handshake packet retry
|
||||
f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("certVersion", certVersion).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||
@@ -402,7 +412,6 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
// And we forget to update it here
|
||||
f.l.WithError(err).WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("certVersion", certVersion).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||
@@ -419,7 +428,6 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
if err != nil {
|
||||
f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("certVersion", certVersion).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||
@@ -428,7 +436,6 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
} else {
|
||||
f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("certVersion", certVersion).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||
@@ -447,7 +454,6 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
|
||||
f.l.WithField("vpnAddrs", vpnAddrs).WithField("relay", via.relayHI.vpnAddrs[0]).
|
||||
WithField("certName", certName).
|
||||
WithField("certVersion", certVersion).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||
@@ -455,9 +461,9 @@ func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet
|
||||
Info("Handshake message sent")
|
||||
}
|
||||
|
||||
f.connectionManager.AddTrafficWatch(hostinfo)
|
||||
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
|
||||
|
||||
hostinfo.remotes.RefreshFromHandshake(vpnAddrs)
|
||||
hostinfo.remotes.ResetBlockedRemotes()
|
||||
|
||||
return
|
||||
}
|
||||
@@ -552,7 +558,6 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
|
||||
|
||||
vpnNetworks := remoteCert.Certificate.Networks()
|
||||
certName := remoteCert.Certificate.Name()
|
||||
certVersion := remoteCert.Certificate.Version()
|
||||
fingerprint := remoteCert.Fingerprint
|
||||
issuer := remoteCert.Certificate.Issuer()
|
||||
|
||||
@@ -571,26 +576,32 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
|
||||
hostinfo.relayState.InsertRelayTo(via.relayHI.vpnAddrs[0])
|
||||
}
|
||||
|
||||
correctHostResponded := false
|
||||
anyVpnAddrsInCommon := false
|
||||
vpnAddrs := make([]netip.Addr, len(vpnNetworks))
|
||||
for i, network := range vpnNetworks {
|
||||
vpnAddrs[i] = network.Addr()
|
||||
if f.myVpnNetworksTable.Contains(network.Addr()) {
|
||||
anyVpnAddrsInCommon = true
|
||||
}
|
||||
if hostinfo.vpnAddrs[0] == network.Addr() {
|
||||
// todo is it more correct to see if any of hostinfo.vpnAddrs are in the cert? it should have len==1, but one day it might not?
|
||||
correctHostResponded = true
|
||||
var vpnAddrs []netip.Addr
|
||||
var filteredNetworks []netip.Prefix
|
||||
for _, network := range vpnNetworks {
|
||||
// vpnAddrs outside our vpn networks are of no use to us, filter them out
|
||||
vpnAddr := network.Addr()
|
||||
if _, ok := f.myVpnNetworksTable.Lookup(vpnAddr); !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
filteredNetworks = append(filteredNetworks, network)
|
||||
vpnAddrs = append(vpnAddrs, vpnAddr)
|
||||
}
|
||||
|
||||
if len(vpnAddrs) == 0 {
|
||||
f.l.WithError(err).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).Error("No usable vpn addresses from host, refusing handshake")
|
||||
return true
|
||||
}
|
||||
|
||||
// Ensure the right host responded
|
||||
if !correctHostResponded {
|
||||
if !slices.Contains(vpnAddrs, hostinfo.vpnAddrs[0]) {
|
||||
f.l.WithField("intendedVpnAddrs", hostinfo.vpnAddrs).WithField("haveVpnNetworks", vpnNetworks).
|
||||
WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("certVersion", certVersion).
|
||||
WithField("udpAddr", addr).WithField("certName", certName).
|
||||
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
||||
Info("Incorrect host responded to handshake")
|
||||
|
||||
@@ -598,7 +609,6 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
|
||||
f.handshakeManager.DeleteHostInfo(hostinfo)
|
||||
|
||||
// Create a new hostinfo/handshake for the intended vpn ip
|
||||
//TODO is hostinfo.vpnAddrs[0] always the address to use?
|
||||
f.handshakeManager.StartHandshake(hostinfo.vpnAddrs[0], func(newHH *HandshakeHostInfo) {
|
||||
// Block the current used address
|
||||
newHH.hostinfo.remotes = hostinfo.remotes
|
||||
@@ -625,29 +635,23 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
|
||||
ci.window.Update(f.l, 2)
|
||||
|
||||
duration := time.Since(hh.startTime).Nanoseconds()
|
||||
msgRxL := f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
|
||||
f.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", addr).
|
||||
WithField("certName", certName).
|
||||
WithField("certVersion", certVersion).
|
||||
WithField("fingerprint", fingerprint).
|
||||
WithField("issuer", issuer).
|
||||
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
|
||||
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
|
||||
WithField("durationNs", duration).
|
||||
WithField("sentCachedPackets", len(hh.packetStore))
|
||||
if anyVpnAddrsInCommon {
|
||||
msgRxL.Info("Handshake message received")
|
||||
} else {
|
||||
//todo warn if not lighthouse or relay?
|
||||
msgRxL.Info("Handshake message received, but no vpnNetworks in common.")
|
||||
}
|
||||
WithField("sentCachedPackets", len(hh.packetStore)).
|
||||
Info("Handshake message received")
|
||||
|
||||
// Build up the radix for the firewall if we have subnets in the cert
|
||||
hostinfo.vpnAddrs = vpnAddrs
|
||||
hostinfo.buildNetworks(f.myVpnNetworksTable, remoteCert.Certificate)
|
||||
hostinfo.buildNetworks(filteredNetworks, remoteCert.Certificate.UnsafeNetworks())
|
||||
|
||||
// Complete our handshake and update metrics, this will replace any existing tunnels for the vpnAddrs here
|
||||
f.handshakeManager.Complete(hostinfo, f)
|
||||
f.connectionManager.AddTrafficWatch(hostinfo)
|
||||
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
|
||||
|
||||
if f.l.Level >= logrus.DebugLevel {
|
||||
hostinfo.logger(f.l).Debugf("Sending %d stored packets", len(hh.packetStore))
|
||||
@@ -662,7 +666,7 @@ func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *Ha
|
||||
f.cachedPacketMetrics.sent.Inc(int64(len(hh.packetStore)))
|
||||
}
|
||||
|
||||
hostinfo.remotes.RefreshFromHandshake(vpnAddrs)
|
||||
hostinfo.remotes.ResetBlockedRemotes()
|
||||
f.metricHandshakes.Update(duration)
|
||||
|
||||
return false
|
||||
|
||||
@@ -68,12 +68,11 @@ type HandshakeManager struct {
type HandshakeHostInfo struct {
sync.Mutex

startTime time.Time // Time that we first started trying with this handshake
ready bool // Is the handshake ready
initiatingVersionOverride cert.Version // Should we use a non-default cert version for this handshake?
counter int64 // How many attempts have we made so far
lastRemotes []netip.AddrPort // Remotes that we sent to during the previous attempt
packetStore []*cachedPacket // A set of packets to be transmitted once the handshake completes
startTime time.Time // Time that we first started trying with this handshake
ready bool // Is the handshake ready
counter int64 // How many attempts have we made so far
lastRemotes []netip.AddrPort // Remotes that we sent to during the previous attempt
packetStore []*cachedPacket // A set of packets to be transmitted once the handshake completes

hostinfo *HostInfo
}
@@ -269,13 +268,14 @@ func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered
hostinfo.logger(hm.l).WithField("relays", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
// Send a RelayRequest to all known Relay IP's
for _, relay := range hostinfo.remotes.relays {
// Don't relay through the host I'm trying to connect to
// Don't relay to myself
if relay == vpnIp {
continue
}

// Don't relay to myself
if hm.f.myVpnAddrsTable.Contains(relay) {
// Don't relay through the host I'm trying to connect to
_, found := hm.f.myVpnAddrsTable.Lookup(relay)
if found {
continue
}

@@ -451,7 +451,7 @@ func (hm *HandshakeManager) StartHandshake(vpnAddr netip.Addr, cacheCb func(*Han
vpnAddrs: []netip.Addr{vpnAddr},
HandshakePacket: make(map[uint8][]byte, 0),
relayState: RelayState{
relays: nil,
relays: map[netip.Addr]struct{}{},
relayForByAddr: map[netip.Addr]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},

@@ -10,6 +10,7 @@ import (
|
||||
"github.com/slackhq/nebula/test"
|
||||
"github.com/slackhq/nebula/udp"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func Test_NewHandshakeManagerVpnIp(t *testing.T) {
|
||||
@@ -23,11 +24,15 @@ func Test_NewHandshakeManagerVpnIp(t *testing.T) {
|
||||
|
||||
lh := newTestLighthouse()
|
||||
|
||||
psk, err := NewPsk(PskAccepting, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
cs := &CertState{
|
||||
initiatingVersion: cert.Version1,
|
||||
privateKey: []byte{},
|
||||
v1Cert: &dummyCert{version: cert.Version1},
|
||||
v1HandshakeBytes: []byte{},
|
||||
defaultVersion: cert.Version1,
|
||||
privateKey: []byte{},
|
||||
v1Cert: &dummyCert{version: cert.Version1},
|
||||
v1HandshakeBytes: []byte{},
|
||||
psk: psk,
|
||||
}
|
||||
|
||||
blah := NewHandshakeManager(l, mainHM, lh, &udp.NoopConn{}, defaultHandshakeConfig)
|
||||
@@ -98,5 +103,5 @@ func (mw *mockEncWriter) GetHostInfo(_ netip.Addr) *HostInfo {
|
||||
}
|
||||
|
||||
func (mw *mockEncWriter) GetCertState() *CertState {
|
||||
return &CertState{initiatingVersion: cert.Version2}
|
||||
return &CertState{defaultVersion: cert.Version2}
|
||||
}
|
||||
|
||||
@@ -19,7 +19,7 @@ import (
// |-----------------------------------------------------------------------|
// | payload... |

type m = map[string]any
type m map[string]interface{}

const (
Version uint8 = 1

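One side of this hunk declares m as a type alias (type m = map[string]any), the other as a defined type (type m map[string]interface{}). With the alias, an m literal is exactly a map[string]any value, so it assigns directly to other defined map types such as a logrus-style Fields without an explicit conversion; with the defined type a conversion would be needed. A small illustration, where Fields is only a stand-in for such a map-based API type:

package main

import "fmt"

// Fields mimics a defined map type like logrus.Fields.
type Fields map[string]any

// m as an alias is identical to map[string]any.
type m = map[string]any

func logFields(f Fields) { fmt.Println(f) }

func main() {
	// Assignable without conversion because m is an alias, not a defined type.
	logFields(m{"stage": 1, "style": "ix_psk0"})

	// If m were declared as `type m map[string]any` instead, the call above
	// would need an explicit conversion: logFields(Fields(m{...})).
}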
81
hostmap.go
81
hostmap.go
@@ -4,7 +4,6 @@ import (
|
||||
"errors"
|
||||
"net"
|
||||
"net/netip"
|
||||
"slices"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
@@ -17,10 +16,12 @@ import (
|
||||
"github.com/slackhq/nebula/header"
|
||||
)
|
||||
|
||||
// const ProbeLen = 100
|
||||
const defaultPromoteEvery = 1000 // Count of packets sent before we try moving a tunnel to a preferred underlay ip address
|
||||
const defaultReQueryEvery = 5000 // Count of packets sent before re-querying a hostinfo to the lighthouse
|
||||
const defaultReQueryWait = time.Minute // Minimum amount of seconds to wait before re-querying a hostinfo the lighthouse. Evaluated every ReQueryEvery
|
||||
const MaxRemotes = 10
|
||||
const maxRecvError = 4
|
||||
|
||||
// MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip
|
||||
// 5 allows for an initial handshake and each host pair re-handshaking twice
|
||||
@@ -67,7 +68,7 @@ type HostMap struct {
|
||||
type RelayState struct {
|
||||
sync.RWMutex
|
||||
|
||||
relays []netip.Addr // Ordered set of VpnAddrs of Hosts to use as relays to access this peer
|
||||
relays map[netip.Addr]struct{} // Set of vpnAddr's of Hosts to use as relays to access this peer
|
||||
// For data race avoidance, the contents of a *Relay are treated immutably. To update a *Relay, copy the existing data,
|
||||
// modify what needs to be updated, and store the new modified copy in the relayForByIp and relayForByIdx maps (with
|
||||
// the RelayState Lock held)
|
||||
@@ -78,12 +79,7 @@ type RelayState struct {
func (rs *RelayState) DeleteRelay(ip netip.Addr) {
rs.Lock()
defer rs.Unlock()
for idx, val := range rs.relays {
if val == ip {
rs.relays = append(rs.relays[:idx], rs.relays[idx+1:]...)
return
}
}
delete(rs.relays, ip)
}

func (rs *RelayState) UpdateRelayForByIpState(vpnIp netip.Addr, state int) {
@@ -128,16 +124,16 @@ func (rs *RelayState) GetRelayForByAddr(addr netip.Addr) (*Relay, bool) {
func (rs *RelayState) InsertRelayTo(ip netip.Addr) {
rs.Lock()
defer rs.Unlock()
if !slices.Contains(rs.relays, ip) {
rs.relays = append(rs.relays, ip)
}
rs.relays[ip] = struct{}{}
}

func (rs *RelayState) CopyRelayIps() []netip.Addr {
ret := make([]netip.Addr, len(rs.relays))
rs.RLock()
defer rs.RUnlock()
copy(ret, rs.relays)
ret := make([]netip.Addr, 0, len(rs.relays))
for ip := range rs.relays {
ret = append(ret, ip)
}
return ret
}

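These hunks are part of the switch between holding a host's relays as an ordered []netip.Addr and as a map[netip.Addr]struct{} set: with the set, delete is a single delete call and insert needs no containment check, but iteration order is lost, which is why the copy helper rebuilds a slice. A small sketch of the set-backed variant with the same three operations, simplified and without the RWMutex the real struct embeds:

package main

import (
	"fmt"
	"net/netip"
)

// relaySet is a minimal, lock-free stand-in for the map-backed relays field.
type relaySet map[netip.Addr]struct{}

func (s relaySet) Insert(a netip.Addr) { s[a] = struct{}{} } // idempotent, no containment check
func (s relaySet) Delete(a netip.Addr) { delete(s, a) }      // no slice scan needed

// Copy rebuilds a slice; map iteration order is unspecified.
func (s relaySet) Copy() []netip.Addr {
	out := make([]netip.Addr, 0, len(s))
	for a := range s {
		out = append(out, a)
	}
	return out
}

func main() {
	s := relaySet{}
	s.Insert(netip.MustParseAddr("10.0.0.1"))
	s.Insert(netip.MustParseAddr("10.0.0.1")) // duplicate insert is a no-op
	s.Delete(netip.MustParseAddr("10.0.0.2")) // deleting a missing entry is fine
	fmt.Println(s.Copy())
}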
@@ -212,18 +208,6 @@ func (rs *RelayState) InsertRelay(ip netip.Addr, idx uint32, r *Relay) {
|
||||
rs.relayForByIdx[idx] = r
|
||||
}
|
||||
|
||||
type NetworkType uint8
|
||||
|
||||
const (
|
||||
NetworkTypeUnknown NetworkType = iota
|
||||
// NetworkTypeVPN is a network that overlaps one or more of the vpnNetworks in our certificate
|
||||
NetworkTypeVPN
|
||||
// NetworkTypeVPNPeer is a network that does not overlap one of our networks
|
||||
NetworkTypeVPNPeer
|
||||
// NetworkTypeUnsafe is a network from Certificate.UnsafeNetworks()
|
||||
NetworkTypeUnsafe
|
||||
)
|
||||
|
||||
type HostInfo struct {
|
||||
remote netip.AddrPort
|
||||
remotes *RemoteList
|
||||
@@ -235,10 +219,11 @@ type HostInfo struct {
|
||||
// vpnAddrs is a list of vpn addresses assigned to this host that are within our own vpn networks
|
||||
// The host may have other vpn addresses that are outside our
|
||||
// vpn networks but were removed because they are not usable
|
||||
vpnAddrs []netip.Addr
|
||||
vpnAddrs []netip.Addr
|
||||
recvError atomic.Uint32
|
||||
|
||||
// networks is a combination of specific vpn addresses (not prefixes!) and full unsafe networks assigned to this host.
|
||||
networks *bart.Table[NetworkType]
|
||||
// networks are both all vpn and unsafe networks assigned to this host
|
||||
networks *bart.Table[struct{}]
|
||||
relayState RelayState
|
||||
|
||||
// HandshakePacket records the packets used to create this hostinfo
|
||||
@@ -265,14 +250,6 @@ type HostInfo struct {
|
||||
// Used to track other hostinfos for this vpn ip since only 1 can be primary
|
||||
// Synchronised via hostmap lock and not the hostinfo lock.
|
||||
next, prev *HostInfo
|
||||
|
||||
//TODO: in, out, and others might benefit from being an atomic.Int32. We could collapse connectionManager pendingDeletion, relayUsed, and in/out into this 1 thing
|
||||
in, out, pendingDeletion atomic.Bool
|
||||
|
||||
// lastUsed tracks the last time ConnectionManager checked the tunnel and it was in use.
|
||||
// This value will be behind against actual tunnel utilization in the hot path.
|
||||
// This should only be used by the ConnectionManagers ticker routine.
|
||||
lastUsed time.Time
|
||||
}
|
||||
|
||||
type ViaSender struct {
|
||||
@@ -742,26 +719,26 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote netip.AddrPort) b
return false
}

// buildNetworks fills in the networks field of HostInfo. It accepts a cert.Certificate so you never ever mix the network types up.
func (i *HostInfo) buildNetworks(myVpnNetworksTable *bart.Lite, c cert.Certificate) {
if len(c.Networks()) == 1 && len(c.UnsafeNetworks()) == 0 {
if myVpnNetworksTable.Contains(c.Networks()[0].Addr()) {
return // Simple case, no BART needed
}
func (i *HostInfo) RecvErrorExceeded() bool {
if i.recvError.Add(1) >= maxRecvError {
return true
}
return true
}

func (i *HostInfo) buildNetworks(networks, unsafeNetworks []netip.Prefix) {
if len(networks) == 1 && len(unsafeNetworks) == 0 {
// Simple case, no CIDRTree needed
return
}

i.networks = new(bart.Table[NetworkType])
for _, network := range c.Networks() {
nprefix := netip.PrefixFrom(network.Addr(), network.Addr().BitLen())
if myVpnNetworksTable.Contains(network.Addr()) {
i.networks.Insert(nprefix, NetworkTypeVPN)
} else {
i.networks.Insert(nprefix, NetworkTypeVPNPeer)
}
i.networks = new(bart.Table[struct{}])
for _, network := range networks {
i.networks.Insert(network, struct{}{})
}

for _, network := range c.UnsafeNetworks() {
i.networks.Insert(network, NetworkTypeUnsafe)
for _, network := range unsafeNetworks {
i.networks.Insert(network, struct{}{})
}
}

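The two sides of the buildNetworks hunk differ in what they store in the host's bart table: one inserts host-length prefixes tagged with a NetworkType, the other inserts the raw prefixes with an empty value. A hedged sketch of the tagged approach using the gaissmai/bart generic table, assuming the Insert and Lookup methods used in the hunk above (package versions may differ):

package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

type NetworkType uint8

const (
	NetworkTypeUnknown NetworkType = iota
	NetworkTypeVPN
	NetworkTypeUnsafe
)

func main() {
	t := new(bart.Table[NetworkType])

	// Tag a single vpn address (host-length prefix) and an unsafe route.
	vpnAddr := netip.MustParseAddr("10.1.0.5")
	t.Insert(netip.PrefixFrom(vpnAddr, vpnAddr.BitLen()), NetworkTypeVPN)
	t.Insert(netip.MustParsePrefix("192.168.50.0/24"), NetworkTypeUnsafe)

	// Longest-prefix match returns the tag for a destination address.
	if kind, ok := t.Lookup(netip.MustParseAddr("192.168.50.7")); ok {
		fmt.Println(kind == NetworkTypeUnsafe) // true
	}
}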
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestHostMap_MakePrimary(t *testing.T) {
|
||||
@@ -211,36 +210,8 @@ func TestHostMap_reload(t *testing.T) {
|
||||
assert.Empty(t, hm.GetPreferredRanges())
|
||||
|
||||
c.ReloadConfigString("preferred_ranges: [1.1.1.0/24, 10.1.1.0/24]")
|
||||
assert.Equal(t, []string{"1.1.1.0/24", "10.1.1.0/24"}, toS(hm.GetPreferredRanges()))
|
||||
assert.EqualValues(t, []string{"1.1.1.0/24", "10.1.1.0/24"}, toS(hm.GetPreferredRanges()))
|
||||
|
||||
c.ReloadConfigString("preferred_ranges: [1.1.1.1/32]")
|
||||
assert.Equal(t, []string{"1.1.1.1/32"}, toS(hm.GetPreferredRanges()))
|
||||
}
|
||||
|
||||
func TestHostMap_RelayState(t *testing.T) {
|
||||
h1 := &HostInfo{vpnAddrs: []netip.Addr{netip.MustParseAddr("0.0.0.1")}, localIndexId: 1}
|
||||
a1 := netip.MustParseAddr("::1")
|
||||
a2 := netip.MustParseAddr("2001::1")
|
||||
|
||||
h1.relayState.InsertRelayTo(a1)
|
||||
assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)
|
||||
h1.relayState.InsertRelayTo(a2)
|
||||
assert.Equal(t, []netip.Addr{a1, a2}, h1.relayState.relays)
|
||||
// Ensure that the first relay added is the first one returned in the copy
|
||||
currentRelays := h1.relayState.CopyRelayIps()
|
||||
require.Len(t, currentRelays, 2)
|
||||
assert.Equal(t, a1, currentRelays[0])
|
||||
|
||||
// Deleting the last one in the list works ok
|
||||
h1.relayState.DeleteRelay(a2)
|
||||
assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)
|
||||
|
||||
// Deleting an element not in the list works ok
|
||||
h1.relayState.DeleteRelay(a2)
|
||||
assert.Equal(t, []netip.Addr{a1}, h1.relayState.relays)
|
||||
|
||||
// Deleting the only element in the list works ok
|
||||
h1.relayState.DeleteRelay(a1)
|
||||
assert.Equal(t, []netip.Addr{}, h1.relayState.relays)
|
||||
|
||||
assert.EqualValues(t, []string{"1.1.1.1/32"}, toS(hm.GetPreferredRanges()))
|
||||
}
|
||||
|
||||
202
inside.go
@@ -2,18 +2,15 @@ package nebula
|
||||
|
||||
import (
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/firewall"
|
||||
"github.com/slackhq/nebula/header"
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
"github.com/slackhq/nebula/noiseutil"
|
||||
"github.com/slackhq/nebula/packet"
|
||||
"github.com/slackhq/nebula/routing"
|
||||
)
|
||||
|
||||
func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet, nb []byte, out *packet.Packet, q int, localCache firewall.ConntrackCache, now time.Time) {
|
||||
func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet, nb, out []byte, q int, localCache firewall.ConntrackCache) {
|
||||
err := newPacket(packet, false, fwPacket)
|
||||
if err != nil {
|
||||
if f.l.Level >= logrus.DebugLevel {
|
||||
@@ -24,12 +21,14 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
|
||||
|
||||
// Ignore local broadcast packets
|
||||
if f.dropLocalBroadcast {
|
||||
if f.myBroadcastAddrsTable.Contains(fwPacket.RemoteAddr) {
|
||||
_, found := f.myBroadcastAddrsTable.Lookup(fwPacket.RemoteAddr)
|
||||
if found {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if f.myVpnAddrsTable.Contains(fwPacket.RemoteAddr) {
|
||||
_, found := f.myVpnAddrsTable.Lookup(fwPacket.RemoteAddr)
|
||||
if found {
|
||||
// Immediately forward packets from self to self.
|
||||
// This should only happen on Darwin-based and FreeBSD hosts, which
|
||||
// routes packets from the Nebula addr to the Nebula addr through the Nebula
|
||||
@@ -50,12 +49,12 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
|
||||
return
|
||||
}
|
||||
|
||||
hostinfo, ready := f.getOrHandshakeConsiderRouting(fwPacket, func(hh *HandshakeHostInfo) {
|
||||
hostinfo, ready := f.getOrHandshake(fwPacket.RemoteAddr, func(hh *HandshakeHostInfo) {
|
||||
hh.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics)
|
||||
})
|
||||
|
||||
if hostinfo == nil {
|
||||
f.rejectInside(packet, out.Payload, q) //todo vector?
|
||||
f.rejectInside(packet, out, q)
|
||||
if f.l.Level >= logrus.DebugLevel {
|
||||
f.l.WithField("vpnAddr", fwPacket.RemoteAddr).
|
||||
WithField("fwPacket", fwPacket).
|
||||
@@ -68,11 +67,12 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
|
||||
return
|
||||
}
|
||||
|
||||
dropReason := f.firewall.Drop(*fwPacket, false, hostinfo, f.pki.GetCAPool(), localCache, now)
|
||||
dropReason := f.firewall.Drop(*fwPacket, false, hostinfo, f.pki.GetCAPool(), localCache)
|
||||
if dropReason == nil {
|
||||
f.sendNoMetricsDelayed(header.Message, 0, hostinfo.ConnectionState, hostinfo, netip.AddrPort{}, packet, nb, out, q)
|
||||
f.sendNoMetrics(header.Message, 0, hostinfo.ConnectionState, hostinfo, netip.AddrPort{}, packet, nb, out, q)
|
||||
|
||||
} else {
|
||||
f.rejectInside(packet, out.Payload, q) //todo vector?
|
||||
f.rejectInside(packet, out, q)
|
||||
if f.l.Level >= logrus.DebugLevel {
|
||||
hostinfo.logger(f.l).
|
||||
WithField("fwPacket", fwPacket).
|
||||
@@ -121,93 +121,22 @@ func (f *Interface) rejectOutside(packet []byte, ci *ConnectionState, hostinfo *
|
||||
f.sendNoMetrics(header.Message, 0, ci, hostinfo, netip.AddrPort{}, out, nb, packet, q)
|
||||
}
|
||||
|
||||
// Handshake will attempt to initiate a tunnel with the provided vpn address. This is a no-op if the tunnel is already established or being established
|
||||
// it does not check if it is within our vpn networks!
|
||||
func (f *Interface) Handshake(vpnAddr netip.Addr) {
|
||||
f.handshakeManager.GetOrHandshake(vpnAddr, nil)
|
||||
f.getOrHandshake(vpnAddr, nil)
|
||||
}
|
||||
|
||||
// getOrHandshakeNoRouting returns nil if the vpnAddr is not routable.
|
||||
// getOrHandshake returns nil if the vpnAddr is not routable.
|
||||
// If the 2nd return var is false then the hostinfo is not ready to be used in a tunnel
|
||||
func (f *Interface) getOrHandshakeNoRouting(vpnAddr netip.Addr, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) {
|
||||
if f.myVpnNetworksTable.Contains(vpnAddr) {
|
||||
return f.handshakeManager.GetOrHandshake(vpnAddr, cacheCallback)
|
||||
}
|
||||
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// getOrHandshakeConsiderRouting will try to find the HostInfo to handle this packet, starting a handshake if necessary.
|
||||
// If the 2nd return var is false then the hostinfo is not ready to be used in a tunnel.
|
||||
func (f *Interface) getOrHandshakeConsiderRouting(fwPacket *firewall.Packet, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) {
|
||||
destinationAddr := fwPacket.RemoteAddr
|
||||
|
||||
hostinfo, ready := f.getOrHandshakeNoRouting(destinationAddr, cacheCallback)
|
||||
|
||||
// Host is inside the mesh, no routing required
|
||||
if hostinfo != nil {
|
||||
return hostinfo, ready
|
||||
}
|
||||
|
||||
gateways := f.inside.RoutesFor(destinationAddr)
|
||||
|
||||
switch len(gateways) {
|
||||
case 0:
|
||||
return nil, false
|
||||
case 1:
|
||||
// Single gateway route
|
||||
return f.handshakeManager.GetOrHandshake(gateways[0].Addr(), cacheCallback)
|
||||
default:
|
||||
// Multi gateway route, perform ECMP categorization
|
||||
gatewayAddr, balancingOk := routing.BalancePacket(fwPacket, gateways)
|
||||
|
||||
if !balancingOk {
|
||||
// This happens if the gateway buckets were not calculated, this _should_ never happen
|
||||
f.l.Error("Gateway buckets not calculated, fallback from ECMP to random routing. Please report this bug.")
|
||||
}
|
||||
|
||||
var handshakeInfoForChosenGateway *HandshakeHostInfo
|
||||
var hhReceiver = func(hh *HandshakeHostInfo) {
|
||||
handshakeInfoForChosenGateway = hh
|
||||
}
|
||||
|
||||
// Store the handshakeHostInfo for later.
|
||||
// If this node is not reachable we will attempt other nodes, if none are reachable we will
|
||||
// cache the packet for this gateway.
|
||||
if hostinfo, ready = f.handshakeManager.GetOrHandshake(gatewayAddr, hhReceiver); ready {
|
||||
return hostinfo, true
|
||||
}
|
||||
|
||||
// It appears the selected gateway cannot be reached, find another gateway to fallback on.
|
||||
// The current implementation breaks ECMP but that seems better than no connectivity.
|
||||
// If ECMP is also required when a gateway is down then connectivity status
|
||||
// for each gateway needs to be kept and the weights recalculated when they go up or down.
|
||||
// This would also need to interact with unsafe_route updates through reloading the config or
|
||||
// use of the use_system_route_table option
|
||||
|
||||
if f.l.Level >= logrus.DebugLevel {
|
||||
f.l.WithField("destination", destinationAddr).
|
||||
WithField("originalGateway", gatewayAddr).
|
||||
Debugln("Calculated gateway for ECMP not available, attempting other gateways")
|
||||
}
|
||||
|
||||
for i := range gateways {
|
||||
// Skip the gateway that failed previously
|
||||
if gateways[i].Addr() == gatewayAddr {
|
||||
continue
|
||||
}
|
||||
|
||||
// We do not need the HandshakeHostInfo since we cache the packet in the originally chosen gateway
|
||||
if hostinfo, ready = f.handshakeManager.GetOrHandshake(gateways[i].Addr(), nil); ready {
|
||||
return hostinfo, true
|
||||
}
|
||||
}
|
||||
|
||||
// No gateways reachable, cache the packet in the originally chosen gateway
|
||||
cacheCallback(handshakeInfoForChosenGateway)
|
||||
return hostinfo, false
|
||||
func (f *Interface) getOrHandshake(vpnAddr netip.Addr, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) {
|
||||
_, found := f.myVpnNetworksTable.Lookup(vpnAddr)
|
||||
if !found {
|
||||
vpnAddr = f.inside.RouteFor(vpnAddr)
|
||||
if !vpnAddr.IsValid() {
|
||||
return nil, false
|
||||
}
|
||||
}
|
||||
|
||||
return f.handshakeManager.GetOrHandshake(vpnAddr, cacheCallback)
|
||||
}
|
||||
|
||||
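For context on the multi-gateway branch above: routing.BalancePacket categorizes each flow into one gateway so a given connection keeps using the same path. The snippet below is a generic flow-hashing sketch of that idea, not the actual BalancePacket implementation; pickGateway and the sample addresses are made up for illustration.

// Illustrative only: ECMP-style gateway selection by hashing the flow tuple.
package main

import (
	"fmt"
	"hash/fnv"
	"net/netip"
)

// pickGateway deterministically maps a flow to one gateway so packets of the
// same connection always take the same path.
func pickGateway(gateways []netip.Addr, src, dst netip.Addr, srcPort, dstPort uint16) netip.Addr {
	if len(gateways) == 0 {
		return netip.Addr{}
	}
	h := fnv.New32a()
	h.Write(src.AsSlice())
	h.Write(dst.AsSlice())
	h.Write([]byte{byte(srcPort >> 8), byte(srcPort), byte(dstPort >> 8), byte(dstPort)})
	return gateways[h.Sum32()%uint32(len(gateways))]
}

func main() {
	gws := []netip.Addr{
		netip.MustParseAddr("10.0.0.1"),
		netip.MustParseAddr("10.0.0.2"),
	}
	// The same 5-tuple always lands on the same gateway.
	fmt.Println(pickGateway(gws, netip.MustParseAddr("10.0.0.9"), netip.MustParseAddr("192.168.5.4"), 40000, 443))
}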
func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte) {
|
||||
@@ -219,7 +148,7 @@ func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubTyp
|
||||
}
|
||||
|
||||
// check if packet is in outbound fw rules
|
||||
dropReason := f.firewall.Drop(*fp, false, hostinfo, f.pki.GetCAPool(), nil, time.Now())
|
||||
dropReason := f.firewall.Drop(*fp, false, hostinfo, f.pki.GetCAPool(), nil)
|
||||
if dropReason != nil {
|
||||
if f.l.Level >= logrus.DebugLevel {
|
||||
f.l.WithField("fwPacket", fp).
|
||||
@@ -232,10 +161,9 @@ func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubTyp
|
||||
f.sendNoMetrics(header.Message, st, hostinfo.ConnectionState, hostinfo, netip.AddrPort{}, p, nb, out, 0)
|
||||
}
|
||||
|
||||
// SendMessageToVpnAddr handles real addr:port lookup and sends to the current best known address for vpnAddr.
|
||||
// This function ignores myVpnNetworksTable, and will always attempt to treat the address as a vpnAddr
|
||||
// SendMessageToVpnAddr handles real addr:port lookup and sends to the current best known address for vpnAddr
|
||||
func (f *Interface) SendMessageToVpnAddr(t header.MessageType, st header.MessageSubType, vpnAddr netip.Addr, p, nb, out []byte) {
|
||||
hostInfo, ready := f.handshakeManager.GetOrHandshake(vpnAddr, func(hh *HandshakeHostInfo) {
|
||||
hostInfo, ready := f.getOrHandshake(vpnAddr, func(hh *HandshakeHostInfo) {
|
||||
hh.cachePacket(f.l, t, st, p, f.SendMessageToHostInfo, f.cachedPacketMetrics)
|
||||
})
|
||||
|
||||
@@ -290,7 +218,7 @@ func (f *Interface) SendVia(via *HostInfo,
|
||||
c := via.ConnectionState.messageCounter.Add(1)
|
||||
|
||||
out = header.Encode(out, header.Version, header.Message, header.MessageRelay, relay.RemoteIndex, c)
|
||||
f.connectionManager.Out(via)
|
||||
f.connectionManager.Out(via.localIndexId)
|
||||
|
||||
// Authenticate the header and payload, but do not encrypt for this message type.
|
||||
// The payload consists of the inner, unencrypted Nebula header, as well as the end-to-end encrypted payload.
|
||||
@@ -358,7 +286,7 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
|
||||
|
||||
//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
|
||||
out = header.Encode(out, header.Version, t, st, hostinfo.remoteIndexId, c)
|
||||
f.connectionManager.Out(hostinfo)
|
||||
f.connectionManager.Out(hostinfo.localIndexId)
|
||||
|
||||
// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
|
||||
// all our addrs and enable a faster roaming.
|
||||
@@ -411,81 +339,3 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Interface) sendNoMetricsDelayed(t header.MessageType, st header.MessageSubType, ci *ConnectionState, hostinfo *HostInfo, remote netip.AddrPort, p, nb []byte, out *packet.Packet, q int) {
|
||||
if ci.eKey == nil {
|
||||
return
|
||||
}
|
||||
useRelay := !remote.IsValid() && !hostinfo.remote.IsValid()
|
||||
fullOut := out.Payload
|
||||
|
||||
if useRelay {
|
||||
if len(out.Payload) < header.Len {
|
||||
// out always has a capacity of mtu, but not always a length greater than the header.Len.
|
||||
// Grow it to make sure the next operation works.
|
||||
out.Payload = out.Payload[:header.Len]
|
||||
}
|
||||
// Save a header's worth of data at the front of the 'out' buffer.
|
||||
out.Payload = out.Payload[header.Len:]
|
||||
}
|
||||
|
||||
if noiseutil.EncryptLockNeeded {
|
||||
// NOTE: for goboring AESGCMTLS we need to lock because of the nonce check
|
||||
ci.writeLock.Lock()
|
||||
}
|
||||
c := ci.messageCounter.Add(1)
|
||||
|
||||
//l.WithField("trace", string(debug.Stack())).Error("out Header ", &Header{Version, t, st, 0, hostinfo.remoteIndexId, c}, p)
|
||||
out.Payload = header.Encode(out.Payload, header.Version, t, st, hostinfo.remoteIndexId, c)
|
||||
f.connectionManager.Out(hostinfo)
|
||||
|
||||
// Query our LH if we haven't since the last time we've been rebound, this will cause the remote to punch against
|
||||
// all our addrs and enable a faster roaming.
|
||||
if t != header.CloseTunnel && hostinfo.lastRebindCount != f.rebindCount {
|
||||
//NOTE: there is an update hole if a tunnel isn't used and exactly 256 rebinds occur before the tunnel is
|
||||
// finally used again. This tunnel would eventually be torn down and recreated if this action didn't help.
|
||||
f.lightHouse.QueryServer(hostinfo.vpnAddrs[0])
|
||||
hostinfo.lastRebindCount = f.rebindCount
|
||||
if f.l.Level >= logrus.DebugLevel {
|
||||
f.l.WithField("vpnAddrs", hostinfo.vpnAddrs).Debug("Lighthouse update triggered for punch due to rebind counter")
|
||||
}
|
||||
}
|
||||
|
||||
var err error
|
||||
out.Payload, err = ci.eKey.EncryptDanger(out.Payload, out.Payload, p, c, nb)
|
||||
if noiseutil.EncryptLockNeeded {
|
||||
ci.writeLock.Unlock()
|
||||
}
|
||||
if err != nil {
|
||||
hostinfo.logger(f.l).WithError(err).
|
||||
WithField("udpAddr", remote).WithField("counter", c).
|
||||
WithField("attemptedCounter", c).
|
||||
Error("Failed to encrypt outgoing packet")
|
||||
return
|
||||
}
|
||||
|
||||
if remote.IsValid() {
|
||||
err = f.writers[q].Prep(out, remote)
|
||||
if err != nil {
|
||||
hostinfo.logger(f.l).WithError(err).WithField("udpAddr", remote).Error("Failed to write outgoing packet")
|
||||
}
|
||||
} else if hostinfo.remote.IsValid() {
|
||||
err = f.writers[q].Prep(out, hostinfo.remote)
|
||||
if err != nil {
|
||||
hostinfo.logger(f.l).WithError(err).WithField("udpAddr", remote).Error("Failed to write outgoing packet")
|
||||
}
|
||||
} else {
|
||||
// Try to send via a relay
|
||||
for _, relayIP := range hostinfo.relayState.CopyRelayIps() {
|
||||
relayHostInfo, relay, err := f.hostMap.QueryVpnAddrsRelayFor(hostinfo.vpnAddrs, relayIP)
|
||||
if err != nil {
|
||||
hostinfo.relayState.DeleteRelay(relayIP)
|
||||
hostinfo.logger(f.l).WithField("relay", relayIP).WithError(err).Info("sendNoMetrics failed to find HostInfo")
|
||||
continue
|
||||
}
|
||||
//todo vector!!
|
||||
f.SendVia(relayHostInfo, relay, out.Payload, nb, fullOut[:header.Len+len(out.Payload)], true)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
145
interface.go
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/netip"
|
||||
"os"
|
||||
"runtime"
|
||||
@@ -17,31 +18,29 @@ import (
|
||||
"github.com/slackhq/nebula/firewall"
|
||||
"github.com/slackhq/nebula/header"
|
||||
"github.com/slackhq/nebula/overlay"
|
||||
"github.com/slackhq/nebula/packet"
|
||||
"github.com/slackhq/nebula/udp"
|
||||
)
|
||||
|
||||
const mtu = 9001
|
||||
const batch = 1024 //todo config!
|
||||
|
||||
type InterfaceConfig struct {
|
||||
HostMap *HostMap
|
||||
Outside udp.Conn
|
||||
Inside overlay.Device
|
||||
pki *PKI
|
||||
Cipher string
|
||||
Firewall *Firewall
|
||||
ServeDns bool
|
||||
HandshakeManager *HandshakeManager
|
||||
lightHouse *LightHouse
|
||||
connectionManager *connectionManager
|
||||
DropLocalBroadcast bool
|
||||
DropMulticast bool
|
||||
routines int
|
||||
MessageMetrics *MessageMetrics
|
||||
version string
|
||||
relayManager *relayManager
|
||||
punchy *Punchy
|
||||
HostMap *HostMap
|
||||
Outside udp.Conn
|
||||
Inside overlay.Device
|
||||
pki *PKI
|
||||
Firewall *Firewall
|
||||
ServeDns bool
|
||||
HandshakeManager *HandshakeManager
|
||||
lightHouse *LightHouse
|
||||
checkInterval time.Duration
|
||||
pendingDeletionInterval time.Duration
|
||||
DropLocalBroadcast bool
|
||||
DropMulticast bool
|
||||
routines int
|
||||
MessageMetrics *MessageMetrics
|
||||
version string
|
||||
relayManager *relayManager
|
||||
punchy *Punchy
|
||||
|
||||
tryPromoteEvery uint32
|
||||
reQueryEvery uint32
|
||||
@@ -62,11 +61,11 @@ type Interface struct {
|
||||
serveDns bool
|
||||
createTime time.Time
|
||||
lightHouse *LightHouse
|
||||
myBroadcastAddrsTable *bart.Lite
|
||||
myVpnAddrs []netip.Addr // A list of addresses assigned to us via our certificate
|
||||
myVpnAddrsTable *bart.Lite
|
||||
myVpnNetworks []netip.Prefix // A list of networks assigned to us via our certificate
|
||||
myVpnNetworksTable *bart.Lite
|
||||
myBroadcastAddrsTable *bart.Table[struct{}]
|
||||
myVpnAddrs []netip.Addr // A list of addresses assigned to us via our certificate
|
||||
myVpnAddrsTable *bart.Table[struct{}] // A table of addresses assigned to us via our certificate
|
||||
myVpnNetworks []netip.Prefix // A list of networks assigned to us via our certificate
|
||||
myVpnNetworksTable *bart.Table[struct{}] // A table of networks assigned to us via our certificate
|
||||
dropLocalBroadcast bool
|
||||
dropMulticast bool
|
||||
routines int
|
||||
@@ -87,18 +86,12 @@ type Interface struct {
|
||||
conntrackCacheTimeout time.Duration
|
||||
|
||||
writers []udp.Conn
|
||||
readers []overlay.TunDev
|
||||
readers []io.ReadWriteCloser
|
||||
|
||||
metricHandshakes metrics.Histogram
|
||||
messageMetrics *MessageMetrics
|
||||
cachedPacketMetrics *cachedPacketMetrics
|
||||
|
||||
listenInN int
|
||||
listenOutN int
|
||||
|
||||
listenInMetric metrics.Histogram
|
||||
listenOutMetric metrics.Histogram
|
||||
|
||||
l *logrus.Logger
|
||||
}
|
||||
|
||||
@@ -164,9 +157,6 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
|
||||
if c.Firewall == nil {
|
||||
return nil, errors.New("no firewall rules")
|
||||
}
|
||||
if c.connectionManager == nil {
|
||||
return nil, errors.New("no connection manager")
|
||||
}
|
||||
|
||||
cs := c.pki.getCertState()
|
||||
ifce := &Interface{
|
||||
@@ -184,14 +174,14 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
|
||||
routines: c.routines,
|
||||
version: c.version,
|
||||
writers: make([]udp.Conn, c.routines),
|
||||
readers: make([]overlay.TunDev, c.routines),
|
||||
readers: make([]io.ReadWriteCloser, c.routines),
|
||||
myVpnNetworks: cs.myVpnNetworks,
|
||||
myVpnNetworksTable: cs.myVpnNetworksTable,
|
||||
myVpnAddrs: cs.myVpnAddrs,
|
||||
myVpnAddrsTable: cs.myVpnAddrsTable,
|
||||
myBroadcastAddrsTable: cs.myVpnBroadcastAddrsTable,
|
||||
relayManager: c.relayManager,
|
||||
connectionManager: c.connectionManager,
|
||||
|
||||
conntrackCacheTimeout: c.ConntrackCacheTimeout,
|
||||
|
||||
metricHandshakes: metrics.GetOrRegisterHistogram("handshakes", nil, metrics.NewExpDecaySample(1028, 0.015)),
|
||||
@@ -203,14 +193,12 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
|
||||
|
||||
l: c.l,
|
||||
}
|
||||
ifce.listenInMetric = metrics.GetOrRegisterHistogram("vhost.listenIn.n", nil, metrics.NewExpDecaySample(1028, 0.015))
|
||||
ifce.listenOutMetric = metrics.GetOrRegisterHistogram("vhost.listenOut.n", nil, metrics.NewExpDecaySample(1028, 0.015))
|
||||
|
||||
ifce.tryPromoteEvery.Store(c.tryPromoteEvery)
|
||||
ifce.reQueryEvery.Store(c.reQueryEvery)
|
||||
ifce.reQueryWait.Store(int64(c.reQueryWait))
|
||||
|
||||
ifce.connectionManager.intf = ifce
|
||||
ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval, c.punchy)
|
||||
|
||||
return ifce, nil
|
||||
}
|
||||
@@ -234,7 +222,7 @@ func (f *Interface) activate() {
|
||||
metrics.GetOrRegisterGauge("routines", nil).Update(int64(f.routines))
|
||||
|
||||
// Prepare n tun queues
|
||||
var reader overlay.TunDev = f.inside
|
||||
var reader io.ReadWriteCloser = f.inside
|
||||
for i := 0; i < f.routines; i++ {
|
||||
if i > 0 {
|
||||
reader, err = f.inside.NewMultiQueueReader()
|
||||
@@ -263,71 +251,40 @@ func (f *Interface) run() {
|
||||
}
|
||||
}
|
||||
|
||||
func (f *Interface) listenOut(q int) {
|
||||
func (f *Interface) listenOut(i int) {
|
||||
runtime.LockOSThread()
|
||||
|
||||
var li udp.Conn
|
||||
if q > 0 {
|
||||
li = f.writers[q]
|
||||
if i > 0 {
|
||||
li = f.writers[i]
|
||||
} else {
|
||||
li = f.outside
|
||||
}
|
||||
|
||||
ctCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)
|
||||
lhh := f.lightHouse.NewRequestHandler()
|
||||
|
||||
outPackets := make([]*packet.OutPacket, batch)
|
||||
for i := 0; i < batch; i++ {
|
||||
outPackets[i] = packet.NewOut()
|
||||
}
|
||||
|
||||
plaintext := make([]byte, udp.MTU)
|
||||
h := &header.H{}
|
||||
fwPacket := &firewall.Packet{}
|
||||
nb := make([]byte, 12, 12)
|
||||
|
||||
toSend := make([][]byte, batch)
|
||||
|
||||
li.ListenOut(func(pkts []*packet.Packet) {
|
||||
toSend = toSend[:0]
|
||||
for i := range outPackets {
|
||||
outPackets[i].Valid = false
|
||||
outPackets[i].SegCounter = 0
|
||||
}
|
||||
|
||||
f.readOutsidePacketsMany(pkts, outPackets, h, fwPacket, lhh, nb, q, ctCache.Get(f.l), time.Now())
|
||||
//we opportunistically tx, but try to also send stragglers
|
||||
if _, err := f.readers[q].WriteMany(outPackets, q); err != nil {
|
||||
f.l.WithError(err).Error("Failed to send packets")
|
||||
}
|
||||
//todo I broke this
|
||||
//n := len(toSend)
|
||||
//if f.l.Level == logrus.DebugLevel {
|
||||
// f.listenOutMetric.Update(int64(n))
|
||||
//}
|
||||
//f.listenOutN = n
|
||||
|
||||
li.ListenOut(func(fromUdpAddr netip.AddrPort, payload []byte) {
|
||||
f.readOutsidePackets(fromUdpAddr, nil, plaintext[:0], payload, h, fwPacket, lhh, nb, i, ctCache.Get(f.l))
|
||||
})
|
||||
}
|
||||
|
||||
func (f *Interface) listenIn(reader overlay.TunDev, queueNum int) {
|
||||
func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
|
||||
runtime.LockOSThread()
|
||||
|
||||
packet := make([]byte, mtu)
|
||||
out := make([]byte, mtu)
|
||||
fwPacket := &firewall.Packet{}
|
||||
nb := make([]byte, 12, 12)
|
||||
|
||||
conntrackCache := firewall.NewConntrackCacheTicker(f.conntrackCacheTimeout)
|
||||
|
||||
packets := make([]*packet.VirtIOPacket, batch)
|
||||
outPackets := make([]*packet.Packet, batch)
|
||||
for i := 0; i < batch; i++ {
|
||||
packets[i] = packet.NewVIO()
|
||||
outPackets[i] = packet.New(false) //todo?
|
||||
}
|
||||
|
||||
for {
|
||||
n, err := reader.ReadMany(packets, queueNum)
|
||||
|
||||
//todo!!
|
||||
n, err := reader.Read(packet)
|
||||
if err != nil {
|
||||
if errors.Is(err, os.ErrClosed) && f.closed.Load() {
|
||||
return
|
||||
@@ -338,22 +295,7 @@ func (f *Interface) listenIn(reader overlay.TunDev, queueNum int) {
|
||||
os.Exit(2)
|
||||
}
|
||||
|
||||
if f.l.Level == logrus.DebugLevel {
|
||||
f.listenInMetric.Update(int64(n))
|
||||
}
|
||||
f.listenInN = n
|
||||
|
||||
now := time.Now()
|
||||
for i, pkt := range packets[:n] {
|
||||
outPackets[i].OutLen = -1
|
||||
f.consumeInsidePacket(pkt.Payload, fwPacket, nb, outPackets[i], queueNum, conntrackCache.Get(f.l), now)
|
||||
reader.RecycleRxSeg(pkt, i == (n-1), queueNum) //todo handle err?
|
||||
pkt.Reset()
|
||||
}
|
||||
_, err = f.writers[queueNum].WriteBatch(outPackets[:n])
|
||||
if err != nil {
|
||||
f.l.WithError(err).Error("Error while writing outbound packets")
|
||||
}
|
||||
f.consumeInsidePacket(packet[:n], fwPacket, nb, out, i, conntrackCache.Get(f.l))
|
||||
}
|
||||
}
|
||||
|
||||
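The reworked listenIn/listenOut loops above follow a read-many, process, write-batch shape. The toy program below shows only that shape, using in-memory stand-ins for the reader and writer; it is not the overlay.TunDev or udp.Conn API from this change.

// Minimal sketch of the batch pattern: read up to batchSize packets in one
// call, process each, then flush the results with a single batched write.
package main

import "fmt"

const batchSize = 4

// fakeSource hands out a fixed set of packets once, then reports zero reads.
type fakeSource struct{ packets [][]byte }

func (s *fakeSource) ReadMany(bufs [][]byte) int {
	n := copy(bufs, s.packets)
	s.packets = nil
	return n
}

// fakeSink counts how many packets each batched write delivers.
type fakeSink struct{ writes []int }

func (s *fakeSink) WriteBatch(bufs [][]byte) { s.writes = append(s.writes, len(bufs)) }

func main() {
	src := &fakeSource{packets: [][]byte{{1}, {2}, {3}}}
	sink := &fakeSink{}

	in := make([][]byte, batchSize)
	out := make([][]byte, 0, batchSize)

	n := src.ReadMany(in)
	for _, p := range in[:n] {
		out = append(out, p) // a real loop would encrypt/decrypt here
	}
	sink.WriteBatch(out)

	fmt.Println(sink.writes) // [3] — three packets moved in one batched write
}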
@@ -468,7 +410,7 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
|
||||
udpStats := udp.NewUDPStatsEmitter(f.writers)
|
||||
|
||||
certExpirationGauge := metrics.GetOrRegisterGauge("certificate.ttl_seconds", nil)
|
||||
certInitiatingVersion := metrics.GetOrRegisterGauge("certificate.initiating_version", nil)
|
||||
certDefaultVersion := metrics.GetOrRegisterGauge("certificate.default_version", nil)
|
||||
certMaxVersion := metrics.GetOrRegisterGauge("certificate.max_version", nil)
|
||||
|
||||
for {
|
||||
@@ -483,7 +425,7 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
|
||||
certState := f.pki.getCertState()
|
||||
defaultCrt := certState.GetDefaultCertificate()
|
||||
certExpirationGauge.Update(int64(defaultCrt.NotAfter().Sub(time.Now()) / time.Second))
|
||||
certInitiatingVersion.Update(int64(defaultCrt.Version()))
|
||||
certDefaultVersion.Update(int64(defaultCrt.Version()))
|
||||
|
||||
// Report the max certificate version we are capable of using
|
||||
if certState.v2Cert != nil {
|
||||
@@ -491,11 +433,6 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
|
||||
} else {
|
||||
certMaxVersion.Update(int64(certState.v1Cert.Version()))
|
||||
}
|
||||
if f.l.Level != logrus.DebugLevel {
|
||||
f.listenInMetric.Update(int64(f.listenInN))
|
||||
f.listenOutMetric.Update(int64(f.listenOutN))
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
283
lighthouse.go
@@ -24,7 +24,6 @@ import (
|
||||
)
|
||||
|
||||
var ErrHostNotKnown = errors.New("host not known")
|
||||
var ErrBadDetailsVpnAddr = errors.New("invalid packet, malformed detailsVpnAddr")
|
||||
|
||||
type LightHouse struct {
|
||||
//TODO: We need a timer wheel to kick out vpnAddrs that haven't reported in a long time
|
||||
@@ -33,7 +32,7 @@ type LightHouse struct {
|
||||
amLighthouse bool
|
||||
|
||||
myVpnNetworks []netip.Prefix
|
||||
myVpnNetworksTable *bart.Lite
|
||||
myVpnNetworksTable *bart.Table[struct{}]
|
||||
punchConn udp.Conn
|
||||
punchy *Punchy
|
||||
|
||||
@@ -57,7 +56,7 @@ type LightHouse struct {
|
||||
// staticList exists to avoid having a bool in each addrMap entry
|
||||
// since static should be rare
|
||||
staticList atomic.Pointer[map[netip.Addr]struct{}]
|
||||
lighthouses atomic.Pointer[[]netip.Addr]
|
||||
lighthouses atomic.Pointer[map[netip.Addr]struct{}]
|
||||
|
||||
interval atomic.Int64
|
||||
updateCancel context.CancelFunc
|
||||
@@ -108,7 +107,7 @@ func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C,
|
||||
queryChan: make(chan netip.Addr, c.GetUint32("handshakes.query_buffer", 64)),
|
||||
l: l,
|
||||
}
|
||||
lighthouses := make([]netip.Addr, 0)
|
||||
lighthouses := make(map[netip.Addr]struct{})
|
||||
h.lighthouses.Store(&lighthouses)
|
||||
staticList := make(map[netip.Addr]struct{})
|
||||
h.staticList.Store(&staticList)
|
||||
@@ -144,7 +143,7 @@ func (lh *LightHouse) GetStaticHostList() map[netip.Addr]struct{} {
|
||||
return *lh.staticList.Load()
|
||||
}
|
||||
|
||||
func (lh *LightHouse) GetLighthouses() []netip.Addr {
|
||||
func (lh *LightHouse) GetLighthouses() map[netip.Addr]struct{} {
|
||||
return *lh.lighthouses.Load()
|
||||
}
|
||||
|
||||
@@ -202,7 +201,8 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
|
||||
|
||||
//TODO: we could technically insert all returned addrs instead of just the first one if a dns lookup was used
|
||||
addr := addrs[0].Unmap()
|
||||
if lh.myVpnNetworksTable.Contains(addr) {
|
||||
_, found := lh.myVpnNetworksTable.Lookup(addr)
|
||||
if found {
|
||||
lh.l.WithField("addr", rawAddr).WithField("entry", i+1).
|
||||
Warn("Ignoring lighthouse.advertise_addrs report because it is within the nebula network range")
|
||||
continue
|
||||
@@ -307,12 +307,13 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
|
||||
}
|
||||
|
||||
if initial || c.HasChanged("lighthouse.hosts") {
|
||||
lhList, err := lh.parseLighthouses(c)
|
||||
lhMap := make(map[netip.Addr]struct{})
|
||||
err := lh.parseLighthouses(c, lhMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
lh.lighthouses.Store(&lhList)
|
||||
lh.lighthouses.Store(&lhMap)
|
||||
if !initial {
|
||||
//NOTE: we are not tearing down existing lighthouse connections because they might be used for non lighthouse traffic
|
||||
lh.l.Info("lighthouse.hosts has changed")
|
||||
@@ -346,38 +347,37 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (lh *LightHouse) parseLighthouses(c *config.C) ([]netip.Addr, error) {
|
||||
func (lh *LightHouse) parseLighthouses(c *config.C, lhMap map[netip.Addr]struct{}) error {
|
||||
lhs := c.GetStringSlice("lighthouse.hosts", []string{})
|
||||
if lh.amLighthouse && len(lhs) != 0 {
|
||||
lh.l.Warn("lighthouse.am_lighthouse enabled on node but upstream lighthouses exist in config")
|
||||
}
|
||||
out := make([]netip.Addr, len(lhs))
|
||||
|
||||
for i, host := range lhs {
|
||||
addr, err := netip.ParseAddr(host)
|
||||
if err != nil {
|
||||
return nil, util.NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, err)
|
||||
return util.NewContextualError("Unable to parse lighthouse host entry", m{"host": host, "entry": i + 1}, err)
|
||||
}
|
||||
|
||||
if !lh.myVpnNetworksTable.Contains(addr) {
|
||||
lh.l.WithFields(m{"vpnAddr": addr, "networks": lh.myVpnNetworks}).
|
||||
Warn("lighthouse host is not within our networks, lighthouse functionality will work but layer 3 network traffic to the lighthouse will not")
|
||||
_, found := lh.myVpnNetworksTable.Lookup(addr)
|
||||
if !found {
|
||||
return util.NewContextualError("lighthouse host is not in our networks, invalid", m{"vpnAddr": addr, "networks": lh.myVpnNetworks}, nil)
|
||||
}
|
||||
out[i] = addr
|
||||
lhMap[addr] = struct{}{}
|
||||
}
|
||||
|
||||
if !lh.amLighthouse && len(out) == 0 {
|
||||
if !lh.amLighthouse && len(lhMap) == 0 {
|
||||
lh.l.Warn("No lighthouse.hosts configured, this host will only be able to initiate tunnels with static_host_map entries")
|
||||
}
|
||||
|
||||
staticList := lh.GetStaticHostList()
|
||||
for i := range out {
|
||||
if _, ok := staticList[out[i]]; !ok {
|
||||
return nil, fmt.Errorf("lighthouse %s does not have a static_host_map entry", out[i])
|
||||
for lhAddr, _ := range lhMap {
|
||||
if _, ok := staticList[lhAddr]; !ok {
|
||||
return fmt.Errorf("lighthouse %s does not have a static_host_map entry", lhAddr)
|
||||
}
|
||||
}
|
||||
|
||||
return out, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func getStaticMapCadence(c *config.C) (time.Duration, error) {
|
||||
@@ -422,7 +422,7 @@ func (lh *LightHouse) loadStaticMap(c *config.C, staticList map[netip.Addr]struc
|
||||
return err
|
||||
}
|
||||
|
||||
shm := c.GetMap("static_host_map", map[string]any{})
|
||||
shm := c.GetMap("static_host_map", map[interface{}]interface{}{})
|
||||
i := 0
|
||||
|
||||
for k, v := range shm {
|
||||
@@ -431,14 +431,14 @@ func (lh *LightHouse) loadStaticMap(c *config.C, staticList map[netip.Addr]struc
|
||||
return util.NewContextualError("Unable to parse static_host_map entry", m{"host": k, "entry": i + 1}, err)
|
||||
}
|
||||
|
||||
if !lh.myVpnNetworksTable.Contains(vpnAddr) {
|
||||
lh.l.WithFields(m{"vpnAddr": vpnAddr, "networks": lh.myVpnNetworks, "entry": i + 1}).
|
||||
Warn("static_host_map key is not within our networks, layer 3 network traffic to this host will not work")
|
||||
_, found := lh.myVpnNetworksTable.Lookup(vpnAddr)
|
||||
if !found {
|
||||
return util.NewContextualError("static_host_map key is not in our network, invalid", m{"vpnAddr": vpnAddr, "networks": lh.myVpnNetworks, "entry": i + 1}, nil)
|
||||
}
|
||||
|
||||
vals, ok := v.([]any)
|
||||
vals, ok := v.([]interface{})
|
||||
if !ok {
|
||||
vals = []any{v}
|
||||
vals = []interface{}{v}
|
||||
}
|
||||
remoteAddrs := []string{}
|
||||
for _, v := range vals {
|
||||
@@ -489,7 +489,7 @@ func (lh *LightHouse) QueryCache(vpnAddrs []netip.Addr) *RemoteList {
|
||||
lh.Lock()
|
||||
defer lh.Unlock()
|
||||
// Add an entry if we don't already have one
|
||||
return lh.unlockedGetRemoteList(vpnAddrs) //todo CERT-V2 this contains addrmap lookups we could potentially skip
|
||||
return lh.unlockedGetRemoteList(vpnAddrs)
|
||||
}
|
||||
|
||||
// queryAndPrepMessage is a lock helper on RemoteList, assisting the caller to build a lighthouse message containing
|
||||
@@ -522,15 +522,11 @@ func (lh *LightHouse) queryAndPrepMessage(vpnAddr netip.Addr, f func(*cache) (in
|
||||
}
|
||||
|
||||
func (lh *LightHouse) DeleteVpnAddrs(allVpnAddrs []netip.Addr) {
|
||||
// First we check the static host map. If any of the VpnAddrs to be deleted are present, do nothing.
|
||||
staticList := lh.GetStaticHostList()
|
||||
for _, addr := range allVpnAddrs {
|
||||
if _, ok := staticList[addr]; ok {
|
||||
return
|
||||
}
|
||||
// First we check the static mapping
|
||||
// and do nothing if it is there
|
||||
if _, ok := lh.GetStaticHostList()[allVpnAddrs[0]]; ok {
|
||||
return
|
||||
}
|
||||
|
||||
// None of the VpnAddrs were present. Now we can do the deletes.
|
||||
lh.Lock()
|
||||
rm, ok := lh.addrMap[allVpnAddrs[0]]
|
||||
if ok {
|
||||
@@ -572,7 +568,7 @@ func (lh *LightHouse) addStaticRemotes(i int, d time.Duration, network string, t
|
||||
am.unlockedSetHostnamesResults(hr)
|
||||
|
||||
for _, addrPort := range hr.GetAddrs() {
|
||||
if !lh.shouldAdd([]netip.Addr{vpnAddr}, addrPort.Addr()) {
|
||||
if !lh.shouldAdd(vpnAddr, addrPort.Addr()) {
|
||||
continue
|
||||
}
|
||||
switch {
|
||||
@@ -634,37 +630,31 @@ func (lh *LightHouse) addCalculatedRemotes(vpnAddr netip.Addr) bool {
|
||||
return len(calculatedV4) > 0 || len(calculatedV6) > 0
|
||||
}
|
||||
|
||||
// unlockedGetRemoteList assumes you have the lh lock
|
||||
// unlockedGetRemoteList
|
||||
// assumes you have the lh lock
|
||||
func (lh *LightHouse) unlockedGetRemoteList(allAddrs []netip.Addr) *RemoteList {
|
||||
// before we go and make a new remotelist, we need to make sure we don't have one for any of this set of vpnaddrs yet
|
||||
for i, addr := range allAddrs {
|
||||
am, ok := lh.addrMap[addr]
|
||||
if ok {
|
||||
if i != 0 {
|
||||
lh.addrMap[allAddrs[0]] = am
|
||||
}
|
||||
return am
|
||||
am, ok := lh.addrMap[allAddrs[0]]
|
||||
if !ok {
|
||||
am = NewRemoteList(allAddrs, func(a netip.Addr) bool { return lh.shouldAdd(allAddrs[0], a) })
|
||||
for _, addr := range allAddrs {
|
||||
lh.addrMap[addr] = am
|
||||
}
|
||||
}
|
||||
|
||||
am := NewRemoteList(allAddrs, lh.shouldAdd)
|
||||
for _, addr := range allAddrs {
|
||||
lh.addrMap[addr] = am
|
||||
}
|
||||
return am
|
||||
}
|
||||
|
||||
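unlockedGetRemoteList above points every vpn address of a host at the same RemoteList so state stays shared across aliases. A minimal sketch of that aliasing follows, with a cut-down remoteList type invented for the example:

// Sketch: every alias key in the map shares one list value, so an update made
// through any address is visible through all of them.
package main

import (
	"fmt"
	"net/netip"
)

type remoteList struct{ addrs []netip.AddrPort }

func main() {
	addrMap := map[netip.Addr]*remoteList{}
	vpnAddrs := []netip.Addr{
		netip.MustParseAddr("10.0.0.1"),
		netip.MustParseAddr("fd00::1"),
	}

	rl := &remoteList{}
	for _, a := range vpnAddrs {
		addrMap[a] = rl // every alias shares one list
	}

	addrMap[vpnAddrs[1]].addrs = append(addrMap[vpnAddrs[1]].addrs, netip.MustParseAddrPort("1.2.3.4:4242"))
	fmt.Println(len(addrMap[vpnAddrs[0]].addrs)) // 1 — update visible via the other alias
}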
func (lh *LightHouse) shouldAdd(vpnAddrs []netip.Addr, to netip.Addr) bool {
|
||||
allow := lh.GetRemoteAllowList().AllowAll(vpnAddrs, to)
|
||||
func (lh *LightHouse) shouldAdd(vpnAddr netip.Addr, to netip.Addr) bool {
|
||||
allow := lh.GetRemoteAllowList().Allow(vpnAddr, to)
|
||||
if lh.l.Level >= logrus.TraceLevel {
|
||||
lh.l.WithField("vpnAddrs", vpnAddrs).WithField("udpAddr", to).WithField("allow", allow).
|
||||
lh.l.WithField("vpnAddr", vpnAddr).WithField("udpAddr", to).WithField("allow", allow).
|
||||
Trace("remoteAllowList.Allow")
|
||||
}
|
||||
if !allow {
|
||||
return false
|
||||
}
|
||||
|
||||
if lh.myVpnNetworksTable.Contains(to) {
|
||||
_, found := lh.myVpnNetworksTable.Lookup(to)
|
||||
if found {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -684,7 +674,8 @@ func (lh *LightHouse) unlockedShouldAddV4(vpnAddr netip.Addr, to *V4AddrPort) bo
|
||||
return false
|
||||
}
|
||||
|
||||
if lh.myVpnNetworksTable.Contains(udpAddr.Addr()) {
|
||||
_, found := lh.myVpnNetworksTable.Lookup(udpAddr.Addr())
|
||||
if found {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -704,7 +695,8 @@ func (lh *LightHouse) unlockedShouldAddV6(vpnAddr netip.Addr, to *V6AddrPort) bo
|
||||
return false
|
||||
}
|
||||
|
||||
if lh.myVpnNetworksTable.Contains(udpAddr.Addr()) {
|
||||
_, found := lh.myVpnNetworksTable.Lookup(udpAddr.Addr())
|
||||
if found {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -712,22 +704,19 @@ func (lh *LightHouse) unlockedShouldAddV6(vpnAddr netip.Addr, to *V6AddrPort) bo
|
||||
}
|
||||
|
||||
func (lh *LightHouse) IsLighthouseAddr(vpnAddr netip.Addr) bool {
|
||||
l := lh.GetLighthouses()
|
||||
for i := range l {
|
||||
if l[i] == vpnAddr {
|
||||
return true
|
||||
}
|
||||
if _, ok := lh.GetLighthouses()[vpnAddr]; ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (lh *LightHouse) IsAnyLighthouseAddr(vpnAddrs []netip.Addr) bool {
|
||||
// TODO: CERT-V2 IsLighthouseAddr should be sufficient, we just need to update the vpnAddrs for lighthouses after a handshake
|
||||
// so that we know all the lighthouse vpnAddrs, not just the ones we were configured to talk to initially
|
||||
func (lh *LightHouse) IsAnyLighthouseAddr(vpnAddr []netip.Addr) bool {
|
||||
l := lh.GetLighthouses()
|
||||
for i := range vpnAddrs {
|
||||
for j := range l {
|
||||
if l[j] == vpnAddrs[i] {
|
||||
return true
|
||||
}
|
||||
for _, a := range vpnAddr {
|
||||
if _, ok := l[a]; ok {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
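The older map form restored above turns IsLighthouseAddr into a plain map membership test rather than a slice scan. A tiny sketch of that check, using made-up addresses:

// Sketch: map[netip.Addr]struct{} as a set gives an O(1) lighthouse membership check.
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	lighthouses := map[netip.Addr]struct{}{
		netip.MustParseAddr("10.128.0.2"): {},
	}

	_, ok := lighthouses[netip.MustParseAddr("10.128.0.2")]
	fmt.Println(ok) // true

	_, ok = lighthouses[netip.MustParseAddr("10.128.0.9")]
	fmt.Println(ok) // false
}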
@@ -769,12 +758,12 @@ func (lh *LightHouse) innerQueryServer(addr netip.Addr, nb, out []byte) {
|
||||
queried := 0
|
||||
lighthouses := lh.GetLighthouses()
|
||||
|
||||
for _, lhVpnAddr := range lighthouses {
|
||||
for lhVpnAddr := range lighthouses {
|
||||
hi := lh.ifce.GetHostInfo(lhVpnAddr)
|
||||
if hi != nil {
|
||||
v = hi.ConnectionState.myCert.Version()
|
||||
} else {
|
||||
v = lh.ifce.GetCertState().initiatingVersion
|
||||
v = lh.ifce.GetCertState().defaultVersion
|
||||
}
|
||||
|
||||
if v == cert.Version1 {
|
||||
@@ -867,7 +856,8 @@ func (lh *LightHouse) SendUpdate() {
|
||||
|
||||
lal := lh.GetLocalAllowList()
|
||||
for _, e := range localAddrs(lh.l, lal) {
|
||||
if lh.myVpnNetworksTable.Contains(e) {
|
||||
_, found := lh.myVpnNetworksTable.Lookup(e)
|
||||
if found {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -887,13 +877,13 @@ func (lh *LightHouse) SendUpdate() {
|
||||
updated := 0
|
||||
lighthouses := lh.GetLighthouses()
|
||||
|
||||
for _, lhVpnAddr := range lighthouses {
|
||||
for lhVpnAddr := range lighthouses {
|
||||
var v cert.Version
|
||||
hi := lh.ifce.GetHostInfo(lhVpnAddr)
|
||||
if hi != nil {
|
||||
v = hi.ConnectionState.myCert.Version()
|
||||
} else {
|
||||
v = lh.ifce.GetCertState().initiatingVersion
|
||||
v = lh.ifce.GetCertState().defaultVersion
|
||||
}
|
||||
if v == cert.Version1 {
|
||||
if v1Update == nil {
|
||||
@@ -945,6 +935,7 @@ func (lh *LightHouse) SendUpdate() {
|
||||
V4AddrPorts: v4,
|
||||
V6AddrPorts: v6,
|
||||
RelayVpnAddrs: relays,
|
||||
VpnAddr: netAddrToProtoAddr(lh.myVpnNetworks[0].Addr()),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1064,19 +1055,19 @@ func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, fromVpnAddrs []neti
|
||||
return
|
||||
}
|
||||
|
||||
queryVpnAddr, useVersion, err := n.Details.GetVpnAddrAndVersion()
|
||||
if err != nil {
|
||||
useVersion := cert.Version1
|
||||
var queryVpnAddr netip.Addr
|
||||
if n.Details.OldVpnAddr != 0 {
|
||||
b := [4]byte{}
|
||||
binary.BigEndian.PutUint32(b[:], n.Details.OldVpnAddr)
|
||||
queryVpnAddr = netip.AddrFrom4(b)
|
||||
useVersion = 1
|
||||
} else if n.Details.VpnAddr != nil {
|
||||
queryVpnAddr = protoAddrToNetAddr(n.Details.VpnAddr)
|
||||
useVersion = 2
|
||||
} else {
|
||||
if lhh.l.Level >= logrus.DebugLevel {
|
||||
lhh.l.WithField("from", fromVpnAddrs).WithField("details", n.Details).
|
||||
Debugln("Dropping malformed HostQuery")
|
||||
}
|
||||
return
|
||||
}
|
||||
if useVersion == cert.Version1 && queryVpnAddr.Is6() {
|
||||
// this case really shouldn't be possible to represent, but reject it anyway.
|
||||
if lhh.l.Level >= logrus.DebugLevel {
|
||||
lhh.l.WithField("vpnAddrs", fromVpnAddrs).WithField("queryVpnAddr", queryVpnAddr).
|
||||
Debugln("invalid vpn addr for v1 handleHostQuery")
|
||||
lhh.l.WithField("from", fromVpnAddrs).WithField("details", n.Details).Debugln("Dropping malformed HostQuery")
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -1085,6 +1076,9 @@ func (lhh *LightHouseHandler) handleHostQuery(n *NebulaMeta, fromVpnAddrs []neti
|
||||
n = lhh.resetMeta()
|
||||
n.Type = NebulaMeta_HostQueryReply
|
||||
if useVersion == cert.Version1 {
|
||||
if !queryVpnAddr.Is4() {
|
||||
return 0, fmt.Errorf("invalid vpn addr for v1 handleHostQuery")
|
||||
}
|
||||
b := queryVpnAddr.As4()
|
||||
n.Details.OldVpnAddr = binary.BigEndian.Uint32(b[:])
|
||||
} else {
|
||||
@@ -1120,7 +1114,7 @@ func (lhh *LightHouseHandler) sendHostPunchNotification(n *NebulaMeta, fromVpnAd
|
||||
targetHI := lhh.lh.ifce.GetHostInfo(punchNotifDest)
|
||||
var useVersion cert.Version
|
||||
if targetHI == nil {
|
||||
useVersion = lhh.lh.ifce.GetCertState().initiatingVersion
|
||||
useVersion = lhh.lh.ifce.GetCertState().defaultVersion
|
||||
} else {
|
||||
crt := targetHI.GetCert().Certificate
|
||||
useVersion = crt.Version()
|
||||
@@ -1129,9 +1123,8 @@ func (lhh *LightHouseHandler) sendHostPunchNotification(n *NebulaMeta, fromVpnAd
|
||||
if ok {
|
||||
whereToPunch = newDest
|
||||
} else {
|
||||
if lhh.l.Level >= logrus.DebugLevel {
|
||||
lhh.l.WithField("to", crt.Networks()).Debugln("unable to punch to host, no addresses in common")
|
||||
}
|
||||
//TODO: CERT-V2 this means the destination will have no addresses in common with the punch-ee
|
||||
//choosing to do nothing for now, but maybe we return an error?
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1190,17 +1183,19 @@ func (lhh *LightHouseHandler) coalesceAnswers(v cert.Version, c *cache, n *Nebul
|
||||
if !r.Is4() {
|
||||
continue
|
||||
}
|
||||
|
||||
b = r.As4()
|
||||
n.Details.OldRelayVpnAddrs = append(n.Details.OldRelayVpnAddrs, binary.BigEndian.Uint32(b[:]))
|
||||
}
|
||||
|
||||
} else if v == cert.Version2 {
|
||||
for _, r := range c.relay.relay {
|
||||
n.Details.RelayVpnAddrs = append(n.Details.RelayVpnAddrs, netAddrToProtoAddr(r))
|
||||
}
|
||||
|
||||
} else {
|
||||
if lhh.l.Level >= logrus.DebugLevel {
|
||||
lhh.l.WithField("version", v).Debug("unsupported protocol version")
|
||||
}
|
||||
//TODO: CERT-V2 don't panic
|
||||
panic("unsupported version")
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1210,16 +1205,18 @@ func (lhh *LightHouseHandler) handleHostQueryReply(n *NebulaMeta, fromVpnAddrs [
|
||||
return
|
||||
}
|
||||
|
||||
certVpnAddr, _, err := n.Details.GetVpnAddrAndVersion()
|
||||
if err != nil {
|
||||
if lhh.l.Level >= logrus.DebugLevel {
|
||||
lhh.l.WithError(err).WithField("vpnAddrs", fromVpnAddrs).Error("dropping malformed HostQueryReply")
|
||||
}
|
||||
return
|
||||
lhh.lh.Lock()
|
||||
|
||||
var certVpnAddr netip.Addr
|
||||
if n.Details.OldVpnAddr != 0 {
|
||||
b := [4]byte{}
|
||||
binary.BigEndian.PutUint32(b[:], n.Details.OldVpnAddr)
|
||||
certVpnAddr = netip.AddrFrom4(b)
|
||||
} else if n.Details.VpnAddr != nil {
|
||||
certVpnAddr = protoAddrToNetAddr(n.Details.VpnAddr)
|
||||
}
|
||||
relays := n.Details.GetRelays()
|
||||
|
||||
lhh.lh.Lock()
|
||||
am := lhh.lh.unlockedGetRemoteList([]netip.Addr{certVpnAddr})
|
||||
am.Lock()
|
||||
lhh.lh.Unlock()
|
||||
@@ -1244,24 +1241,27 @@ func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, fromVp
|
||||
return
|
||||
}
|
||||
|
||||
// not using GetVpnAddrAndVersion because we don't want to error on a blank detailsVpnAddr
|
||||
var detailsVpnAddr netip.Addr
|
||||
var useVersion cert.Version
|
||||
if n.Details.OldVpnAddr != 0 { //v1 always sets this field
|
||||
useVersion := cert.Version1
|
||||
if n.Details.OldVpnAddr != 0 {
|
||||
b := [4]byte{}
|
||||
binary.BigEndian.PutUint32(b[:], n.Details.OldVpnAddr)
|
||||
detailsVpnAddr = netip.AddrFrom4(b)
|
||||
useVersion = cert.Version1
|
||||
} else if n.Details.VpnAddr != nil { //this field is "optional" in v2, but if it's set, we should enforce it
|
||||
} else if n.Details.VpnAddr != nil {
|
||||
detailsVpnAddr = protoAddrToNetAddr(n.Details.VpnAddr)
|
||||
useVersion = cert.Version2
|
||||
} else {
|
||||
detailsVpnAddr = netip.Addr{}
|
||||
useVersion = cert.Version2
|
||||
if lhh.l.Level >= logrus.DebugLevel {
|
||||
lhh.l.WithField("details", n.Details).Debugf("dropping invalid HostUpdateNotification")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
//Simple check that the host sent this not someone else, if detailsVpnAddr is filled
|
||||
if detailsVpnAddr.IsValid() && !slices.Contains(fromVpnAddrs, detailsVpnAddr) {
|
||||
//TODO: CERT-V2 hosts with only v2 certs cannot provide their ipv6 addr when contacting the lighthouse via v4?
|
||||
//TODO: CERT-V2 why do we care about the vpnAddr in the packet? We know where it came from, right?
|
||||
//Simple check that the host sent this not someone else
|
||||
if !slices.Contains(fromVpnAddrs, detailsVpnAddr) {
|
||||
if lhh.l.Level >= logrus.DebugLevel {
|
||||
lhh.l.WithField("vpnAddrs", fromVpnAddrs).WithField("answer", detailsVpnAddr).Debugln("Host sent invalid update")
|
||||
}
|
||||
@@ -1275,24 +1275,24 @@ func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, fromVp
|
||||
am.Lock()
|
||||
lhh.lh.Unlock()
|
||||
|
||||
am.unlockedSetV4(fromVpnAddrs[0], fromVpnAddrs[0], n.Details.V4AddrPorts, lhh.lh.unlockedShouldAddV4)
|
||||
am.unlockedSetV6(fromVpnAddrs[0], fromVpnAddrs[0], n.Details.V6AddrPorts, lhh.lh.unlockedShouldAddV6)
|
||||
am.unlockedSetV4(fromVpnAddrs[0], detailsVpnAddr, n.Details.V4AddrPorts, lhh.lh.unlockedShouldAddV4)
|
||||
am.unlockedSetV6(fromVpnAddrs[0], detailsVpnAddr, n.Details.V6AddrPorts, lhh.lh.unlockedShouldAddV6)
|
||||
am.unlockedSetRelay(fromVpnAddrs[0], relays)
|
||||
am.Unlock()
|
||||
|
||||
n = lhh.resetMeta()
|
||||
n.Type = NebulaMeta_HostUpdateNotificationAck
|
||||
switch useVersion {
|
||||
case cert.Version1:
|
||||
|
||||
if useVersion == cert.Version1 {
|
||||
if !fromVpnAddrs[0].Is4() {
|
||||
lhh.l.WithField("vpnAddrs", fromVpnAddrs).Error("Can not send HostUpdateNotificationAck for a ipv6 vpn ip in a v1 message")
|
||||
return
|
||||
}
|
||||
vpnAddrB := fromVpnAddrs[0].As4()
|
||||
n.Details.OldVpnAddr = binary.BigEndian.Uint32(vpnAddrB[:])
|
||||
case cert.Version2:
|
||||
// do nothing, we want to send a blank message
|
||||
default:
|
||||
} else if useVersion == cert.Version2 {
|
||||
n.Details.VpnAddr = netAddrToProtoAddr(fromVpnAddrs[0])
|
||||
} else {
|
||||
lhh.l.WithField("useVersion", useVersion).Error("invalid protocol version")
|
||||
return
|
||||
}
|
||||
@@ -1310,20 +1310,13 @@ func (lhh *LightHouseHandler) handleHostUpdateNotification(n *NebulaMeta, fromVp
|
||||
func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, fromVpnAddrs []netip.Addr, w EncWriter) {
|
||||
//It's possible the lighthouse is communicating with us using a non primary vpn addr,
|
||||
//which means we need to compare all fromVpnAddrs against all configured lighthouse vpn addrs.
|
||||
//maybe one day we'll have a better idea, if it matters.
|
||||
if !lhh.lh.IsAnyLighthouseAddr(fromVpnAddrs) {
|
||||
return
|
||||
}
|
||||
|
||||
detailsVpnAddr, _, err := n.Details.GetVpnAddrAndVersion()
|
||||
if err != nil {
|
||||
if lhh.l.Level >= logrus.DebugLevel {
|
||||
lhh.l.WithField("details", n.Details).WithError(err).Debugln("dropping invalid HostPunchNotification")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
empty := []byte{0}
|
||||
punch := func(vpnPeer netip.AddrPort, logVpnAddr netip.Addr) {
|
||||
punch := func(vpnPeer netip.AddrPort) {
|
||||
if !vpnPeer.IsValid() {
|
||||
return
|
||||
}
|
||||
@@ -1335,38 +1328,48 @@ func (lhh *LightHouseHandler) handleHostPunchNotification(n *NebulaMeta, fromVpn
|
||||
}()
|
||||
|
||||
if lhh.l.Level >= logrus.DebugLevel {
|
||||
var logVpnAddr netip.Addr
|
||||
if n.Details.OldVpnAddr != 0 {
|
||||
b := [4]byte{}
|
||||
binary.BigEndian.PutUint32(b[:], n.Details.OldVpnAddr)
|
||||
logVpnAddr = netip.AddrFrom4(b)
|
||||
} else if n.Details.VpnAddr != nil {
|
||||
logVpnAddr = protoAddrToNetAddr(n.Details.VpnAddr)
|
||||
}
|
||||
lhh.l.Debugf("Punching on %v for %v", vpnPeer, logVpnAddr)
|
||||
}
|
||||
}
|
||||
|
||||
remoteAllowList := lhh.lh.GetRemoteAllowList()
|
||||
for _, a := range n.Details.V4AddrPorts {
|
||||
b := protoV4AddrPortToNetAddrPort(a)
|
||||
if remoteAllowList.Allow(detailsVpnAddr, b.Addr()) {
|
||||
punch(b, detailsVpnAddr)
|
||||
}
|
||||
punch(protoV4AddrPortToNetAddrPort(a))
|
||||
}
|
||||
|
||||
for _, a := range n.Details.V6AddrPorts {
|
||||
b := protoV6AddrPortToNetAddrPort(a)
|
||||
if remoteAllowList.Allow(detailsVpnAddr, b.Addr()) {
|
||||
punch(b, detailsVpnAddr)
|
||||
}
|
||||
punch(protoV6AddrPortToNetAddrPort(a))
|
||||
}
|
||||
|
||||
// This sends a nebula test packet to the host trying to contact us. In the case
|
||||
// of a double nat or other difficult scenario, this may help establish
|
||||
// a tunnel.
|
||||
if lhh.lh.punchy.GetRespond() {
|
||||
var queryVpnAddr netip.Addr
|
||||
if n.Details.OldVpnAddr != 0 {
|
||||
b := [4]byte{}
|
||||
binary.BigEndian.PutUint32(b[:], n.Details.OldVpnAddr)
|
||||
queryVpnAddr = netip.AddrFrom4(b)
|
||||
} else if n.Details.VpnAddr != nil {
|
||||
queryVpnAddr = protoAddrToNetAddr(n.Details.VpnAddr)
|
||||
}
|
||||
|
||||
go func() {
|
||||
time.Sleep(lhh.lh.punchy.GetRespondDelay())
|
||||
if lhh.l.Level >= logrus.DebugLevel {
|
||||
lhh.l.Debugf("Sending a nebula test packet to vpn addr %s", detailsVpnAddr)
|
||||
lhh.l.Debugf("Sending a nebula test packet to vpn addr %s", queryVpnAddr)
|
||||
}
|
||||
//NOTE: we have to allocate a new output buffer here since we are spawning a new goroutine
|
||||
// for each punchBack packet. We should move this into a timerwheel or a single goroutine
|
||||
// managed by a channel.
|
||||
w.SendMessageToVpnAddr(header.Test, header.TestRequest, detailsVpnAddr, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
|
||||
w.SendMessageToVpnAddr(header.Test, header.TestRequest, queryVpnAddr, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
|
||||
}()
|
||||
}
|
||||
}
|
||||
@@ -1445,17 +1448,3 @@ func findNetworkUnion(prefixes []netip.Prefix, addrs []netip.Addr) (netip.Addr,
|
||||
}
|
||||
return netip.Addr{}, false
|
||||
}
|
||||
|
||||
func (d *NebulaMetaDetails) GetVpnAddrAndVersion() (netip.Addr, cert.Version, error) {
|
||||
if d.OldVpnAddr != 0 {
|
||||
b := [4]byte{}
|
||||
binary.BigEndian.PutUint32(b[:], d.OldVpnAddr)
|
||||
detailsVpnAddr := netip.AddrFrom4(b)
|
||||
return detailsVpnAddr, cert.Version1, nil
|
||||
} else if d.VpnAddr != nil {
|
||||
detailsVpnAddr := protoAddrToNetAddr(d.VpnAddr)
|
||||
return detailsVpnAddr, cert.Version2, nil
|
||||
} else {
|
||||
return netip.Addr{}, cert.Version1, ErrBadDetailsVpnAddr
|
||||
}
|
||||
}
|
||||
|
||||
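GetVpnAddrAndVersion above treats the legacy OldVpnAddr field as a big-endian uint32 holding an IPv4 address. The conversion it performs boils down to the following, shown here with a hard-coded sample value:

// Sketch of the v1 address decode: a big-endian uint32 becomes a netip.Addr.
package main

import (
	"encoding/binary"
	"fmt"
	"net/netip"
)

func main() {
	oldVpnAddr := uint32(0x0A800001) // sample value, encodes 10.128.0.1

	var b [4]byte
	binary.BigEndian.PutUint32(b[:], oldVpnAddr)
	fmt.Println(netip.AddrFrom4(b)) // 10.128.0.1
}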
@@ -14,7 +14,7 @@ import (
|
||||
"github.com/slackhq/nebula/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.yaml.in/yaml/v3"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
func TestOldIPv4Only(t *testing.T) {
|
||||
@@ -31,8 +31,8 @@ func TestOldIPv4Only(t *testing.T) {
|
||||
func Test_lhStaticMapping(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
myVpnNet := netip.MustParsePrefix("10.128.0.1/16")
|
||||
nt := new(bart.Lite)
|
||||
nt.Insert(myVpnNet)
|
||||
nt := new(bart.Table[struct{}])
|
||||
nt.Insert(myVpnNet, struct{}{})
|
||||
cs := &CertState{
|
||||
myVpnNetworks: []netip.Prefix{myVpnNet},
|
||||
myVpnNetworksTable: nt,
|
||||
@@ -40,15 +40,15 @@ func Test_lhStaticMapping(t *testing.T) {
|
||||
lh1 := "10.128.0.2"
|
||||
|
||||
c := config.NewC(l)
|
||||
c.Settings["lighthouse"] = map[string]any{"hosts": []any{lh1}}
|
||||
c.Settings["static_host_map"] = map[string]any{lh1: []any{"1.1.1.1:4242"}}
|
||||
c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1}}
|
||||
c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"1.1.1.1:4242"}}
|
||||
_, err := NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
lh2 := "10.128.0.3"
|
||||
c = config.NewC(l)
|
||||
c.Settings["lighthouse"] = map[string]any{"hosts": []any{lh1, lh2}}
|
||||
c.Settings["static_host_map"] = map[string]any{lh1: []any{"100.1.1.1:4242"}}
|
||||
c.Settings["lighthouse"] = map[interface{}]interface{}{"hosts": []interface{}{lh1, lh2}}
|
||||
c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"100.1.1.1:4242"}}
|
||||
_, err = NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
|
||||
require.EqualError(t, err, "lighthouse 10.128.0.3 does not have a static_host_map entry")
|
||||
}
|
||||
@@ -56,8 +56,8 @@ func Test_lhStaticMapping(t *testing.T) {
|
||||
func TestReloadLighthouseInterval(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
myVpnNet := netip.MustParsePrefix("10.128.0.1/16")
|
||||
nt := new(bart.Lite)
|
||||
nt.Insert(myVpnNet)
|
||||
nt := new(bart.Table[struct{}])
|
||||
nt.Insert(myVpnNet, struct{}{})
|
||||
cs := &CertState{
|
||||
myVpnNetworks: []netip.Prefix{myVpnNet},
|
||||
myVpnNetworksTable: nt,
|
||||
@@ -65,12 +65,12 @@ func TestReloadLighthouseInterval(t *testing.T) {
|
||||
lh1 := "10.128.0.2"
|
||||
|
||||
c := config.NewC(l)
|
||||
c.Settings["lighthouse"] = map[string]any{
|
||||
"hosts": []any{lh1},
|
||||
c.Settings["lighthouse"] = map[interface{}]interface{}{
|
||||
"hosts": []interface{}{lh1},
|
||||
"interval": "1s",
|
||||
}
|
||||
|
||||
c.Settings["static_host_map"] = map[string]any{lh1: []any{"1.1.1.1:4242"}}
|
||||
c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"1.1.1.1:4242"}}
|
||||
lh, err := NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
|
||||
require.NoError(t, err)
|
||||
lh.ifce = &mockEncWriter{}
@@ -91,8 +91,8 @@ func TestReloadLighthouseInterval(t *testing.T) {
func BenchmarkLighthouseHandleRequest(b *testing.B) {
	l := test.NewLogger()
	myVpnNet := netip.MustParsePrefix("10.128.0.1/0")
	nt := new(bart.Lite)
	nt.Insert(myVpnNet)
	nt := new(bart.Table[struct{}])
	nt.Insert(myVpnNet, struct{}{})
	cs := &CertState{
		myVpnNetworks: []netip.Prefix{myVpnNet},
		myVpnNetworksTable: nt,
@@ -192,12 +192,12 @@ func TestLighthouse_Memory(t *testing.T) {
	theirVpnIp := netip.MustParseAddr("10.128.0.3")

	c := config.NewC(l)
	c.Settings["lighthouse"] = map[string]any{"am_lighthouse": true}
	c.Settings["listen"] = map[string]any{"port": 4242}
	c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
	c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}

	myVpnNet := netip.MustParsePrefix("10.128.0.1/24")
	nt := new(bart.Lite)
	nt.Insert(myVpnNet)
	nt := new(bart.Table[struct{}])
	nt.Insert(myVpnNet, struct{}{})
	cs := &CertState{
		myVpnNetworks: []netip.Prefix{myVpnNet},
		myVpnNetworksTable: nt,
@@ -277,12 +277,12 @@ func TestLighthouse_Memory(t *testing.T) {
func TestLighthouse_reload(t *testing.T) {
	l := test.NewLogger()
	c := config.NewC(l)
	c.Settings["lighthouse"] = map[string]any{"am_lighthouse": true}
	c.Settings["listen"] = map[string]any{"port": 4242}
	c.Settings["lighthouse"] = map[interface{}]interface{}{"am_lighthouse": true}
	c.Settings["listen"] = map[interface{}]interface{}{"port": 4242}

	myVpnNet := netip.MustParsePrefix("10.128.0.1/24")
	nt := new(bart.Lite)
	nt.Insert(myVpnNet)
	nt := new(bart.Table[struct{}])
	nt.Insert(myVpnNet, struct{}{})
	cs := &CertState{
		myVpnNetworks: []netip.Prefix{myVpnNet},
		myVpnNetworksTable: nt,
@@ -291,9 +291,9 @@ func TestLighthouse_reload(t *testing.T) {
	lh, err := NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
	require.NoError(t, err)

	nc := map[string]any{
		"static_host_map": map[string]any{
			"10.128.0.2": []any{"1.1.1.1:4242"},
	nc := map[interface{}]interface{}{
		"static_host_map": map[interface{}]interface{}{
			"10.128.0.2": []interface{}{"1.1.1.1:4242"},
		},
	}
	rc, err := yaml.Marshal(nc)
@@ -417,7 +417,7 @@ func (tw *testEncWriter) GetHostInfo(vpnIp netip.Addr) *HostInfo {
}

func (tw *testEncWriter) GetCertState() *CertState {
	return &CertState{initiatingVersion: tw.protocolVersion}
	return &CertState{defaultVersion: tw.protocolVersion}
}

// assertIp4InArray asserts every address in want is at the same position in have and that the lengths match
@@ -493,123 +493,3 @@ func Test_findNetworkUnion(t *testing.T) {
	out, ok = findNetworkUnion([]netip.Prefix{fc00}, []netip.Addr{a1, afe81})
	assert.False(t, ok)
}

func TestLighthouse_Dont_Delete_Static_Hosts(t *testing.T) {
	l := test.NewLogger()

	myUdpAddr2 := netip.MustParseAddrPort("1.2.3.4:4242")

	testSameHostNotStatic := netip.MustParseAddr("10.128.0.41")
	testStaticHost := netip.MustParseAddr("10.128.0.42")
	//myVpnIp := netip.MustParseAddr("10.128.0.2")

	c := config.NewC(l)
	lh1 := "10.128.0.2"
	c.Settings["lighthouse"] = map[string]any{
		"hosts": []any{lh1},
		"interval": "1s",
	}

	c.Settings["listen"] = map[string]any{"port": 4242}
	c.Settings["static_host_map"] = map[string]any{
		lh1: []any{"1.1.1.1:4242"},
		"10.128.0.42": []any{"1.2.3.4:4242"},
	}

	myVpnNet := netip.MustParsePrefix("10.128.0.1/24")
	nt := new(bart.Lite)
	nt.Insert(myVpnNet)
	cs := &CertState{
		myVpnNetworks: []netip.Prefix{myVpnNet},
		myVpnNetworksTable: nt,
	}
	lh, err := NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
	require.NoError(t, err)
	lh.ifce = &mockEncWriter{}

	//test that we actually have the static entry:
	out := lh.Query(testStaticHost)
	assert.NotNil(t, out)
	assert.Equal(t, out.vpnAddrs[0], testStaticHost)
	out.Rebuild([]netip.Prefix{}) //why tho
	assert.Equal(t, out.addrs[0], myUdpAddr2)

	//bolt on a lower numbered primary IP
	am := lh.unlockedGetRemoteList([]netip.Addr{testStaticHost})
	am.vpnAddrs = []netip.Addr{testSameHostNotStatic, testStaticHost}
	lh.addrMap[testSameHostNotStatic] = am
	out.Rebuild([]netip.Prefix{}) //???

	//test that we actually have the static entry:
	out = lh.Query(testStaticHost)
	assert.NotNil(t, out)
	assert.Equal(t, out.vpnAddrs[0], testSameHostNotStatic)
	assert.Equal(t, out.vpnAddrs[1], testStaticHost)
	assert.Equal(t, out.addrs[0], myUdpAddr2)

	//test that we actually have the static entry for BOTH:
	out2 := lh.Query(testSameHostNotStatic)
	assert.Same(t, out2, out)

	//now do the delete
	lh.DeleteVpnAddrs([]netip.Addr{testSameHostNotStatic, testStaticHost})
	//verify
	out = lh.Query(testSameHostNotStatic)
	assert.NotNil(t, out)
	if out == nil {
		t.Fatal("expected non-nil query for the static host")
	}
	assert.Equal(t, out.vpnAddrs[0], testSameHostNotStatic)
	assert.Equal(t, out.vpnAddrs[1], testStaticHost)
	assert.Equal(t, out.addrs[0], myUdpAddr2)
}

func TestLighthouse_DeletesWork(t *testing.T) {
	l := test.NewLogger()

	myUdpAddr2 := netip.MustParseAddrPort("1.2.3.4:4242")
	testHost := netip.MustParseAddr("10.128.0.42")

	c := config.NewC(l)
	lh1 := "10.128.0.2"
	c.Settings["lighthouse"] = map[string]any{
		"hosts": []any{lh1},
		"interval": "1s",
	}

	c.Settings["listen"] = map[string]any{"port": 4242}
	c.Settings["static_host_map"] = map[string]any{
		lh1: []any{"1.1.1.1:4242"},
	}

	myVpnNet := netip.MustParsePrefix("10.128.0.1/24")
	nt := new(bart.Lite)
	nt.Insert(myVpnNet)
	cs := &CertState{
		myVpnNetworks: []netip.Prefix{myVpnNet},
		myVpnNetworksTable: nt,
	}
	lh, err := NewLightHouseFromConfig(context.Background(), l, c, cs, nil, nil)
	require.NoError(t, err)
	lh.ifce = &mockEncWriter{}

	//insert the host
	am := lh.unlockedGetRemoteList([]netip.Addr{testHost})
	am.vpnAddrs = []netip.Addr{testHost}
	am.addrs = []netip.AddrPort{myUdpAddr2}
	lh.addrMap[testHost] = am
	am.Rebuild([]netip.Prefix{}) //???

	//test that we actually have the entry:
	out := lh.Query(testHost)
	assert.NotNil(t, out)
	assert.Equal(t, out.vpnAddrs[0], testHost)
	out.Rebuild([]netip.Prefix{}) //why tho
	assert.Equal(t, out.addrs[0], myUdpAddr2)

	//now do the delete
	lh.DeleteVpnAddrs([]netip.Addr{testHost})
	//verify
	out = lh.Query(testHost)
	assert.Nil(t, out)
}
main.go (52 changes)
@@ -13,10 +13,10 @@ import (
	"github.com/slackhq/nebula/sshd"
	"github.com/slackhq/nebula/udp"
	"github.com/slackhq/nebula/util"
	"go.yaml.in/yaml/v3"
	"gopkg.in/yaml.v2"
)

type m = map[string]any
type m map[string]interface{}

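The switch in the `m` alias above tracks the YAML library swap in the import block: gopkg.in/yaml.v2 decodes untyped mappings into map[interface{}]interface{}, while yaml v3 (now published as go.yaml.in/yaml/v3) decodes them into map[string]interface{}, and `any` is simply an alias for `interface{}`. A minimal sketch of that difference, assuming only the two yaml packages themselves; the decoded types in the comments are the behavior to verify:

```go
package main

import (
	"fmt"

	yamlv3 "go.yaml.in/yaml/v3"
	yamlv2 "gopkg.in/yaml.v2"
)

func main() {
	doc := []byte("lighthouse:\n  am_lighthouse: true\n")

	// yaml.v2: untyped mappings come back as map[interface{}]interface{}
	var v2out interface{}
	_ = yamlv2.Unmarshal(doc, &v2out)
	fmt.Printf("%T\n", v2out) // map[interface {}]interface {}

	// yaml v3: untyped mappings come back as map[string]interface{}
	var v3out interface{}
	_ = yamlv3.Unmarshal(doc, &v3out)
	fmt.Printf("%T\n", v3out) // map[string]interface {}
}
```

This is why the config tests in this diff build their `c.Settings` fixtures with one map shape on one side and the other shape on the other side.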
func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logger, deviceFactory overlay.DeviceFactory) (retcon *Control, reterr error) {
	ctx, cancel := context.WithCancel(context.Background())
@@ -75,8 +75,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
	if c.GetBool("sshd.enabled", false) {
		sshStart, err = configSSH(l, ssh, c)
		if err != nil {
			l.WithError(err).Warn("Failed to configure sshd, ssh debugging will not be available")
			sshStart = nil
			return nil, util.ContextualizeIfNeeded("Error while configuring the sshd", err)
		}
	}

@@ -186,7 +185,6 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg

	hostMap := NewHostMapFromConfig(l, c)
	punchy := NewPunchyFromConfig(l, c)
	connManager := newConnectionManagerFromConfig(l, c, hostMap, punchy)
	lightHouse, err := NewLightHouseFromConfig(ctx, l, c, pki.getCertState(), udpConns[0], punchy)
	if err != nil {
		return nil, util.ContextualizeIfNeeded("Failed to initialize lighthouse handler", err)
@@ -222,26 +220,31 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
		}
	}

	checkInterval := c.GetInt("timers.connection_alive_interval", 5)
	pendingDeletionInterval := c.GetInt("timers.pending_deletion_interval", 10)

	ifConfig := &InterfaceConfig{
		HostMap: hostMap,
		Inside: tun,
		Outside: udpConns[0],
		pki: pki,
		Firewall: fw,
		ServeDns: serveDns,
		HandshakeManager: handshakeManager,
		connectionManager: connManager,
		lightHouse: lightHouse,
		tryPromoteEvery: c.GetUint32("counters.try_promote", defaultPromoteEvery),
		reQueryEvery: c.GetUint32("counters.requery_every_packets", defaultReQueryEvery),
		reQueryWait: c.GetDuration("timers.requery_wait_duration", defaultReQueryWait),
		DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
		DropMulticast: c.GetBool("tun.drop_multicast", false),
		routines: routines,
		MessageMetrics: messageMetrics,
		version: buildVersion,
		relayManager: NewRelayManager(ctx, l, hostMap, c),
		punchy: punchy,
		HostMap: hostMap,
		Inside: tun,
		Outside: udpConns[0],
		pki: pki,
		Firewall: fw,
		ServeDns: serveDns,
		HandshakeManager: handshakeManager,
		lightHouse: lightHouse,
		checkInterval: time.Second * time.Duration(checkInterval),
		pendingDeletionInterval: time.Second * time.Duration(pendingDeletionInterval),
		tryPromoteEvery: c.GetUint32("counters.try_promote", defaultPromoteEvery),
		reQueryEvery: c.GetUint32("counters.requery_every_packets", defaultReQueryEvery),
		reQueryWait: c.GetDuration("timers.requery_wait_duration", defaultReQueryWait),
		DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
		DropMulticast: c.GetBool("tun.drop_multicast", false),
		routines: routines,
		MessageMetrics: messageMetrics,
		version: buildVersion,
		relayManager: NewRelayManager(ctx, l, hostMap, c),
		punchy: punchy,

		ConntrackCacheTimeout: conntrackCacheTimeout,
		l: l,
	}
@@ -293,6 +296,5 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
		statsStart,
		dnsStart,
		lightHouse.StartUpdateWorker,
		connManager.Start,
	}, nil
}

outside.go (306 changes)
@@ -7,7 +7,6 @@ import (
	"time"

	"github.com/google/gopacket/layers"
	"github.com/slackhq/nebula/packet"
	"golang.org/x/net/ipv6"

	"github.com/sirupsen/logrus"
@@ -20,7 +19,7 @@ const (
	minFwPacketLen = 4
)

func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf *LightHouseHandler, nb []byte, q int, localCache firewall.ConntrackCache, now time.Time) {
func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []byte, packet []byte, h *header.H, fwPacket *firewall.Packet, lhf *LightHouseHandler, nb []byte, q int, localCache firewall.ConntrackCache) {
	err := h.Parse(packet)
	if err != nil {
		// Hole punch packets are 0 or 1 byte big, so lets ignore printing those errors
@@ -32,7 +31,8 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []

	//l.Error("in packet ", header, packet[HeaderLen:])
	if ip.IsValid() {
		if f.myVpnNetworksTable.Contains(ip.Addr()) {
		_, found := f.myVpnNetworksTable.Lookup(ip.Addr())
		if found {
			if f.l.Level >= logrus.DebugLevel {
				f.l.WithField("udpAddr", ip).Debug("Refusing to process double encrypted packet")
			}
@@ -62,7 +62,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []

		switch h.Subtype {
		case header.MessageNone:
			if !f.decryptToTun(hostinfo, h.MessageCounter, out, packet, fwPacket, nb, q, localCache, now) {
			if !f.decryptToTun(hostinfo, h.MessageCounter, out, packet, fwPacket, nb, q, localCache) {
				return
			}
		case header.MessageRelay:
@@ -82,7 +82,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
			// Pull the Roaming parts up here, and return in all call paths.
			f.handleHostRoaming(hostinfo, ip)
			// Track usage of both the HostInfo and the Relay for the received & authenticated packet
			f.connectionManager.In(hostinfo)
			f.connectionManager.In(hostinfo.localIndexId)
			f.connectionManager.RelayUsed(h.RemoteIndex)

			relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex)
@@ -97,7 +97,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []
			case TerminalType:
				// If I am the target of this relay, process the unwrapped packet
				// From this recursive point, all these variables are 'burned'. We shouldn't rely on them again.
				f.readOutsidePackets(netip.AddrPort{}, &ViaSender{relayHI: hostinfo, remoteIdx: relay.RemoteIndex, relay: relay}, out[:0], signedPayload, h, fwPacket, lhf, nb, q, localCache, now)
				f.readOutsidePackets(netip.AddrPort{}, &ViaSender{relayHI: hostinfo, remoteIdx: relay.RemoteIndex, relay: relay}, out[:0], signedPayload, h, fwPacket, lhf, nb, q, localCache)
				return
			case ForwardingType:
				// Find the target HostInfo relay object
@@ -214,218 +214,7 @@ func (f *Interface) readOutsidePackets(ip netip.AddrPort, via *ViaSender, out []

	f.handleHostRoaming(hostinfo, ip)

	f.connectionManager.In(hostinfo)
}

func (f *Interface) readOutsidePacketsMany(packets []*packet.Packet, out []*packet.OutPacket, h *header.H, fwPacket *firewall.Packet, lhf *LightHouseHandler, nb []byte, q int, localCache firewall.ConntrackCache, now time.Time) {
|
||||
for i, pkt := range packets {
|
||||
out[i].Scratch = out[i].Scratch[:0]
|
||||
ip := pkt.AddrPort()
|
||||
|
||||
//l.Error("in packet ", header, packet[HeaderLen:])
|
||||
if ip.IsValid() {
|
||||
if f.myVpnNetworksTable.Contains(ip.Addr()) {
|
||||
if f.l.Level >= logrus.DebugLevel {
|
||||
f.l.WithField("udpAddr", ip).Debug("Refusing to process double encrypted packet")
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
//todo per-segment!
|
||||
for segment := range pkt.Segments() {
|
||||
|
||||
err := h.Parse(segment)
|
||||
if err != nil {
|
||||
// Hole punch packets are 0 or 1 byte big, so lets ignore printing those errors
|
||||
if len(segment) > 1 {
|
||||
f.l.WithField("packet", pkt).Infof("Error while parsing inbound packet from %s: %s", ip, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var hostinfo *HostInfo
|
||||
// verify if we've seen this index before, otherwise respond to the handshake initiation
|
||||
if h.Type == header.Message && h.Subtype == header.MessageRelay {
|
||||
hostinfo = f.hostMap.QueryRelayIndex(h.RemoteIndex)
|
||||
} else {
|
||||
hostinfo = f.hostMap.QueryIndex(h.RemoteIndex)
|
||||
}
|
||||
|
||||
var ci *ConnectionState
|
||||
if hostinfo != nil {
|
||||
ci = hostinfo.ConnectionState
|
||||
}
|
||||
|
||||
switch h.Type {
|
||||
case header.Message:
|
||||
// TODO handleEncrypted sends directly to addr on error. Handle this in the tunneling case.
|
||||
if !f.handleEncrypted(ci, ip, h) {
|
||||
return
|
||||
}
|
||||
|
||||
switch h.Subtype {
|
||||
case header.MessageNone:
|
||||
if !f.decryptToTunDelayWrite(hostinfo, h.MessageCounter, out[i], pkt, segment, fwPacket, nb, q, localCache, now) {
|
||||
return
|
||||
}
|
||||
case header.MessageRelay:
|
||||
// The entire body is sent as AD, not encrypted.
|
||||
// The packet consists of a 16-byte parsed Nebula header, Associated Data-protected payload, and a trailing 16-byte AEAD signature value.
|
||||
// The packet is guaranteed to be at least 16 bytes at this point, b/c it got past the h.Parse() call above. If it's
|
||||
// otherwise malformed (meaning, there is no trailing 16 byte AEAD value), then this will result in at worst a 0-length slice
|
||||
// which will gracefully fail in the DecryptDanger call.
|
||||
signedPayload := segment[:len(segment)-hostinfo.ConnectionState.dKey.Overhead()]
|
||||
signatureValue := segment[len(segment)-hostinfo.ConnectionState.dKey.Overhead():]
|
||||
out[i].Scratch, err = hostinfo.ConnectionState.dKey.DecryptDanger(out[i].Scratch, signedPayload, signatureValue, h.MessageCounter, nb)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
// Successfully validated the thing. Get rid of the Relay header.
|
||||
signedPayload = signedPayload[header.Len:]
|
||||
// Pull the Roaming parts up here, and return in all call paths.
|
||||
f.handleHostRoaming(hostinfo, ip)
|
||||
// Track usage of both the HostInfo and the Relay for the received & authenticated packet
|
||||
f.connectionManager.In(hostinfo)
|
||||
f.connectionManager.RelayUsed(h.RemoteIndex)
|
||||
|
||||
relay, ok := hostinfo.relayState.QueryRelayForByIdx(h.RemoteIndex)
|
||||
if !ok {
|
||||
// The only way this happens is if hostmap has an index to the correct HostInfo, but the HostInfo is missing
|
||||
// its internal mapping. This should never happen.
|
||||
hostinfo.logger(f.l).WithFields(logrus.Fields{"vpnAddrs": hostinfo.vpnAddrs, "remoteIndex": h.RemoteIndex}).Error("HostInfo missing remote relay index")
|
||||
return
|
||||
}
|
||||
|
||||
switch relay.Type {
|
||||
case TerminalType:
|
||||
// If I am the target of this relay, process the unwrapped packet
|
||||
// From this recursive point, all these variables are 'burned'. We shouldn't rely on them again.
|
||||
f.readOutsidePackets(netip.AddrPort{}, &ViaSender{relayHI: hostinfo, remoteIdx: relay.RemoteIndex, relay: relay}, out[i].Scratch[:0], signedPayload, h, fwPacket, lhf, nb, q, localCache, now)
|
||||
return
|
||||
case ForwardingType:
|
||||
// Find the target HostInfo relay object
|
||||
targetHI, targetRelay, err := f.hostMap.QueryVpnAddrsRelayFor(hostinfo.vpnAddrs, relay.PeerAddr)
|
||||
if err != nil {
|
||||
hostinfo.logger(f.l).WithField("relayTo", relay.PeerAddr).WithError(err).WithField("hostinfo.vpnAddrs", hostinfo.vpnAddrs).Info("Failed to find target host info by ip")
|
||||
return
|
||||
}
|
||||
|
||||
// If that relay is Established, forward the payload through it
|
||||
if targetRelay.State == Established {
|
||||
switch targetRelay.Type {
|
||||
case ForwardingType:
|
||||
// Forward this packet through the relay tunnel
|
||||
// Find the target HostInfo
|
||||
f.SendVia(targetHI, targetRelay, signedPayload, nb, out[i].Scratch, false)
|
||||
return
|
||||
case TerminalType:
|
||||
hostinfo.logger(f.l).Error("Unexpected Relay Type of Terminal")
|
||||
}
|
||||
} else {
|
||||
hostinfo.logger(f.l).WithFields(logrus.Fields{"relayTo": relay.PeerAddr, "relayFrom": hostinfo.vpnAddrs[0], "targetRelayState": targetRelay.State}).Info("Unexpected target relay state")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
case header.LightHouse:
|
||||
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||
if !f.handleEncrypted(ci, ip, h) {
|
||||
return
|
||||
}
|
||||
|
||||
d, err := f.decrypt(hostinfo, h.MessageCounter, out[i].Scratch, segment, h, nb)
|
||||
if err != nil {
|
||||
hostinfo.logger(f.l).WithError(err).WithField("udpAddr", ip).
|
||||
WithField("packet", segment).
|
||||
Error("Failed to decrypt lighthouse packet")
|
||||
return
|
||||
}
|
||||
|
||||
lhf.HandleRequest(ip, hostinfo.vpnAddrs, d, f)
|
||||
|
||||
// Fallthrough to the bottom to record incoming traffic
|
||||
|
||||
case header.Test:
|
||||
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||
if !f.handleEncrypted(ci, ip, h) {
|
||||
return
|
||||
}
|
||||
|
||||
d, err := f.decrypt(hostinfo, h.MessageCounter, out[i].Scratch, segment, h, nb)
|
||||
if err != nil {
|
||||
hostinfo.logger(f.l).WithError(err).WithField("udpAddr", ip).
|
||||
WithField("packet", segment).
|
||||
Error("Failed to decrypt test packet")
|
||||
return
|
||||
}
|
||||
|
||||
if h.Subtype == header.TestRequest {
|
||||
// This testRequest might be from TryPromoteBest, so we should roam
|
||||
// to the new IP address before responding
|
||||
f.handleHostRoaming(hostinfo, ip)
|
||||
f.send(header.Test, header.TestReply, ci, hostinfo, d, nb, out[i].Scratch)
|
||||
}
|
||||
|
||||
// Fallthrough to the bottom to record incoming traffic
|
||||
|
||||
// Non encrypted messages below here, they should not fall through to avoid tracking incoming traffic since they
|
||||
// are unauthenticated
|
||||
|
||||
case header.Handshake:
|
||||
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||
f.handshakeManager.HandleIncoming(ip, nil, segment, h)
|
||||
return
|
||||
|
||||
case header.RecvError:
|
||||
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||
f.handleRecvError(ip, h)
|
||||
return
|
||||
|
||||
case header.CloseTunnel:
|
||||
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||
if !f.handleEncrypted(ci, ip, h) {
|
||||
return
|
||||
}
|
||||
|
||||
hostinfo.logger(f.l).WithField("udpAddr", ip).
|
||||
Info("Close tunnel received, tearing down.")
|
||||
|
||||
f.closeTunnel(hostinfo)
|
||||
return
|
||||
|
||||
case header.Control:
|
||||
if !f.handleEncrypted(ci, ip, h) {
|
||||
return
|
||||
}
|
||||
|
||||
d, err := f.decrypt(hostinfo, h.MessageCounter, out[i].Scratch, segment, h, nb)
|
||||
if err != nil {
|
||||
hostinfo.logger(f.l).WithError(err).WithField("udpAddr", ip).
|
||||
WithField("packet", segment).
|
||||
Error("Failed to decrypt Control packet")
|
||||
return
|
||||
}
|
||||
|
||||
f.relayManager.HandleControlMsg(hostinfo, d, f)
|
||||
|
||||
default:
|
||||
f.messageMetrics.Rx(h.Type, h.Subtype, 1)
|
||||
hostinfo.logger(f.l).Debugf("Unexpected packet received from %s", ip)
|
||||
return
|
||||
}
|
||||
|
||||
f.handleHostRoaming(hostinfo, ip)
|
||||
|
||||
f.connectionManager.In(hostinfo)
|
||||
|
||||
}
|
||||
_, err := f.readers[q].WriteOne(out[i], false, q)
|
||||
if err != nil {
|
||||
f.l.WithError(err).Error("Failed to write packet")
|
||||
}
|
||||
}
|
||||
f.connectionManager.In(hostinfo.localIndexId)
|
||||
}
|
||||
|
||||
// closeTunnel closes a tunnel locally, it does not send a closeTunnel packet to the remote
@@ -466,18 +255,16 @@ func (f *Interface) handleHostRoaming(hostinfo *HostInfo, udpAddr netip.AddrPort

}

// handleEncrypted returns true if a packet should be processed, false otherwise
func (f *Interface) handleEncrypted(ci *ConnectionState, addr netip.AddrPort, h *header.H) bool {
	// If connectionstate does not exist, send a recv error, if possible, to encourage a fast reconnect
	if ci == nil {
	// If connectionstate exists and the replay protector allows, process packet
	// Else, send recv errors for 300 seconds after a restart to allow fast reconnection.
	if ci == nil || !ci.window.Check(f.l, h.MessageCounter) {
		if addr.IsValid() {
			f.maybeSendRecvError(addr, h.RemoteIndex)
			return false
		} else {
			return false
		}
		return false
	}
	// If the window check fails, refuse to process the packet, but don't send a recv error
	if !ci.window.Check(f.l, h.MessageCounter) {
		return false
	}

	return true
@@ -526,11 +313,12 @@ func parseV6(data []byte, incoming bool, fp *firewall.Packet) error {
	offset := ipv6.HeaderLen // Start at the end of the ipv6 header
	next := 0
	for {
		if protoAt >= dataLen {
		if dataLen < offset {
			break
		}
		proto := layers.IPProtocol(data[protoAt])

		proto := layers.IPProtocol(data[protoAt])
		//fmt.Println(proto, protoAt)
		switch proto {
		case layers.IPProtocolICMPv6, layers.IPProtocolESP, layers.IPProtocolNoNextHeader:
			fp.Protocol = uint8(proto)
@@ -578,7 +366,7 @@ func parseV6(data []byte, incoming bool, fp *firewall.Packet) error {

		case layers.IPProtocolAH:
			// Auth headers, used by IPSec, have a different meaning for header length
			if dataLen <= offset+1 {
			if dataLen < offset+1 {
				break
			}

@@ -586,7 +374,7 @@ func parseV6(data []byte, incoming bool, fp *firewall.Packet) error {

		default:
			// Normal ipv6 header length processing
			if dataLen <= offset+1 {
			if dataLen < offset+1 {
				break
			}

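The parseV6 hunks above adjust the bounds checks used while walking the IPv6 extension header chain to find the transport protocol. For context, a standalone sketch of that walk (names and structure here are illustrative, not Nebula's parser): every extension header carries the next protocol in its first byte and a length in its second byte, with AH measuring that length in 4-byte units and the other common extensions in 8-byte units.

```go
// Illustrative sketch of walking IPv6 extension headers with bounds checks.
// Protocol numbers are the IANA values; this is not Nebula's actual code.
package sketch

import "errors"

const (
	ipv6HeaderLen = 40
	protoHopByHop = 0
	protoTCP      = 6
	protoUDP      = 17
	protoRouting  = 43
	protoAH       = 51
	protoICMPv6   = 58
	protoDestOpts = 60
)

var errNoPayload = errors.New("could not find payload")

// findTransport returns the transport protocol and the offset where its header starts.
func findTransport(data []byte) (uint8, int, error) {
	if len(data) < ipv6HeaderLen {
		return 0, 0, errNoPayload
	}
	proto := data[6]        // Next Header field of the fixed IPv6 header
	offset := ipv6HeaderLen // first extension header, if any

	for {
		switch proto {
		case protoTCP, protoUDP, protoICMPv6:
			return proto, offset, nil // reached the transport layer
		case protoAH:
			// AH stores its length in 4-byte units, excluding the first two units.
			if len(data) < offset+2 {
				return 0, 0, errNoPayload
			}
			proto = data[offset]
			offset += (int(data[offset+1]) + 2) * 4
		case protoHopByHop, protoRouting, protoDestOpts:
			// Generic extensions store their length in 8-byte units, excluding the first.
			if len(data) < offset+2 {
				return 0, 0, errNoPayload
			}
			proto = data[offset]
			offset += (int(data[offset+1]) + 1) * 8
		default:
			return 0, 0, errNoPayload
		}
	}
}
```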
@@ -677,55 +465,7 @@ func (f *Interface) decrypt(hostinfo *HostInfo, mc uint64, out []byte, packet []
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (f *Interface) decryptToTunDelayWrite(hostinfo *HostInfo, messageCounter uint64, out *packet.OutPacket, pkt *packet.Packet, inSegment []byte, fwPacket *firewall.Packet, nb []byte, q int, localCache firewall.ConntrackCache, now time.Time) bool {
|
||||
var err error
|
||||
|
||||
seg, err := f.readers[q].AllocSeg(out, q)
|
||||
if err != nil {
|
||||
f.l.WithError(err).Errorln("decryptToTunDelayWrite: failed to allocate segment")
|
||||
return false
|
||||
}
|
||||
|
||||
out.SegmentPayloads[seg] = out.SegmentPayloads[seg][:0]
|
||||
out.SegmentPayloads[seg], err = hostinfo.ConnectionState.dKey.DecryptDanger(out.SegmentPayloads[seg], inSegment[:header.Len], inSegment[header.Len:], messageCounter, nb)
|
||||
if err != nil {
|
||||
hostinfo.logger(f.l).WithError(err).Error("Failed to decrypt packet")
|
||||
return false
|
||||
}
|
||||
|
||||
err = newPacket(out.SegmentPayloads[seg], true, fwPacket)
|
||||
if err != nil {
|
||||
hostinfo.logger(f.l).WithError(err).WithField("packet", out).
|
||||
Warnf("Error while validating inbound packet")
|
||||
return false
|
||||
}
|
||||
|
||||
if !hostinfo.ConnectionState.window.Update(f.l, messageCounter) {
|
||||
hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
|
||||
Debugln("dropping out of window packet")
|
||||
return false
|
||||
}
|
||||
|
||||
dropReason := f.firewall.Drop(*fwPacket, true, hostinfo, f.pki.GetCAPool(), localCache, now)
|
||||
if dropReason != nil {
|
||||
// NOTE: We give `packet` as the `out` here since we already decrypted from it and we don't need it anymore
|
||||
// This gives us a buffer to build the reject packet in
|
||||
f.rejectOutside(out.SegmentPayloads[seg], hostinfo.ConnectionState, hostinfo, nb, inSegment, q)
|
||||
if f.l.Level >= logrus.DebugLevel {
|
||||
hostinfo.logger(f.l).WithField("fwPacket", fwPacket).
|
||||
WithField("reason", dropReason).
|
||||
Debugln("dropping inbound packet")
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
f.connectionManager.In(hostinfo)
|
||||
pkt.OutLen += len(inSegment)
|
||||
out.Segments[seg] = out.Segments[seg][:len(out.SegmentHeaders[seg])+len(out.SegmentPayloads[seg])]
|
||||
return true
|
||||
}
|
||||
|
||||
func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out []byte, packet []byte, fwPacket *firewall.Packet, nb []byte, q int, localCache firewall.ConntrackCache, now time.Time) bool {
|
||||
func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out []byte, packet []byte, fwPacket *firewall.Packet, nb []byte, q int, localCache firewall.ConntrackCache) bool {
|
||||
var err error
|
||||
|
||||
out, err = hostinfo.ConnectionState.dKey.DecryptDanger(out, packet[:header.Len], packet[header.Len:], messageCounter, nb)
|
||||
@@ -747,7 +487,7 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
|
||||
return false
|
||||
}
|
||||
|
||||
dropReason := f.firewall.Drop(*fwPacket, true, hostinfo, f.pki.GetCAPool(), localCache, now)
|
||||
dropReason := f.firewall.Drop(*fwPacket, true, hostinfo, f.pki.GetCAPool(), localCache)
|
||||
if dropReason != nil {
|
||||
// NOTE: We give `packet` as the `out` here since we already decrypted from it and we don't need it anymore
|
||||
// This gives us a buffer to build the reject packet in
|
||||
@@ -760,7 +500,7 @@ func (f *Interface) decryptToTun(hostinfo *HostInfo, messageCounter uint64, out
|
||||
return false
|
||||
}
|
||||
|
||||
f.connectionManager.In(hostinfo)
|
||||
f.connectionManager.In(hostinfo.localIndexId)
|
||||
_, err = f.readers[q].Write(out)
|
||||
if err != nil {
|
||||
f.l.WithError(err).Error("Failed to write to tun")
|
||||
@@ -799,6 +539,10 @@ func (f *Interface) handleRecvError(addr netip.AddrPort, h *header.H) {
|
||||
return
|
||||
}
|
||||
|
||||
if !hostinfo.RecvErrorExceeded() {
|
||||
return
|
||||
}
|
||||
|
||||
if hostinfo.remote.IsValid() && hostinfo.remote != addr {
|
||||
f.l.Infoln("Someone spoofing recv_errors? ", addr, hostinfo.remote)
|
||||
return
|
||||
|
||||
@@ -117,45 +117,6 @@ func Test_newPacket_v6(t *testing.T) {
|
||||
err = newPacket(buffer.Bytes(), true, p)
|
||||
require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)
|
||||
|
||||
// A v6 packet with a hop-by-hop extension
|
||||
// ICMPv6 Payload (Echo Request)
|
||||
icmpLayer := layers.ICMPv6{
|
||||
TypeCode: layers.ICMPv6TypeEchoRequest,
|
||||
}
|
||||
// Hop-by-Hop Extension Header
|
||||
hopOption := layers.IPv6HopByHopOption{}
|
||||
hopOption.OptionData = []byte{0, 0, 0, 0}
|
||||
hopByHop := layers.IPv6HopByHop{}
|
||||
hopByHop.Options = append(hopByHop.Options, &hopOption)
|
||||
|
||||
ip = layers.IPv6{
|
||||
Version: 6,
|
||||
HopLimit: 128,
|
||||
NextHeader: layers.IPProtocolIPv6Destination,
|
||||
SrcIP: net.IPv6linklocalallrouters,
|
||||
DstIP: net.IPv6linklocalallnodes,
|
||||
}
|
||||
|
||||
buffer.Clear()
|
||||
err = gopacket.SerializeLayers(buffer, gopacket.SerializeOptions{
|
||||
ComputeChecksums: false,
|
||||
FixLengths: true,
|
||||
}, &ip, &hopByHop, &icmpLayer)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
// Ensure buffer length checks during parsing with the next 2 tests.
|
||||
|
||||
// A full IPv6 header and 1 byte in the first extension, but missing
|
||||
// the length byte.
|
||||
err = newPacket(buffer.Bytes()[:41], true, p)
|
||||
require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)
|
||||
|
||||
// A full IPv6 header plus 1 full extension, but only 1 byte of the
|
||||
// next layer, missing length byte
|
||||
err = newPacket(buffer.Bytes()[:49], true, p)
|
||||
require.ErrorIs(t, err, ErrIPv6CouldNotFindPayload)
|
||||
|
||||
// A good ICMP packet
|
||||
ip = layers.IPv6{
|
||||
Version: 6,
|
||||
@@ -327,10 +288,6 @@ func Test_newPacket_v6(t *testing.T) {
|
||||
assert.Equal(t, uint16(22), p.LocalPort)
|
||||
assert.False(t, p.Fragment)
|
||||
|
||||
// Ensure buffer bounds checking during processing
|
||||
err = newPacket(b[:41], true, p)
|
||||
require.ErrorIs(t, err, ErrIPv6PacketTooShort)
|
||||
|
||||
// Invalid AH header
|
||||
b = buffer.Bytes()
|
||||
err = newPacket(b, true, p)
|
||||
|
||||
@@ -1,16 +1,15 @@
package overlay

import (
	"io"
	"net/netip"

	"github.com/slackhq/nebula/routing"
)

type Device interface {
	TunDev
	io.ReadWriteCloser
	Activate() error
	Networks() []netip.Prefix
	Name() string
	RoutesFor(netip.Addr) routing.Gateways
	NewMultiQueueReader() (TunDev, error)
	RouteFor(netip.Addr) netip.Addr
	NewMultiQueueReader() (io.ReadWriteCloser, error)
}

@@ -1,91 +0,0 @@
|
||||
package eventfd
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"syscall"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type EventFD struct {
|
||||
fd int
|
||||
buf [8]byte
|
||||
}
|
||||
|
||||
func New() (EventFD, error) {
|
||||
fd, err := unix.Eventfd(0, unix.EFD_NONBLOCK)
|
||||
if err != nil {
|
||||
return EventFD{}, err
|
||||
}
|
||||
return EventFD{
|
||||
fd: fd,
|
||||
buf: [8]byte{},
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (e *EventFD) Kick() error {
|
||||
binary.LittleEndian.PutUint64(e.buf[:], 1) //is this right???
|
||||
_, err := syscall.Write(int(e.fd), e.buf[:])
|
||||
return err
|
||||
}
|
||||
|
||||
func (e *EventFD) Close() error {
|
||||
if e.fd != 0 {
|
||||
return unix.Close(e.fd)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *EventFD) FD() int {
|
||||
return e.fd
|
||||
}
|
||||
|
||||
type Epoll struct {
|
||||
fd int
|
||||
buf [8]byte
|
||||
events []syscall.EpollEvent
|
||||
}
|
||||
|
||||
func NewEpoll() (Epoll, error) {
|
||||
fd, err := unix.EpollCreate1(0)
|
||||
if err != nil {
|
||||
return Epoll{}, err
|
||||
}
|
||||
return Epoll{
|
||||
fd: fd,
|
||||
buf: [8]byte{},
|
||||
events: make([]syscall.EpollEvent, 1),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (ep *Epoll) AddEvent(fdToAdd int) error {
|
||||
event := syscall.EpollEvent{
|
||||
Events: syscall.EPOLLIN,
|
||||
Fd: int32(fdToAdd),
|
||||
}
|
||||
return syscall.EpollCtl(ep.fd, syscall.EPOLL_CTL_ADD, fdToAdd, &event)
|
||||
}
|
||||
|
||||
func (ep *Epoll) Block() (int, error) {
|
||||
n, err := syscall.EpollWait(ep.fd, ep.events, -1)
|
||||
if err != nil {
|
||||
//goland:noinspection GoDirectComparisonOfErrors
|
||||
if err == syscall.EINTR {
|
||||
return 0, nil //??
|
||||
}
|
||||
return -1, err
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (ep *Epoll) Clear() error {
|
||||
_, err := syscall.Read(int(ep.events[0].Fd), ep.buf[:])
|
||||
return err
|
||||
}
|
||||
|
||||
func (ep *Epoll) Close() error {
|
||||
if ep.fd != 0 {
|
||||
return unix.Close(ep.fd)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
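The eventfd file above (removed in this diff) pairs an eventfd with an epoll instance so a reader can block until either a device fd or the eventfd becomes readable, with Kick used to wake it from another goroutine. A rough usage sketch under that assumption; the import path is hypothetical and error handling is trimmed:

```go
// Sketch of how the removed eventfd/Epoll helpers appear intended to be used.
// The import path below is assumed, not taken from the repository.
package sketch

import "github.com/slackhq/nebula/overlay/eventfd" // hypothetical import path

// waitForDevice blocks until deviceFD or the eventfd is readable.
func waitForDevice(deviceFD int) error {
	ev, err := eventfd.New()
	if err != nil {
		return err
	}
	ep, err := eventfd.NewEpoll()
	if err != nil {
		return err
	}
	if err = ep.AddEvent(deviceFD); err != nil {
		return err
	}
	if err = ep.AddEvent(ev.FD()); err != nil {
		return err
	}

	go func() { _ = ev.Kick() }() // another goroutine wakes the reader, e.g. on shutdown

	n, err := ep.Block() // sleep until one of the registered fds is ready
	if err != nil {
		return err
	}
	if n > 0 {
		_ = ep.Clear() // drain the counter of the fd that woke us
	}
	return nil
}
```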
@@ -11,14 +11,13 @@ import (
	"github.com/gaissmai/bart"
	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/config"
	"github.com/slackhq/nebula/routing"
)

type Route struct {
	MTU int
	Metric int
	Cidr netip.Prefix
	Via routing.Gateways
	Via netip.Addr
	Install bool
}

@@ -48,17 +47,15 @@ func (r Route) String() string {
	return s
}

func makeRouteTree(l *logrus.Logger, routes []Route, allowMTU bool) (*bart.Table[routing.Gateways], error) {
	routeTree := new(bart.Table[routing.Gateways])
func makeRouteTree(l *logrus.Logger, routes []Route, allowMTU bool) (*bart.Table[netip.Addr], error) {
	routeTree := new(bart.Table[netip.Addr])
	for _, r := range routes {
		if !allowMTU && r.MTU > 0 {
			l.WithField("route", r).Warnf("route MTU is not supported in %s", runtime.GOOS)
		}

		gateways := r.Via
		if len(gateways) > 0 {
			routing.CalculateBucketsForGateways(gateways)
			routeTree.Insert(r.Cidr, gateways)
		if r.Via.IsValid() {
			routeTree.Insert(r.Cidr, r.Via)
		}
	}
	return routeTree, nil
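On one side of this diff makeRouteTree maps each prefix to a routing.Gateways slice rather than a single next hop, which is what lets an unsafe route balance across several gateways. A short sketch of building such an entry with the helpers referenced elsewhere in this diff (NewGateway, CalculateBucketsForGateways, bart.Table); the addresses and weights are made up:

```go
// Sketch: the multipath gateway set that makeRouteTree inserts per prefix.
package sketch

import (
	"net/netip"

	"github.com/gaissmai/bart"
	"github.com/slackhq/nebula/routing"
)

func buildExampleTree() *bart.Table[routing.Gateways] {
	gws := routing.Gateways{
		routing.NewGateway(netip.MustParseAddr("10.0.0.1"), 10), // weight 10
		routing.NewGateway(netip.MustParseAddr("10.0.0.2"), 5),  // weight 5
	}
	// Precompute the selection buckets for the configured weights before inserting.
	routing.CalculateBucketsForGateways(gws)

	tree := new(bart.Table[routing.Gateways])
	tree.Insert(netip.MustParsePrefix("192.168.89.0/24"), gws)
	return tree
}
```

A lookup on the resulting tree (as in the tests below) returns the whole gateway slice for the longest matching prefix, so the caller can pick one gateway per flow according to the precomputed weights.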
@@ -72,7 +69,7 @@ func parseRoutes(c *config.C, networks []netip.Prefix) ([]Route, error) {
|
||||
return []Route{}, nil
|
||||
}
|
||||
|
||||
rawRoutes, ok := r.([]any)
|
||||
rawRoutes, ok := r.([]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("tun.routes is not an array")
|
||||
}
|
||||
@@ -83,7 +80,7 @@ func parseRoutes(c *config.C, networks []netip.Prefix) ([]Route, error) {
|
||||
|
||||
routes := make([]Route, len(rawRoutes))
|
||||
for i, r := range rawRoutes {
|
||||
m, ok := r.(map[string]any)
|
||||
m, ok := r.(map[interface{}]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("entry %v in tun.routes is invalid", i+1)
|
||||
}
|
||||
@@ -151,7 +148,7 @@ func parseUnsafeRoutes(c *config.C, networks []netip.Prefix) ([]Route, error) {
|
||||
return []Route{}, nil
|
||||
}
|
||||
|
||||
rawRoutes, ok := r.([]any)
|
||||
rawRoutes, ok := r.([]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("tun.unsafe_routes is not an array")
|
||||
}
|
||||
@@ -162,7 +159,7 @@ func parseUnsafeRoutes(c *config.C, networks []netip.Prefix) ([]Route, error) {
|
||||
|
||||
routes := make([]Route, len(rawRoutes))
|
||||
for i, r := range rawRoutes {
|
||||
m, ok := r.(map[string]any)
|
||||
m, ok := r.(map[interface{}]interface{})
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("entry %v in tun.unsafe_routes is invalid", i+1)
|
||||
}
|
||||
@@ -204,63 +201,14 @@ func parseUnsafeRoutes(c *config.C, networks []netip.Prefix) ([]Route, error) {
|
||||
return nil, fmt.Errorf("entry %v.via in tun.unsafe_routes is not present", i+1)
|
||||
}
|
||||
|
||||
var gateways routing.Gateways
|
||||
via, ok := rVia.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("entry %v.via in tun.unsafe_routes is not a string: found %T", i+1, rVia)
|
||||
}
|
||||
|
||||
switch via := rVia.(type) {
|
||||
case string:
|
||||
viaIp, err := netip.ParseAddr(via)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("entry %v.via in tun.unsafe_routes failed to parse address: %v", i+1, err)
|
||||
}
|
||||
|
||||
gateways = routing.Gateways{routing.NewGateway(viaIp, 1)}
|
||||
|
||||
case []any:
|
||||
gateways = make(routing.Gateways, len(via))
|
||||
for ig, v := range via {
|
||||
gatewayMap, ok := v.(map[string]any)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("entry %v in tun.unsafe_routes[%v].via is invalid", i+1, ig+1)
|
||||
}
|
||||
|
||||
rGateway, ok := gatewayMap["gateway"]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("entry .gateway in tun.unsafe_routes[%v].via[%v] is not present", i+1, ig+1)
|
||||
}
|
||||
|
||||
parsedGateway, ok := rGateway.(string)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("entry .gateway in tun.unsafe_routes[%v].via[%v] is not a string", i+1, ig+1)
|
||||
}
|
||||
|
||||
gatewayIp, err := netip.ParseAddr(parsedGateway)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("entry .gateway in tun.unsafe_routes[%v].via[%v] failed to parse address: %v", i+1, ig+1, err)
|
||||
}
|
||||
|
||||
rGatewayWeight, ok := gatewayMap["weight"]
|
||||
if !ok {
|
||||
rGatewayWeight = 1
|
||||
}
|
||||
|
||||
gatewayWeight, ok := rGatewayWeight.(int)
|
||||
if !ok {
|
||||
_, err = strconv.ParseInt(rGatewayWeight.(string), 10, 32)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("entry .weight in tun.unsafe_routes[%v].via[%v] is not an integer", i+1, ig+1)
|
||||
}
|
||||
}
|
||||
|
||||
if gatewayWeight < 1 || gatewayWeight > math.MaxInt32 {
|
||||
return nil, fmt.Errorf("entry .weight in tun.unsafe_routes[%v].via[%v] is not in range (1-%d) : %v", i+1, ig+1, math.MaxInt32, gatewayWeight)
|
||||
}
|
||||
|
||||
gateways[ig] = routing.NewGateway(gatewayIp, gatewayWeight)
|
||||
|
||||
}
|
||||
|
||||
default:
|
||||
return nil, fmt.Errorf("entry %v.via in tun.unsafe_routes is not a string or list of gateways: found %T", i+1, rVia)
|
||||
viaVpnIp, err := netip.ParseAddr(via)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("entry %v.via in tun.unsafe_routes failed to parse address: %v", i+1, err)
|
||||
}
|
||||
|
||||
rRoute, ok := m["route"]
|
||||
@@ -278,7 +226,7 @@ func parseUnsafeRoutes(c *config.C, networks []netip.Prefix) ([]Route, error) {
|
||||
}
|
||||
|
||||
r := Route{
|
||||
Via: gateways,
|
||||
Via: viaVpnIp,
|
||||
MTU: mtu,
|
||||
Metric: metric,
|
||||
Install: install,
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/routing"
|
||||
"github.com/slackhq/nebula/test"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
@@ -24,75 +23,75 @@ func Test_parseRoutes(t *testing.T) {
|
||||
assert.Empty(t, routes)
|
||||
|
||||
// not an array
|
||||
c.Settings["tun"] = map[string]any{"routes": "hi"}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": "hi"}
|
||||
routes, err = parseRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "tun.routes is not an array")
|
||||
|
||||
// no routes
|
||||
c.Settings["tun"] = map[string]any{"routes": []any{}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{}}
|
||||
routes, err = parseRoutes(c, []netip.Prefix{n})
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, routes)
|
||||
|
||||
// weird route
|
||||
c.Settings["tun"] = map[string]any{"routes": []any{"asdf"}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{"asdf"}}
|
||||
routes, err = parseRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1 in tun.routes is invalid")
|
||||
|
||||
// no mtu
|
||||
c.Settings["tun"] = map[string]any{"routes": []any{map[string]any{}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{}}}
|
||||
routes, err = parseRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.mtu in tun.routes is not present")
|
||||
|
||||
// bad mtu
|
||||
c.Settings["tun"] = map[string]any{"routes": []any{map[string]any{"mtu": "nope"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "nope"}}}
|
||||
routes, err = parseRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.mtu in tun.routes is not an integer: strconv.Atoi: parsing \"nope\": invalid syntax")
|
||||
|
||||
// low mtu
|
||||
c.Settings["tun"] = map[string]any{"routes": []any{map[string]any{"mtu": "499"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "499"}}}
|
||||
routes, err = parseRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.mtu in tun.routes is below 500: 499")
|
||||
|
||||
// missing route
|
||||
c.Settings["tun"] = map[string]any{"routes": []any{map[string]any{"mtu": "500"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "500"}}}
|
||||
routes, err = parseRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.route in tun.routes is not present")
|
||||
|
||||
// unparsable route
|
||||
c.Settings["tun"] = map[string]any{"routes": []any{map[string]any{"mtu": "500", "route": "nope"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "route": "nope"}}}
|
||||
routes, err = parseRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.route in tun.routes failed to parse: netip.ParsePrefix(\"nope\"): no '/'")
|
||||
|
||||
// below network range
|
||||
c.Settings["tun"] = map[string]any{"routes": []any{map[string]any{"mtu": "500", "route": "1.0.0.0/8"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "route": "1.0.0.0/8"}}}
|
||||
routes, err = parseRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.route in tun.routes is not contained within the configured vpn networks; route: 1.0.0.0/8, networks: [10.0.0.0/24]")
|
||||
|
||||
// above network range
|
||||
c.Settings["tun"] = map[string]any{"routes": []any{map[string]any{"mtu": "500", "route": "10.0.1.0/24"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "route": "10.0.1.0/24"}}}
|
||||
routes, err = parseRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.route in tun.routes is not contained within the configured vpn networks; route: 10.0.1.0/24, networks: [10.0.0.0/24]")
|
||||
|
||||
// Not in multiple ranges
|
||||
c.Settings["tun"] = map[string]any{"routes": []any{map[string]any{"mtu": "500", "route": "192.0.0.0/24"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "route": "192.0.0.0/24"}}}
|
||||
routes, err = parseRoutes(c, []netip.Prefix{n, netip.MustParsePrefix("192.1.0.0/24")})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.route in tun.routes is not contained within the configured vpn networks; route: 192.0.0.0/24, networks: [10.0.0.0/24 192.1.0.0/24]")
|
||||
|
||||
// happy case
|
||||
c.Settings["tun"] = map[string]any{"routes": []any{
|
||||
map[string]any{"mtu": "9000", "route": "10.0.0.0/29"},
|
||||
map[string]any{"mtu": "8000", "route": "10.0.0.1/32"},
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"routes": []interface{}{
|
||||
map[interface{}]interface{}{"mtu": "9000", "route": "10.0.0.0/29"},
|
||||
map[interface{}]interface{}{"mtu": "8000", "route": "10.0.0.1/32"},
|
||||
}}
|
||||
routes, err = parseRoutes(c, []netip.Prefix{n})
|
||||
require.NoError(t, err)
|
||||
@@ -129,129 +128,105 @@ func Test_parseUnsafeRoutes(t *testing.T) {
|
||||
assert.Empty(t, routes)
|
||||
|
||||
// not an array
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": "hi"}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": "hi"}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "tun.unsafe_routes is not an array")
|
||||
|
||||
// no routes
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
require.NoError(t, err)
|
||||
assert.Empty(t, routes)
|
||||
|
||||
// weird route
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{"asdf"}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{"asdf"}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1 in tun.unsafe_routes is invalid")
|
||||
|
||||
// no via
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.via in tun.unsafe_routes is not present")
|
||||
|
||||
// invalid via
|
||||
for _, invalidValue := range []any{
|
||||
for _, invalidValue := range []interface{}{
|
||||
127, false, nil, 1.0, []string{"1", "2"},
|
||||
} {
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"via": invalidValue}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": invalidValue}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, fmt.Sprintf("entry 1.via in tun.unsafe_routes is not a string or list of gateways: found %T", invalidValue))
|
||||
require.EqualError(t, err, fmt.Sprintf("entry 1.via in tun.unsafe_routes is not a string: found %T", invalidValue))
|
||||
}
|
||||
|
||||
// Unparsable list of via
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"via": []string{"1", "2"}}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.via in tun.unsafe_routes is not a string or list of gateways: found []string")
|
||||
|
||||
// unparsable via
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"mtu": "500", "via": "nope"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"mtu": "500", "via": "nope"}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.via in tun.unsafe_routes failed to parse address: ParseAddr(\"nope\"): unable to parse IP")
|
||||
|
||||
// unparsable gateway
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"mtu": "500", "via": []any{map[string]any{"gateway": "1"}}}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry .gateway in tun.unsafe_routes[1].via[1] failed to parse address: ParseAddr(\"1\"): unable to parse IP")
|
||||
|
||||
// missing gateway element
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"mtu": "500", "via": []any{map[string]any{"weight": "1"}}}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry .gateway in tun.unsafe_routes[1].via[1] is not present")
|
||||
|
||||
// unparsable weight element
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"mtu": "500", "via": []any{map[string]any{"gateway": "10.0.0.1", "weight": "a"}}}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry .weight in tun.unsafe_routes[1].via[1] is not an integer")
|
||||
|
||||
// missing route
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"via": "127.0.0.1", "mtu": "500"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "500"}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.route in tun.unsafe_routes is not present")
|
||||
|
||||
// unparsable route
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"via": "127.0.0.1", "mtu": "500", "route": "nope"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "500", "route": "nope"}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.route in tun.unsafe_routes failed to parse: netip.ParsePrefix(\"nope\"): no '/'")
|
||||
|
||||
// within network range
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"via": "127.0.0.1", "route": "10.0.0.0/24"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "route": "10.0.0.0/24"}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.route in tun.unsafe_routes is contained within the configured vpn networks; route: 10.0.0.0/24, network: 10.0.0.0/24")
|
||||
|
||||
// below network range
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"via": "127.0.0.1", "route": "1.0.0.0/8"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "route": "1.0.0.0/8"}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Len(t, routes, 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// above network range
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"via": "127.0.0.1", "route": "10.0.1.0/24"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "route": "10.0.1.0/24"}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Len(t, routes, 1)
|
||||
require.NoError(t, err)
|
||||
|
||||
// no mtu
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"via": "127.0.0.1", "route": "1.0.0.0/8"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "route": "1.0.0.0/8"}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Len(t, routes, 1)
|
||||
assert.Equal(t, 0, routes[0].MTU)
|
||||
|
||||
// bad mtu
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"via": "127.0.0.1", "mtu": "nope"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "nope"}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.mtu in tun.unsafe_routes is not an integer: strconv.Atoi: parsing \"nope\": invalid syntax")
|
||||
|
||||
// low mtu
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"via": "127.0.0.1", "mtu": "499"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "499"}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.mtu in tun.unsafe_routes is below 500: 499")
|
||||
|
||||
// bad install
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{map[string]any{"via": "127.0.0.1", "mtu": "9000", "route": "1.0.0.0/29", "install": "nope"}}}
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "9000", "route": "1.0.0.0/29", "install": "nope"}}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
assert.Nil(t, routes)
|
||||
require.EqualError(t, err, "entry 1.install in tun.unsafe_routes is not a boolean: strconv.ParseBool: parsing \"nope\": invalid syntax")
|
||||
|
||||
// happy case
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{
|
||||
map[string]any{"via": "127.0.0.1", "mtu": "9000", "route": "1.0.0.0/29", "install": "t"},
|
||||
map[string]any{"via": "127.0.0.1", "mtu": "8000", "route": "1.0.0.1/32", "install": 0},
|
||||
map[string]any{"via": "127.0.0.1", "mtu": "1500", "metric": 1234, "route": "1.0.0.2/32", "install": 1},
|
||||
map[string]any{"via": "127.0.0.1", "mtu": "1500", "metric": 1234, "route": "1.0.0.2/32"},
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{
|
||||
map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "9000", "route": "1.0.0.0/29", "install": "t"},
|
||||
map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "8000", "route": "1.0.0.1/32", "install": 0},
|
||||
map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "1500", "metric": 1234, "route": "1.0.0.2/32", "install": 1},
|
||||
map[interface{}]interface{}{"via": "127.0.0.1", "mtu": "1500", "metric": 1234, "route": "1.0.0.2/32"},
|
||||
}}
|
||||
routes, err = parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
require.NoError(t, err)
|
||||
@@ -288,9 +263,9 @@ func Test_makeRouteTree(t *testing.T) {
|
||||
n, err := netip.ParsePrefix("10.0.0.0/24")
|
||||
require.NoError(t, err)
|
||||
|
||||
c.Settings["tun"] = map[string]any{"unsafe_routes": []any{
|
||||
map[string]any{"via": "192.168.0.1", "route": "1.0.0.0/28"},
|
||||
map[string]any{"via": "192.168.0.2", "route": "1.0.0.1/32"},
|
||||
c.Settings["tun"] = map[interface{}]interface{}{"unsafe_routes": []interface{}{
|
||||
map[interface{}]interface{}{"via": "192.168.0.1", "route": "1.0.0.0/28"},
|
||||
map[interface{}]interface{}{"via": "192.168.0.2", "route": "1.0.0.1/32"},
|
||||
}}
|
||||
routes, err := parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
require.NoError(t, err)
|
||||
@@ -305,7 +280,7 @@ func Test_makeRouteTree(t *testing.T) {
|
||||
|
||||
nip, err := netip.ParseAddr("192.168.0.1")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, nip, r[0].Addr())
|
||||
assert.Equal(t, nip, r)
|
||||
|
||||
ip, err = netip.ParseAddr("1.0.0.1")
|
||||
require.NoError(t, err)
|
||||
@@ -314,91 +289,10 @@ func Test_makeRouteTree(t *testing.T) {
|
||||
|
||||
nip, err = netip.ParseAddr("192.168.0.2")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, nip, r[0].Addr())
|
||||
assert.Equal(t, nip, r)
|
||||
|
||||
ip, err = netip.ParseAddr("1.1.0.1")
|
||||
require.NoError(t, err)
|
||||
r, ok = routeTree.Lookup(ip)
|
||||
assert.False(t, ok)
|
||||
}
|
||||
|
||||
func Test_makeMultipathUnsafeRouteTree(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
c := config.NewC(l)
|
||||
n, err := netip.ParsePrefix("10.0.0.0/24")
|
||||
require.NoError(t, err)
|
||||
|
||||
c.Settings["tun"] = map[string]any{
|
||||
"unsafe_routes": []any{
|
||||
map[string]any{
|
||||
"route": "192.168.86.0/24",
|
||||
"via": "192.168.100.10",
|
||||
},
|
||||
map[string]any{
|
||||
"route": "192.168.87.0/24",
|
||||
"via": []any{
|
||||
map[string]any{
|
||||
"gateway": "10.0.0.1",
|
||||
},
|
||||
map[string]any{
|
||||
"gateway": "10.0.0.2",
|
||||
},
|
||||
map[string]any{
|
||||
"gateway": "10.0.0.3",
|
||||
},
|
||||
},
|
||||
},
|
||||
map[string]any{
|
||||
"route": "192.168.89.0/24",
|
||||
"via": []any{
|
||||
map[string]any{
|
||||
"gateway": "10.0.0.1",
|
||||
"weight": 10,
|
||||
},
|
||||
map[string]any{
|
||||
"gateway": "10.0.0.2",
|
||||
"weight": 5,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
routes, err := parseUnsafeRoutes(c, []netip.Prefix{n})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, routes, 3)
|
||||
routeTree, err := makeRouteTree(l, routes, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
ip, err := netip.ParseAddr("192.168.86.1")
|
||||
require.NoError(t, err)
|
||||
r, ok := routeTree.Lookup(ip)
|
||||
assert.True(t, ok)
|
||||
|
||||
nip, err := netip.ParseAddr("192.168.100.10")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, nip, r[0].Addr())
|
||||
|
||||
ip, err = netip.ParseAddr("192.168.87.1")
|
||||
require.NoError(t, err)
|
||||
r, ok = routeTree.Lookup(ip)
|
||||
assert.True(t, ok)
|
||||
|
||||
expectedGateways := routing.Gateways{routing.NewGateway(netip.MustParseAddr("10.0.0.1"), 1),
|
||||
routing.NewGateway(netip.MustParseAddr("10.0.0.2"), 1),
|
||||
routing.NewGateway(netip.MustParseAddr("10.0.0.3"), 1)}
|
||||
|
||||
routing.CalculateBucketsForGateways(expectedGateways)
|
||||
assert.ElementsMatch(t, expectedGateways, r)
|
||||
|
||||
ip, err = netip.ParseAddr("192.168.89.1")
|
||||
require.NoError(t, err)
|
||||
r, ok = routeTree.Lookup(ip)
|
||||
assert.True(t, ok)
|
||||
|
||||
expectedGateways = routing.Gateways{routing.NewGateway(netip.MustParseAddr("10.0.0.1"), 10),
|
||||
routing.NewGateway(netip.MustParseAddr("10.0.0.2"), 5)}
|
||||
|
||||
routing.CalculateBucketsForGateways(expectedGateways)
|
||||
assert.ElementsMatch(t, expectedGateways, r)
|
||||
}
|
||||
|
||||
@@ -1,30 +1,15 @@
|
||||
package overlay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/netip"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/packet"
|
||||
"github.com/slackhq/nebula/util"
|
||||
)
|
||||
|
||||
const DefaultMTU = 1300
|
||||
|
||||
type TunDev interface {
|
||||
io.WriteCloser
|
||||
ReadMany(x []*packet.VirtIOPacket, q int) (int, error)
|
||||
|
||||
//todo this interface sux
|
||||
AllocSeg(pkt *packet.OutPacket, q int) (int, error)
|
||||
WriteOne(x *packet.OutPacket, kick bool, q int) (int, error)
|
||||
WriteMany(x []*packet.OutPacket, q int) (int, error)
|
||||
RecycleRxSeg(pkt *packet.VirtIOPacket, kick bool, q int) error
|
||||
}
|
||||
|
||||
// TODO: We may be able to remove routines
|
||||
type DeviceFactory func(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, routines int) (Device, error)
|
||||
|
||||
@@ -39,11 +24,11 @@ func NewDeviceFromConfig(c *config.C, l *logrus.Logger, vpnNetworks []netip.Pref
|
||||
}
|
||||
}
|
||||
|
||||
//func NewFdDeviceFromConfig(fd *int) DeviceFactory {
|
||||
// return func(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, routines int) (Device, error) {
|
||||
// return newTunFromFd(c, l, *fd, vpnNetworks)
|
||||
// }
|
||||
//}
|
||||
func NewFdDeviceFromConfig(fd *int) DeviceFactory {
|
||||
return func(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, routines int) (Device, error) {
|
||||
return newTunFromFd(c, l, *fd, vpnNetworks)
|
||||
}
|
||||
}
|
||||
|
||||
func getAllRoutesFromConfig(c *config.C, vpnNetworks []netip.Prefix, initial bool) (bool, []Route, error) {
|
||||
if !initial && !c.HasChanged("tun.routes") && !c.HasChanged("tun.unsafe_routes") {
|
||||
@@ -85,51 +70,3 @@ func findRemovedRoutes(newRoutes, oldRoutes []Route) []Route {
|
||||
|
||||
return removed
|
||||
}
|
||||
|
||||
func prefixToMask(prefix netip.Prefix) netip.Addr {
	pLen := 128
	if prefix.Addr().Is4() {
		pLen = 32
	}

	addr, _ := netip.AddrFromSlice(net.CIDRMask(prefix.Bits(), pLen))
	return addr
}

func flipBytes(b []byte) []byte {
	for i := 0; i < len(b); i++ {
		b[i] ^= 0xFF
	}
	return b
}
func orBytes(a []byte, b []byte) []byte {
	ret := make([]byte, len(a))
	for i := 0; i < len(a); i++ {
		ret[i] = a[i] | b[i]
	}
	return ret
}

func getBroadcast(cidr netip.Prefix) netip.Addr {
	broadcast, _ := netip.AddrFromSlice(
		orBytes(
			cidr.Addr().AsSlice(),
			flipBytes(prefixToMask(cidr).AsSlice()),
		),
	)
	return broadcast
}

func selectGateway(dest netip.Prefix, gateways []netip.Prefix) (netip.Prefix, error) {
	for _, gateway := range gateways {
		if dest.Addr().Is4() && gateway.Addr().Is4() {
			return gateway, nil
		}

		if dest.Addr().Is6() && gateway.Addr().Is6() {
			return gateway, nil
		}
	}

	return netip.Prefix{}, fmt.Errorf("no gateway found for %v in the list of vpn networks", dest)
}
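
As a quick worked example of the helpers above (a standalone sketch, not part of the diff): for 192.168.86.0/24 the mask is 255.255.255.0, and OR'ing the network address with the flipped mask bytes yields the broadcast address 192.168.86.255.

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	cidr := netip.MustParsePrefix("192.168.86.0/24")

	mask := net.CIDRMask(cidr.Bits(), 32) // 255.255.255.0
	network := cidr.Masked().Addr().As4() // 192.168.86.0

	var b [4]byte
	for i := range b {
		// OR the network address with the inverted mask, as getBroadcast does.
		b[i] = network[i] | ^mask[i]
	}

	fmt.Println(netip.AddrFrom4(b)) // 192.168.86.255
}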
|
||||
|
||||
@@ -13,7 +13,6 @@ import (
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/routing"
|
||||
"github.com/slackhq/nebula/util"
|
||||
)
|
||||
|
||||
@@ -22,7 +21,7 @@ type tun struct {
|
||||
fd int
|
||||
vpnNetworks []netip.Prefix
|
||||
Routes atomic.Pointer[[]Route]
|
||||
routeTree atomic.Pointer[bart.Table[routing.Gateways]]
|
||||
routeTree atomic.Pointer[bart.Table[netip.Addr]]
|
||||
l *logrus.Logger
|
||||
}
|
||||
|
||||
@@ -57,7 +56,7 @@ func newTun(_ *config.C, _ *logrus.Logger, _ []netip.Prefix, _ bool) (*tun, erro
|
||||
return nil, fmt.Errorf("newTun not supported in Android")
|
||||
}
|
||||
|
||||
func (t *tun) RoutesFor(ip netip.Addr) routing.Gateways {
|
||||
func (t *tun) RouteFor(ip netip.Addr) netip.Addr {
|
||||
r, _ := t.routeTree.Load().Lookup(ip)
|
||||
return r
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/netip"
|
||||
"os"
|
||||
"sync/atomic"
|
||||
@@ -16,7 +17,6 @@ import (
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/routing"
|
||||
"github.com/slackhq/nebula/util"
|
||||
netroute "golang.org/x/net/route"
|
||||
"golang.org/x/sys/unix"
|
||||
@@ -28,7 +28,7 @@ type tun struct {
|
||||
vpnNetworks []netip.Prefix
|
||||
DefaultMTU int
|
||||
Routes atomic.Pointer[[]Route]
|
||||
routeTree atomic.Pointer[bart.Table[routing.Gateways]]
|
||||
routeTree atomic.Pointer[bart.Table[netip.Addr]]
|
||||
linkAddr *netroute.LinkAddr
|
||||
l *logrus.Logger
|
||||
|
||||
@@ -294,6 +294,7 @@ func (t *tun) activate6(network netip.Prefix) error {
|
||||
Vltime: 0xffffffff,
|
||||
Pltime: 0xffffffff,
|
||||
},
|
||||
//TODO: CERT-V2 should we disable DAD (duplicate address detection) and mark this as a secured address?
|
||||
Flags: _IN6_IFF_NODAD,
|
||||
}
|
||||
|
||||
@@ -341,12 +342,12 @@ func (t *tun) reload(c *config.C, initial bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tun) RoutesFor(ip netip.Addr) routing.Gateways {
|
||||
func (t *tun) RouteFor(ip netip.Addr) netip.Addr {
|
||||
r, ok := t.routeTree.Load().Lookup(ip)
|
||||
if ok {
|
||||
return r
|
||||
}
|
||||
return routing.Gateways{}
|
||||
return netip.Addr{}
|
||||
}
|
||||
|
||||
// Get the LinkAddr for the interface of the given name
|
||||
@@ -381,7 +382,7 @@ func (t *tun) addRoutes(logErrors bool) error {
|
||||
routes := *t.Routes.Load()
|
||||
|
||||
for _, r := range routes {
|
||||
if len(r.Via) == 0 || !r.Install {
|
||||
if !r.Via.IsValid() || !r.Install {
|
||||
// We don't allow route MTUs so only install routes with a via
|
||||
continue
|
||||
}
|
||||
@@ -392,7 +393,7 @@ func (t *tun) addRoutes(logErrors bool) error {
|
||||
t.l.WithField("route", r.Cidr).
|
||||
Warnf("unable to add unsafe_route, identical route already exists")
|
||||
} else {
|
||||
retErr := util.NewContextualError("Failed to add route", map[string]any{"route": r}, err)
|
||||
retErr := util.NewContextualError("Failed to add route", map[string]interface{}{"route": r}, err)
|
||||
if logErrors {
|
||||
retErr.Log(t.l)
|
||||
} else {
|
||||
@@ -552,3 +553,13 @@ func (t *tun) Name() string {
|
||||
func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
|
||||
return nil, fmt.Errorf("TODO: multiqueue not implemented for darwin")
|
||||
}
|
||||
|
||||
func prefixToMask(prefix netip.Prefix) netip.Addr {
|
||||
pLen := 128
|
||||
if prefix.Addr().Is4() {
|
||||
pLen = 32
|
||||
}
|
||||
|
||||
addr, _ := netip.AddrFromSlice(net.CIDRMask(prefix.Bits(), pLen))
|
||||
return addr
|
||||
}
|
||||
|
||||
@@ -9,9 +9,6 @@ import (
|
||||
"github.com/rcrowley/go-metrics"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/iputil"
|
||||
"github.com/slackhq/nebula/overlay/virtqueue"
|
||||
"github.com/slackhq/nebula/packet"
|
||||
"github.com/slackhq/nebula/routing"
|
||||
)
|
||||
|
||||
type disabledTun struct {
|
||||
@@ -24,10 +21,6 @@ type disabledTun struct {
|
||||
l *logrus.Logger
|
||||
}
|
||||
|
||||
func (*disabledTun) RecycleRxSeg(pkt *packet.VirtIOPacket, kick bool, q int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func newDisabledTun(vpnNetworks []netip.Prefix, queueLen int, metricsEnabled bool, l *logrus.Logger) *disabledTun {
|
||||
tun := &disabledTun{
|
||||
vpnNetworks: vpnNetworks,
|
||||
@@ -46,16 +39,12 @@ func newDisabledTun(vpnNetworks []netip.Prefix, queueLen int, metricsEnabled boo
|
||||
return tun
|
||||
}
|
||||
|
||||
func (*disabledTun) GetQueues() []*virtqueue.SplitQueue {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*disabledTun) Activate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (*disabledTun) RoutesFor(addr netip.Addr) routing.Gateways {
|
||||
return routing.Gateways{}
|
||||
func (*disabledTun) RouteFor(addr netip.Addr) netip.Addr {
|
||||
return netip.Addr{}
|
||||
}
|
||||
|
||||
func (t *disabledTun) Networks() []netip.Prefix {
|
||||
@@ -115,23 +104,7 @@ func (t *disabledTun) Write(b []byte) (int, error) {
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (t *disabledTun) AllocSeg(pkt *packet.OutPacket, q int) (int, error) {
|
||||
return 0, fmt.Errorf("tun_disabled: AllocSeg not implemented")
|
||||
}
|
||||
|
||||
func (t *disabledTun) WriteOne(x *packet.OutPacket, kick bool, q int) (int, error) {
|
||||
return 0, fmt.Errorf("tun_disabled: WriteOne not implemented")
|
||||
}
|
||||
|
||||
func (t *disabledTun) WriteMany(x []*packet.OutPacket, q int) (int, error) {
|
||||
return 0, fmt.Errorf("tun_disabled: WriteMany not implemented")
|
||||
}
|
||||
|
||||
func (t *disabledTun) ReadMany(b []*packet.VirtIOPacket, _ int) (int, error) {
|
||||
return t.Read(b[0].Payload)
|
||||
}
|
||||
|
||||
func (t *disabledTun) NewMultiQueueReader() (TunDev, error) {
|
||||
func (t *disabledTun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
|
||||
return t, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -10,28 +10,23 @@ import (
|
||||
"io"
|
||||
"io/fs"
|
||||
"net/netip"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/routing"
|
||||
"github.com/slackhq/nebula/util"
|
||||
netroute "golang.org/x/net/route"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
// FIODGNAME is defined in sys/sys/filio.h on FreeBSD
|
||||
// For 32-bit systems, use FIODGNAME_32 (not defined in this file: 0x80086678)
|
||||
FIODGNAME = 0x80106678
|
||||
TUNSIFMODE = 0x8004745e
|
||||
TUNSIFHEAD = 0x80047460
|
||||
OSIOCAIFADDR_IN6 = 0x8088691b
|
||||
IN6_IFF_NODAD = 0x0020
|
||||
FIODGNAME = 0x80106678
|
||||
)
|
||||
|
||||
type fiodgnameArg struct {
|
||||
@@ -41,159 +36,43 @@ type fiodgnameArg struct {
|
||||
}
|
||||
|
||||
type ifreqRename struct {
|
||||
Name [unix.IFNAMSIZ]byte
|
||||
Name [16]byte
|
||||
Data uintptr
|
||||
}
|
||||
|
||||
type ifreqDestroy struct {
|
||||
Name [unix.IFNAMSIZ]byte
|
||||
Name [16]byte
|
||||
pad [16]byte
|
||||
}
|
||||
|
||||
type ifReq struct {
|
||||
Name [unix.IFNAMSIZ]byte
|
||||
Flags uint16
|
||||
}
|
||||
|
||||
type ifreqMTU struct {
|
||||
Name [unix.IFNAMSIZ]byte
|
||||
MTU int32
|
||||
}
|
||||
|
||||
type addrLifetime struct {
|
||||
Expire uint64
|
||||
Preferred uint64
|
||||
Vltime uint32
|
||||
Pltime uint32
|
||||
}
|
||||
|
||||
type ifreqAlias4 struct {
|
||||
Name [unix.IFNAMSIZ]byte
|
||||
Addr unix.RawSockaddrInet4
|
||||
DstAddr unix.RawSockaddrInet4
|
||||
MaskAddr unix.RawSockaddrInet4
|
||||
VHid uint32
|
||||
}
|
||||
|
||||
type ifreqAlias6 struct {
|
||||
Name [unix.IFNAMSIZ]byte
|
||||
Addr unix.RawSockaddrInet6
|
||||
DstAddr unix.RawSockaddrInet6
|
||||
PrefixMask unix.RawSockaddrInet6
|
||||
Flags uint32
|
||||
Lifetime addrLifetime
|
||||
VHid uint32
|
||||
}
|
||||
|
||||
type tun struct {
|
||||
Device string
|
||||
vpnNetworks []netip.Prefix
|
||||
MTU int
|
||||
Routes atomic.Pointer[[]Route]
|
||||
routeTree atomic.Pointer[bart.Table[routing.Gateways]]
|
||||
linkAddr *netroute.LinkAddr
|
||||
routeTree atomic.Pointer[bart.Table[netip.Addr]]
|
||||
l *logrus.Logger
|
||||
devFd int
|
||||
}
|
||||
|
||||
func (t *tun) Read(to []byte) (int, error) {
|
||||
// use readv() to read from the tunnel device, to eliminate the need for copying the buffer
|
||||
if t.devFd < 0 {
|
||||
return -1, syscall.EINVAL
|
||||
}
|
||||
|
||||
// first 4 bytes is protocol family, in network byte order
|
||||
head := make([]byte, 4)
|
||||
|
||||
iovecs := []syscall.Iovec{
|
||||
{&head[0], 4},
|
||||
{&to[0], uint64(len(to))},
|
||||
}
|
||||
|
||||
n, _, errno := syscall.Syscall(syscall.SYS_READV, uintptr(t.devFd), uintptr(unsafe.Pointer(&iovecs[0])), uintptr(2))
|
||||
|
||||
var err error
|
||||
if errno != 0 {
|
||||
err = syscall.Errno(errno)
|
||||
} else {
|
||||
err = nil
|
||||
}
|
||||
// fix bytes read number to exclude header
|
||||
bytesRead := int(n)
|
||||
if bytesRead < 0 {
|
||||
return bytesRead, err
|
||||
} else if bytesRead < 4 {
|
||||
return 0, err
|
||||
} else {
|
||||
return bytesRead - 4, err
|
||||
}
|
||||
}
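
The 4-byte head handled by Read and Write here is the address-family framing the BSD tun prepends once TUNSIFHEAD is enabled. Below is a small sketch of building that prefix for an outbound packet; afHeader is a hypothetical helper for illustration, not part of this diff.

package main

import (
	"fmt"
	"syscall"
)

// afHeader returns the 4-byte protocol-family prefix the tun device expects:
// three zero bytes followed by AF_INET or AF_INET6 (network byte order).
func afHeader(pkt []byte) ([4]byte, error) {
	var head [4]byte
	if len(pkt) == 0 {
		return head, fmt.Errorf("empty packet")
	}
	switch pkt[0] >> 4 {
	case 4:
		head[3] = syscall.AF_INET
	case 6:
		head[3] = syscall.AF_INET6
	default:
		return head, fmt.Errorf("unable to determine IP version from packet")
	}
	return head, nil
}

func main() {
	head, _ := afHeader([]byte{0x45}) // first byte of a minimal IPv4 header
	fmt.Println(head)                 // [0 0 0 2], since AF_INET == 2
}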
|
||||
|
||||
// Write is only valid for single threaded use
|
||||
func (t *tun) Write(from []byte) (int, error) {
|
||||
// use writev() to write to the tunnel device, to eliminate the need for copying the buffer
|
||||
if t.devFd < 0 {
|
||||
return -1, syscall.EINVAL
|
||||
}
|
||||
|
||||
if len(from) <= 1 {
|
||||
return 0, syscall.EIO
|
||||
}
|
||||
ipVer := from[0] >> 4
|
||||
var head []byte
|
||||
// first 4 bytes is protocol family, in network byte order
|
||||
if ipVer == 4 {
|
||||
head = []byte{0, 0, 0, syscall.AF_INET}
|
||||
} else if ipVer == 6 {
|
||||
head = []byte{0, 0, 0, syscall.AF_INET6}
|
||||
} else {
|
||||
return 0, fmt.Errorf("unable to determine IP version from packet")
|
||||
}
|
||||
iovecs := []syscall.Iovec{
|
||||
{&head[0], 4},
|
||||
{&from[0], uint64(len(from))},
|
||||
}
|
||||
|
||||
n, _, errno := syscall.Syscall(syscall.SYS_WRITEV, uintptr(t.devFd), uintptr(unsafe.Pointer(&iovecs[0])), uintptr(2))
|
||||
|
||||
var err error
|
||||
if errno != 0 {
|
||||
err = syscall.Errno(errno)
|
||||
} else {
|
||||
err = nil
|
||||
}
|
||||
|
||||
return int(n) - 4, err
|
||||
io.ReadWriteCloser
|
||||
}
|
||||
|
||||
func (t *tun) Close() error {
|
||||
if t.devFd >= 0 {
|
||||
err := syscall.Close(t.devFd)
|
||||
if t.ReadWriteCloser != nil {
|
||||
if err := t.ReadWriteCloser.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_IP)
|
||||
if err != nil {
|
||||
t.l.WithError(err).Error("Error closing device")
|
||||
return err
|
||||
}
|
||||
t.devFd = -1
|
||||
defer syscall.Close(s)
|
||||
|
||||
c := make(chan struct{})
|
||||
go func() {
|
||||
// destroying the interface can block if a read() is still pending. Do this asynchronously.
|
||||
defer close(c)
|
||||
s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_IP)
|
||||
if err == nil {
|
||||
defer syscall.Close(s)
|
||||
ifreq := ifreqDestroy{Name: t.deviceBytes()}
|
||||
err = ioctl(uintptr(s), syscall.SIOCIFDESTROY, uintptr(unsafe.Pointer(&ifreq)))
|
||||
}
|
||||
if err != nil {
|
||||
t.l.WithError(err).Error("Error destroying tunnel")
|
||||
}
|
||||
}()
|
||||
ifreq := ifreqDestroy{Name: t.deviceBytes()}
|
||||
|
||||
// wait up to 1 second so we start blocking at the ioctl
|
||||
select {
|
||||
case <-c:
|
||||
case <-time.After(1 * time.Second):
|
||||
}
|
||||
// Destroy the interface
|
||||
err = ioctl(uintptr(s), syscall.SIOCIFDESTROY, uintptr(unsafe.Pointer(&ifreq)))
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -205,37 +84,32 @@ func newTunFromFd(_ *config.C, _ *logrus.Logger, _ int, _ []netip.Prefix) (*tun,
|
||||
|
||||
func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (*tun, error) {
|
||||
// Try to open existing tun device
|
||||
var fd int
|
||||
var file *os.File
|
||||
var err error
|
||||
deviceName := c.GetString("tun.dev", "")
|
||||
if deviceName != "" {
|
||||
fd, err = syscall.Open("/dev/"+deviceName, syscall.O_RDWR, 0)
|
||||
file, err = os.OpenFile("/dev/"+deviceName, os.O_RDWR, 0)
|
||||
}
|
||||
if errors.Is(err, fs.ErrNotExist) || deviceName == "" {
|
||||
// If the device doesn't already exist, request a new one and rename it
|
||||
fd, err = syscall.Open("/dev/tun", syscall.O_RDWR, 0)
|
||||
file, err = os.OpenFile("/dev/tun", os.O_RDWR, 0)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Read the name of the interface
|
||||
rawConn, err := file.SyscallConn()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("SyscallConn: %v", err)
|
||||
}
|
||||
|
||||
var name [16]byte
|
||||
arg := fiodgnameArg{length: 16, buf: unsafe.Pointer(&name)}
|
||||
ctrlErr := ioctl(uintptr(fd), FIODGNAME, uintptr(unsafe.Pointer(&arg)))
|
||||
|
||||
if ctrlErr == nil {
|
||||
// set broadcast mode and multicast
|
||||
ifmode := uint32(unix.IFF_BROADCAST | unix.IFF_MULTICAST)
|
||||
ctrlErr = ioctl(uintptr(fd), TUNSIFMODE, uintptr(unsafe.Pointer(&ifmode)))
|
||||
}
|
||||
|
||||
if ctrlErr == nil {
|
||||
// turn on link-layer mode, to support ipv6
|
||||
ifhead := uint32(1)
|
||||
ctrlErr = ioctl(uintptr(fd), TUNSIFHEAD, uintptr(unsafe.Pointer(&ifhead)))
|
||||
}
|
||||
|
||||
var ctrlErr error
|
||||
rawConn.Control(func(fd uintptr) {
|
||||
// Read the name of the interface
|
||||
arg := fiodgnameArg{length: 16, buf: unsafe.Pointer(&name)}
|
||||
ctrlErr = ioctl(fd, FIODGNAME, uintptr(unsafe.Pointer(&arg)))
|
||||
})
|
||||
if ctrlErr != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -247,7 +121,11 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (
|
||||
|
||||
// If the name doesn't match the desired interface name, rename it now
|
||||
if ifName != deviceName {
|
||||
s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
|
||||
s, err := syscall.Socket(
|
||||
syscall.AF_INET,
|
||||
syscall.SOCK_DGRAM,
|
||||
syscall.IPPROTO_IP,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -270,11 +148,11 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (
|
||||
}
|
||||
|
||||
t := &tun{
|
||||
Device: deviceName,
|
||||
vpnNetworks: vpnNetworks,
|
||||
MTU: c.GetInt("tun.mtu", DefaultMTU),
|
||||
l: l,
|
||||
devFd: fd,
|
||||
ReadWriteCloser: file,
|
||||
Device: deviceName,
|
||||
vpnNetworks: vpnNetworks,
|
||||
MTU: c.GetInt("tun.mtu", DefaultMTU),
|
||||
l: l,
|
||||
}
|
||||
|
||||
err = t.reload(c, true)
|
||||
@@ -293,111 +171,38 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (
|
||||
}
|
||||
|
||||
func (t *tun) addIp(cidr netip.Prefix) error {
|
||||
if cidr.Addr().Is4() {
|
||||
ifr := ifreqAlias4{
|
||||
Name: t.deviceBytes(),
|
||||
Addr: unix.RawSockaddrInet4{
|
||||
Len: unix.SizeofSockaddrInet4,
|
||||
Family: unix.AF_INET,
|
||||
Addr: cidr.Addr().As4(),
|
||||
},
|
||||
DstAddr: unix.RawSockaddrInet4{
|
||||
Len: unix.SizeofSockaddrInet4,
|
||||
Family: unix.AF_INET,
|
||||
Addr: getBroadcast(cidr).As4(),
|
||||
},
|
||||
MaskAddr: unix.RawSockaddrInet4{
|
||||
Len: unix.SizeofSockaddrInet4,
|
||||
Family: unix.AF_INET,
|
||||
Addr: prefixToMask(cidr).As4(),
|
||||
},
|
||||
VHid: 0,
|
||||
}
|
||||
s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer syscall.Close(s)
|
||||
// Note: unix.SIOCAIFADDR corresponds to FreeBSD's OSIOCAIFADDR
|
||||
if err := ioctl(uintptr(s), unix.SIOCAIFADDR, uintptr(unsafe.Pointer(&ifr))); err != nil {
|
||||
return fmt.Errorf("failed to set tun address %s: %s", cidr.Addr().String(), err)
|
||||
}
|
||||
return nil
|
||||
var err error
|
||||
// TODO use syscalls instead of exec.Command
|
||||
cmd := exec.Command("/sbin/ifconfig", t.Device, cidr.String(), cidr.Addr().String())
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err = cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to run 'ifconfig': %s", err)
|
||||
}
|
||||
|
||||
if cidr.Addr().Is6() {
|
||||
ifr := ifreqAlias6{
|
||||
Name: t.deviceBytes(),
|
||||
Addr: unix.RawSockaddrInet6{
|
||||
Len: unix.SizeofSockaddrInet6,
|
||||
Family: unix.AF_INET6,
|
||||
Addr: cidr.Addr().As16(),
|
||||
},
|
||||
PrefixMask: unix.RawSockaddrInet6{
|
||||
Len: unix.SizeofSockaddrInet6,
|
||||
Family: unix.AF_INET6,
|
||||
Addr: prefixToMask(cidr).As16(),
|
||||
},
|
||||
Lifetime: addrLifetime{
|
||||
Expire: 0,
|
||||
Preferred: 0,
|
||||
Vltime: 0xffffffff,
|
||||
Pltime: 0xffffffff,
|
||||
},
|
||||
Flags: IN6_IFF_NODAD,
|
||||
}
|
||||
s, err := syscall.Socket(syscall.AF_INET6, syscall.SOCK_DGRAM, syscall.IPPROTO_IP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer syscall.Close(s)
|
||||
|
||||
if err := ioctl(uintptr(s), OSIOCAIFADDR_IN6, uintptr(unsafe.Pointer(&ifr))); err != nil {
|
||||
return fmt.Errorf("failed to set tun address %s: %s", cidr.Addr().String(), err)
|
||||
}
|
||||
return nil
|
||||
cmd = exec.Command("/sbin/route", "-n", "add", "-net", cidr.String(), "-interface", t.Device)
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err = cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to run 'route add': %s", err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("unknown address type %v", cidr)
|
||||
cmd = exec.Command("/sbin/ifconfig", t.Device, "mtu", strconv.Itoa(t.MTU))
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err = cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to run 'ifconfig': %s", err)
|
||||
}
|
||||
|
||||
// Unsafe path routes
|
||||
return t.addRoutes(false)
|
||||
}
|
||||
|
||||
func (t *tun) Activate() error {
|
||||
// Setup our default MTU
|
||||
err := t.setMTU()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
linkAddr, err := getLinkAddr(t.Device)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if linkAddr == nil {
|
||||
return fmt.Errorf("unable to discover link_addr for tun interface")
|
||||
}
|
||||
t.linkAddr = linkAddr
|
||||
|
||||
for i := range t.vpnNetworks {
|
||||
err := t.addIp(t.vpnNetworks[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return t.addRoutes(false)
|
||||
}
|
||||
|
||||
func (t *tun) setMTU() error {
|
||||
// Set the MTU on the device
|
||||
s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer syscall.Close(s)
|
||||
|
||||
ifm := ifreqMTU{Name: t.deviceBytes(), MTU: int32(t.MTU)}
|
||||
err = ioctl(uintptr(s), unix.SIOCSIFMTU, uintptr(unsafe.Pointer(&ifm)))
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tun) reload(c *config.C, initial bool) error {
|
||||
@@ -437,7 +242,7 @@ func (t *tun) reload(c *config.C, initial bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tun) RoutesFor(ip netip.Addr) routing.Gateways {
|
||||
func (t *tun) RouteFor(ip netip.Addr) netip.Addr {
|
||||
r, _ := t.routeTree.Load().Lookup(ip)
|
||||
return r
|
||||
}
|
||||
@@ -457,21 +262,20 @@ func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
|
||||
func (t *tun) addRoutes(logErrors bool) error {
|
||||
routes := *t.Routes.Load()
|
||||
for _, r := range routes {
|
||||
if len(r.Via) == 0 || !r.Install {
|
||||
if !r.Via.IsValid() || !r.Install {
|
||||
// We don't allow route MTUs so only install routes with a via
|
||||
continue
|
||||
}
|
||||
|
||||
err := addRoute(r.Cidr, t.linkAddr)
|
||||
if err != nil {
|
||||
retErr := util.NewContextualError("Failed to add route", map[string]any{"route": r}, err)
|
||||
cmd := exec.Command("/sbin/route", "-n", "add", "-net", r.Cidr.String(), "-interface", t.Device)
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err := cmd.Run(); err != nil {
|
||||
retErr := util.NewContextualError("failed to run 'route add' for unsafe_route", map[string]interface{}{"route": r}, err)
|
||||
if logErrors {
|
||||
retErr.Log(t.l)
|
||||
} else {
|
||||
return retErr
|
||||
}
|
||||
} else {
|
||||
t.l.WithField("route", r).Info("Added route")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -484,8 +288,9 @@ func (t *tun) removeRoutes(routes []Route) error {
|
||||
continue
|
||||
}
|
||||
|
||||
err := delRoute(r.Cidr, t.linkAddr)
|
||||
if err != nil {
|
||||
cmd := exec.Command("/sbin/route", "-n", "delete", "-net", r.Cidr.String(), "-interface", t.Device)
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.l.WithError(err).WithField("route", r).Error("Failed to remove route")
|
||||
} else {
|
||||
t.l.WithField("route", r).Info("Removed route")
|
||||
@@ -500,120 +305,3 @@ func (t *tun) deviceBytes() (o [16]byte) {
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func addRoute(prefix netip.Prefix, gateway netroute.Addr) error {
|
||||
sock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
|
||||
}
|
||||
defer unix.Close(sock)
|
||||
|
||||
route := &netroute.RouteMessage{
|
||||
Version: unix.RTM_VERSION,
|
||||
Type: unix.RTM_ADD,
|
||||
Flags: unix.RTF_UP,
|
||||
Seq: 1,
|
||||
}
|
||||
|
||||
if prefix.Addr().Is4() {
|
||||
route.Addrs = []netroute.Addr{
|
||||
unix.RTAX_DST: &netroute.Inet4Addr{IP: prefix.Masked().Addr().As4()},
|
||||
unix.RTAX_NETMASK: &netroute.Inet4Addr{IP: prefixToMask(prefix).As4()},
|
||||
unix.RTAX_GATEWAY: gateway,
|
||||
}
|
||||
} else {
|
||||
route.Addrs = []netroute.Addr{
|
||||
unix.RTAX_DST: &netroute.Inet6Addr{IP: prefix.Masked().Addr().As16()},
|
||||
unix.RTAX_NETMASK: &netroute.Inet6Addr{IP: prefixToMask(prefix).As16()},
|
||||
unix.RTAX_GATEWAY: gateway,
|
||||
}
|
||||
}
|
||||
|
||||
data, err := route.Marshal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create route.RouteMessage: %w", err)
|
||||
}
|
||||
|
||||
_, err = unix.Write(sock, data[:])
|
||||
if err != nil {
|
||||
if errors.Is(err, unix.EEXIST) {
|
||||
// Try to do a change
|
||||
route.Type = unix.RTM_CHANGE
|
||||
data, err = route.Marshal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create route.RouteMessage for change: %w", err)
|
||||
}
|
||||
_, err = unix.Write(sock, data[:])
|
||||
fmt.Println("DOING CHANGE")
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func delRoute(prefix netip.Prefix, gateway netroute.Addr) error {
|
||||
sock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
|
||||
}
|
||||
defer unix.Close(sock)
|
||||
|
||||
route := netroute.RouteMessage{
|
||||
Version: unix.RTM_VERSION,
|
||||
Type: unix.RTM_DELETE,
|
||||
Seq: 1,
|
||||
}
|
||||
|
||||
if prefix.Addr().Is4() {
|
||||
route.Addrs = []netroute.Addr{
|
||||
unix.RTAX_DST: &netroute.Inet4Addr{IP: prefix.Masked().Addr().As4()},
|
||||
unix.RTAX_NETMASK: &netroute.Inet4Addr{IP: prefixToMask(prefix).As4()},
|
||||
unix.RTAX_GATEWAY: gateway,
|
||||
}
|
||||
} else {
|
||||
route.Addrs = []netroute.Addr{
|
||||
unix.RTAX_DST: &netroute.Inet6Addr{IP: prefix.Masked().Addr().As16()},
|
||||
unix.RTAX_NETMASK: &netroute.Inet6Addr{IP: prefixToMask(prefix).As16()},
|
||||
unix.RTAX_GATEWAY: gateway,
|
||||
}
|
||||
}
|
||||
|
||||
data, err := route.Marshal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create route.RouteMessage: %w", err)
|
||||
}
|
||||
_, err = unix.Write(sock, data[:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getLinkAddr Gets the link address for the interface of the given name
|
||||
func getLinkAddr(name string) (*netroute.LinkAddr, error) {
|
||||
rib, err := netroute.FetchRIB(unix.AF_UNSPEC, unix.NET_RT_IFLIST, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
msgs, err := netroute.ParseRIB(unix.NET_RT_IFLIST, rib)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, m := range msgs {
|
||||
switch m := m.(type) {
|
||||
case *netroute.InterfaceMessage:
|
||||
if m.Name == name {
|
||||
sa, ok := m.Addrs[unix.RTAX_IFP].(*netroute.LinkAddr)
|
||||
if ok {
|
||||
return sa, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
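
Putting the two helpers above together, installing one of the unsafe routes comes down to resolving the tun interface's LinkAddr and handing it to addRoute. A rough sketch under that assumption; installUnsafeRoute is a hypothetical wrapper, and error handling is kept minimal:

// installUnsafeRoute leans on getLinkAddr and addRoute as defined above.
func installUnsafeRoute(ifName string, cidr netip.Prefix) error {
	linkAddr, err := getLinkAddr(ifName)
	if err != nil {
		return err
	}
	if linkAddr == nil {
		return fmt.Errorf("unable to discover link_addr for %s", ifName)
	}

	// addRoute writes an RTM_ADD message on an AF_ROUTE socket and retries as
	// RTM_CHANGE if an identical route already exists.
	return addRoute(cidr, linkAddr)
}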
|
||||
|
||||
@@ -16,7 +16,6 @@ import (
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/routing"
|
||||
"github.com/slackhq/nebula/util"
|
||||
)
|
||||
|
||||
@@ -24,7 +23,7 @@ type tun struct {
|
||||
io.ReadWriteCloser
|
||||
vpnNetworks []netip.Prefix
|
||||
Routes atomic.Pointer[[]Route]
|
||||
routeTree atomic.Pointer[bart.Table[routing.Gateways]]
|
||||
routeTree atomic.Pointer[bart.Table[netip.Addr]]
|
||||
l *logrus.Logger
|
||||
}
|
||||
|
||||
@@ -80,7 +79,7 @@ func (t *tun) reload(c *config.C, initial bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tun) RoutesFor(ip netip.Addr) routing.Gateways {
|
||||
func (t *tun) RouteFor(ip netip.Addr) netip.Addr {
|
||||
r, _ := t.routeTree.Load().Lookup(ip)
|
||||
return r
|
||||
}
|
||||
|
||||
@@ -5,6 +5,7 @@ package overlay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/netip"
|
||||
"os"
|
||||
@@ -16,19 +17,14 @@ import (
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/overlay/vhostnet"
|
||||
"github.com/slackhq/nebula/packet"
|
||||
"github.com/slackhq/nebula/routing"
|
||||
"github.com/slackhq/nebula/util"
|
||||
"github.com/slackhq/nebula/util/virtio"
|
||||
"github.com/vishvananda/netlink"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type tun struct {
|
||||
file *os.File
|
||||
io.ReadWriteCloser
|
||||
fd int
|
||||
vdev []*vhostnet.Device
|
||||
Device string
|
||||
vpnNetworks []netip.Prefix
|
||||
MaxMTU int
|
||||
@@ -37,14 +33,12 @@ type tun struct {
|
||||
deviceIndex int
|
||||
ioctlFd uintptr
|
||||
|
||||
Routes atomic.Pointer[[]Route]
|
||||
routeTree atomic.Pointer[bart.Table[routing.Gateways]]
|
||||
routeChan chan struct{}
|
||||
useSystemRoutes bool
|
||||
useSystemRoutesBufferSize int
|
||||
Routes atomic.Pointer[[]Route]
|
||||
routeTree atomic.Pointer[bart.Table[netip.Addr]]
|
||||
routeChan chan struct{}
|
||||
useSystemRoutes bool
|
||||
|
||||
isV6 bool
|
||||
l *logrus.Logger
|
||||
l *logrus.Logger
|
||||
}
|
||||
|
||||
func (t *tun) Networks() []netip.Prefix {
|
||||
@@ -106,7 +100,7 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, multiqueu
|
||||
}
|
||||
|
||||
var req ifReq
|
||||
req.Flags = uint16(unix.IFF_TUN | unix.IFF_NO_PI | unix.IFF_TUN_EXCL | unix.IFF_VNET_HDR | unix.IFF_NAPI)
|
||||
req.Flags = uint16(unix.IFF_TUN | unix.IFF_NO_PI)
|
||||
if multiqueue {
|
||||
req.Flags |= unix.IFF_MULTI_QUEUE
|
||||
}
|
||||
@@ -116,56 +110,25 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, multiqueu
|
||||
}
|
||||
name := strings.Trim(string(req.Name[:]), "\x00")
|
||||
|
||||
if err = unix.SetNonblock(fd, true); err != nil {
|
||||
_ = unix.Close(fd)
|
||||
return nil, fmt.Errorf("make file descriptor non-blocking: %w", err)
|
||||
}
|
||||
|
||||
file := os.NewFile(uintptr(fd), "/dev/net/tun")
|
||||
|
||||
err = unix.IoctlSetPointerInt(fd, unix.TUNSETVNETHDRSZ, virtio.NetHdrSize)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("set vnethdr size: %w", err)
|
||||
}
|
||||
|
||||
flags := 0
|
||||
//flags = //unix.TUN_F_CSUM //| unix.TUN_F_TSO4 | unix.TUN_F_USO4 | unix.TUN_F_TSO6 | unix.TUN_F_USO6
|
||||
err = unix.IoctlSetInt(fd, unix.TUNSETOFFLOAD, flags)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("set offloads: %w", err)
|
||||
}
|
||||
|
||||
t, err := newTunGeneric(c, l, file, vpnNetworks)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t.fd = fd
|
||||
t.Device = name
|
||||
|
||||
vdev, err := vhostnet.NewDevice(
|
||||
vhostnet.WithBackendFD(fd),
|
||||
vhostnet.WithQueueSize(8192), //todo config
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
t.vdev = []*vhostnet.Device{vdev}
|
||||
t.Device = name
|
||||
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func newTunGeneric(c *config.C, l *logrus.Logger, file *os.File, vpnNetworks []netip.Prefix) (*tun, error) {
|
||||
t := &tun{
|
||||
file: file,
|
||||
fd: int(file.Fd()),
|
||||
vpnNetworks: vpnNetworks,
|
||||
TXQueueLen: c.GetInt("tun.tx_queue", 500),
|
||||
useSystemRoutes: c.GetBool("tun.use_system_route_table", false),
|
||||
useSystemRoutesBufferSize: c.GetInt("tun.use_system_route_table_buffer_size", 0),
|
||||
l: l,
|
||||
}
|
||||
if len(vpnNetworks) != 0 {
|
||||
t.isV6 = vpnNetworks[0].Addr().Is6() //todo what about multi-IP?
|
||||
ReadWriteCloser: file,
|
||||
fd: int(file.Fd()),
|
||||
vpnNetworks: vpnNetworks,
|
||||
TXQueueLen: c.GetInt("tun.tx_queue", 500),
|
||||
useSystemRoutes: c.GetBool("tun.use_system_route_table", false),
|
||||
l: l,
|
||||
}
|
||||
|
||||
err := t.reload(c, true)
|
||||
@@ -250,7 +213,7 @@ func (t *tun) reload(c *config.C, initial bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tun) NewMultiQueueReader() (TunDev, error) {
|
||||
func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
|
||||
fd, err := unix.Open("/dev/net/tun", os.O_RDWR, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -263,24 +226,39 @@ func (t *tun) NewMultiQueueReader() (TunDev, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
vdev, err := vhostnet.NewDevice(
|
||||
vhostnet.WithBackendFD(fd),
|
||||
vhostnet.WithQueueSize(8192), //todo config
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
file := os.NewFile(uintptr(fd), "/dev/net/tun")
|
||||
|
||||
t.vdev = append(t.vdev, vdev)
|
||||
|
||||
return t, nil
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func (t *tun) RoutesFor(ip netip.Addr) routing.Gateways {
|
||||
func (t *tun) RouteFor(ip netip.Addr) netip.Addr {
|
||||
r, _ := t.routeTree.Load().Lookup(ip)
|
||||
return r
|
||||
}
|
||||
|
||||
func (t *tun) Write(b []byte) (int, error) {
|
||||
var nn int
|
||||
maximum := len(b)
|
||||
|
||||
for {
|
||||
n, err := unix.Write(t.fd, b[nn:maximum])
|
||||
if n > 0 {
|
||||
nn += n
|
||||
}
|
||||
if nn == len(b) {
|
||||
return nn, err
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nn, err
|
||||
}
|
||||
|
||||
if n == 0 {
|
||||
return nn, io.ErrUnexpectedEOF
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (t *tun) deviceBytes() (o [16]byte) {
|
||||
for i, c := range t.Device {
|
||||
o[i] = byte(c)
|
||||
@@ -312,6 +290,7 @@ func (t *tun) addIPs(link netlink.Link) error {
|
||||
|
||||
//add all new addresses
|
||||
for i := range newAddrs {
|
||||
//TODO: CERT-V2 do we want to stack errors and try as many ops as possible?
|
||||
//AddrReplace still adds new IPs, but if their properties change it will change them as well
|
||||
if err := netlink.AddrReplace(link, newAddrs[i]); err != nil {
|
||||
return err
|
||||
@@ -379,11 +358,6 @@ func (t *tun) Activate() error {
|
||||
t.l.WithError(err).Error("Failed to set tun tx queue length")
|
||||
}
|
||||
|
||||
const modeNone = 1
|
||||
if err = netlink.LinkSetIP6AddrGenMode(link, modeNone); err != nil {
|
||||
t.l.WithError(err).Warn("Failed to disable link local address generation")
|
||||
}
|
||||
|
||||
if err = t.addIPs(link); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -489,7 +463,7 @@ func (t *tun) addRoutes(logErrors bool) error {
|
||||
|
||||
err := netlink.RouteReplace(&nr)
|
||||
if err != nil {
|
||||
retErr := util.NewContextualError("Failed to add route", map[string]any{"route": r}, err)
|
||||
retErr := util.NewContextualError("Failed to add route", map[string]interface{}{"route": r}, err)
|
||||
if logErrors {
|
||||
retErr.Log(t.l)
|
||||
} else {
|
||||
@@ -556,13 +530,7 @@ func (t *tun) watchRoutes() {
|
||||
rch := make(chan netlink.RouteUpdate)
|
||||
doneChan := make(chan struct{})
|
||||
|
||||
netlinkOptions := netlink.RouteSubscribeOptions{
|
||||
ReceiveBufferSize: t.useSystemRoutesBufferSize,
|
||||
ReceiveBufferForceSize: t.useSystemRoutesBufferSize != 0,
|
||||
ErrorCallback: func(e error) { t.l.WithError(e).Errorf("netlink error") },
|
||||
}
|
||||
|
||||
if err := netlink.RouteSubscribeWithOptions(rch, doneChan, netlinkOptions); err != nil {
|
||||
if err := netlink.RouteSubscribe(rch, doneChan); err != nil {
|
||||
t.l.WithError(err).Errorf("failed to subscribe to system route changes")
|
||||
return
|
||||
}
|
||||
@@ -572,14 +540,8 @@ func (t *tun) watchRoutes() {
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case r, ok := <-rch:
|
||||
if ok {
|
||||
t.updateRoutes(r)
|
||||
} else {
|
||||
// maybe we should do something here since
|
||||
// netlink stops sending updates
|
||||
return
|
||||
}
|
||||
case r := <-rch:
|
||||
t.updateRoutes(r)
|
||||
case <-doneChan:
|
||||
// netlink.RouteSubscriber will close the rch for us
|
||||
return
|
||||
@@ -588,7 +550,20 @@ func (t *tun) watchRoutes() {
|
||||
}()
|
||||
}
|
||||
|
||||
func (t *tun) isGatewayInVpnNetworks(gwAddr netip.Addr) bool {
|
||||
func (t *tun) updateRoutes(r netlink.RouteUpdate) {
|
||||
if r.Gw == nil {
|
||||
// Not a gateway route, ignore
|
||||
t.l.WithField("route", r).Debug("Ignoring route update, not a gateway route")
|
||||
return
|
||||
}
|
||||
|
||||
gwAddr, ok := netip.AddrFromSlice(r.Gw)
|
||||
if !ok {
|
||||
t.l.WithField("route", r).Debug("Ignoring route update, invalid gateway address")
|
||||
return
|
||||
}
|
||||
|
||||
gwAddr = gwAddr.Unmap()
|
||||
withinNetworks := false
|
||||
for i := range t.vpnNetworks {
|
||||
if t.vpnNetworks[i].Contains(gwAddr) {
|
||||
@@ -596,73 +571,9 @@ func (t *tun) isGatewayInVpnNetworks(gwAddr netip.Addr) bool {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return withinNetworks
|
||||
}
|
||||
|
||||
func (t *tun) getGatewaysFromRoute(r *netlink.Route) routing.Gateways {
|
||||
|
||||
var gateways routing.Gateways
|
||||
|
||||
link, err := netlink.LinkByName(t.Device)
|
||||
if err != nil {
|
||||
t.l.WithField("Devicename", t.Device).Error("Ignoring route update: failed to get link by name")
|
||||
return gateways
|
||||
}
|
||||
|
||||
// If this route is relevant to our interface and there is a gateway then add it
|
||||
if r.LinkIndex == link.Attrs().Index && len(r.Gw) > 0 {
|
||||
gwAddr, ok := netip.AddrFromSlice(r.Gw)
|
||||
if !ok {
|
||||
t.l.WithField("route", r).Debug("Ignoring route update, invalid gateway address")
|
||||
} else {
|
||||
gwAddr = gwAddr.Unmap()
|
||||
|
||||
if !t.isGatewayInVpnNetworks(gwAddr) {
|
||||
// Gateway isn't in our overlay network, ignore
|
||||
t.l.WithField("route", r).Debug("Ignoring route update, not in our network")
|
||||
} else {
|
||||
gateways = append(gateways, routing.NewGateway(gwAddr, 1))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, p := range r.MultiPath {
|
||||
// If this route is relevant to our interface and there is a gateway then add it
|
||||
if p.LinkIndex == link.Attrs().Index && len(p.Gw) > 0 {
|
||||
gwAddr, ok := netip.AddrFromSlice(p.Gw)
|
||||
if !ok {
|
||||
t.l.WithField("route", r).Debug("Ignoring multipath route update, invalid gateway address")
|
||||
} else {
|
||||
gwAddr = gwAddr.Unmap()
|
||||
|
||||
if !t.isGatewayInVpnNetworks(gwAddr) {
|
||||
// Gateway isn't in our overlay network, ignore
|
||||
t.l.WithField("route", r).Debug("Ignoring route update, not in our network")
|
||||
} else {
|
||||
// p.Hops+1 = weight of the route
|
||||
gateways = append(gateways, routing.NewGateway(gwAddr, p.Hops+1))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
routing.CalculateBucketsForGateways(gateways)
|
||||
return gateways
|
||||
}
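
For reference, the multipath branch above turns each next hop's Hops value into a gateway weight of Hops+1, since netlink stores weight-1. A sketch of the kind of system route the watcher would translate into two gateways weighted 10 and 5; the link index and destination here are illustrative assumptions:

package main

import (
	"fmt"
	"net"

	"github.com/vishvananda/netlink"
)

func main() {
	_, dst, _ := net.ParseCIDR("192.168.89.0/24")

	// Placeholder for the overlay device's link index.
	tunIndex := 3

	// Hops 9 and 4 become weights 10 and 5 once the watcher adds one.
	route := netlink.Route{
		Dst: dst,
		MultiPath: []*netlink.NexthopInfo{
			{LinkIndex: tunIndex, Gw: net.ParseIP("10.0.0.1"), Hops: 9},
			{LinkIndex: tunIndex, Gw: net.ParseIP("10.0.0.2"), Hops: 4},
		},
	}
	fmt.Println(route)
}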
|
||||
|
||||
func (t *tun) updateRoutes(r netlink.RouteUpdate) {
|
||||
|
||||
gateways := t.getGatewaysFromRoute(&r.Route)
|
||||
|
||||
if len(gateways) == 0 {
|
||||
// No gateways relevant to our network, no routing changes required.
|
||||
t.l.WithField("route", r).Debug("Ignoring route update, no gateways")
|
||||
return
|
||||
}
|
||||
|
||||
if r.Dst == nil {
|
||||
t.l.WithField("route", r).Debug("Ignoring route update, no destination address")
|
||||
if !withinNetworks {
|
||||
// Gateway isn't in our overlay network, ignore
|
||||
t.l.WithField("route", r).Debug("Ignoring route update, not in our networks")
|
||||
return
|
||||
}
|
||||
|
||||
@@ -678,12 +589,12 @@ func (t *tun) updateRoutes(r netlink.RouteUpdate) {
|
||||
newTree := t.routeTree.Load().Clone()
|
||||
|
||||
if r.Type == unix.RTM_NEWROUTE {
|
||||
t.l.WithField("destination", dst).WithField("via", gateways).Info("Adding route")
|
||||
newTree.Insert(dst, gateways)
|
||||
t.l.WithField("destination", r.Dst).WithField("via", r.Gw).Info("Adding route")
|
||||
newTree.Insert(dst, gwAddr)
|
||||
|
||||
} else {
|
||||
t.l.WithField("destination", dst).WithField("via", gateways).Info("Removing route")
|
||||
newTree.Delete(dst)
|
||||
t.l.WithField("destination", r.Dst).WithField("via", r.Gw).Info("Removing route")
|
||||
}
|
||||
t.routeTree.Store(newTree)
|
||||
}
|
||||
@@ -693,14 +604,8 @@ func (t *tun) Close() error {
|
||||
close(t.routeChan)
|
||||
}
|
||||
|
||||
for _, v := range t.vdev {
|
||||
if v != nil {
|
||||
_ = v.Close()
|
||||
}
|
||||
}
|
||||
|
||||
if t.file != nil {
|
||||
_ = t.file.Close()
|
||||
if t.ReadWriteCloser != nil {
|
||||
_ = t.ReadWriteCloser.Close()
|
||||
}
|
||||
|
||||
if t.ioctlFd > 0 {
|
||||
@@ -709,65 +614,3 @@ func (t *tun) Close() error {
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tun) ReadMany(p []*packet.VirtIOPacket, q int) (int, error) {
|
||||
n, err := t.vdev[q].ReceivePackets(p) //we are TXing
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (t *tun) Write(b []byte) (int, error) {
|
||||
maximum := len(b) //we are RXing
|
||||
|
||||
//TODO: this allocates a new OutPacket and copies the payload; reduce the garbage
|
||||
out := packet.NewOut()
|
||||
x, err := t.AllocSeg(out, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
copy(out.SegmentPayloads[x], b)
|
||||
err = t.vdev[0].TransmitPacket(out, true)
|
||||
|
||||
if err != nil {
|
||||
t.l.WithError(err).Error("Transmitting packet")
|
||||
return 0, err
|
||||
}
|
||||
return maximum, nil
|
||||
}
|
||||
|
||||
func (t *tun) AllocSeg(pkt *packet.OutPacket, q int) (int, error) {
|
||||
idx, buf, err := t.vdev[q].GetPacketForTx()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
x := pkt.UseSegment(idx, buf, t.isV6)
|
||||
return x, nil
|
||||
}
|
||||
|
||||
func (t *tun) WriteOne(x *packet.OutPacket, kick bool, q int) (int, error) {
|
||||
if err := t.vdev[q].TransmitPacket(x, kick); err != nil {
|
||||
t.l.WithError(err).Error("Transmitting packet")
|
||||
return 0, err
|
||||
}
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
func (t *tun) WriteMany(x []*packet.OutPacket, q int) (int, error) {
|
||||
maximum := len(x) //we are RXing
|
||||
if maximum == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
err := t.vdev[q].TransmitPackets(x)
|
||||
if err != nil {
|
||||
t.l.WithError(err).Error("Transmitting packet")
|
||||
return 0, err
|
||||
}
|
||||
return maximum, nil
|
||||
}
|
||||
|
||||
func (t *tun) RecycleRxSeg(pkt *packet.VirtIOPacket, kick bool, q int) error {
|
||||
return t.vdev[q].ReceiveQueue.OfferDescriptorChains(pkt.Chains, kick)
|
||||
}
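
Taken together, the methods above give callers a batched transmit path: reserve a descriptor per packet with AllocSeg, copy the payload into the returned segment, then flush everything with WriteMany. A rough sketch of that flow against the TunDev interface from earlier; sendBatch is hypothetical, it mirrors the single-packet Write above, and any per-segment length bookkeeping OutPacket may require is elided:

// sendBatch is a hypothetical caller of the batched transmit path on queue q.
func sendBatch(dev TunDev, payloads [][]byte, q int) error {
	outs := make([]*packet.OutPacket, 0, len(payloads))
	for _, p := range payloads {
		out := packet.NewOut()
		seg, err := dev.AllocSeg(out, q)
		if err != nil {
			return err
		}
		copy(out.SegmentPayloads[seg], p)
		outs = append(outs, out)
	}

	_, err := dev.WriteMany(outs, q)
	return err
}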
|
||||
|
||||
@@ -4,12 +4,13 @@
|
||||
package overlay
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/netip"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
@@ -17,44 +18,12 @@ import (
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/routing"
|
||||
"github.com/slackhq/nebula/util"
|
||||
netroute "golang.org/x/net/route"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
SIOCAIFADDR_IN6 = 0x8080696b
|
||||
TUNSIFHEAD = 0x80047442
|
||||
TUNSIFMODE = 0x80047458
|
||||
)
|
||||
|
||||
type ifreqAlias4 struct {
|
||||
Name [unix.IFNAMSIZ]byte
|
||||
Addr unix.RawSockaddrInet4
|
||||
DstAddr unix.RawSockaddrInet4
|
||||
MaskAddr unix.RawSockaddrInet4
|
||||
}
|
||||
|
||||
type ifreqAlias6 struct {
|
||||
Name [unix.IFNAMSIZ]byte
|
||||
Addr unix.RawSockaddrInet6
|
||||
DstAddr unix.RawSockaddrInet6
|
||||
PrefixMask unix.RawSockaddrInet6
|
||||
Flags uint32
|
||||
Lifetime addrLifetime
|
||||
}
|
||||
|
||||
type ifreq struct {
|
||||
Name [unix.IFNAMSIZ]byte
|
||||
data int
|
||||
}
|
||||
|
||||
type addrLifetime struct {
|
||||
Expire uint64
|
||||
Preferred uint64
|
||||
Vltime uint32
|
||||
Pltime uint32
|
||||
type ifreqDestroy struct {
|
||||
Name [16]byte
|
||||
pad [16]byte
|
||||
}
|
||||
|
||||
type tun struct {
|
||||
@@ -62,20 +31,42 @@ type tun struct {
|
||||
vpnNetworks []netip.Prefix
|
||||
MTU int
|
||||
Routes atomic.Pointer[[]Route]
|
||||
routeTree atomic.Pointer[bart.Table[routing.Gateways]]
|
||||
routeTree atomic.Pointer[bart.Table[netip.Addr]]
|
||||
l *logrus.Logger
|
||||
f *os.File
|
||||
fd int
|
||||
|
||||
io.ReadWriteCloser
|
||||
}
|
||||
|
||||
var deviceNameRE = regexp.MustCompile(`^tun[0-9]+$`)
|
||||
func (t *tun) Close() error {
|
||||
if t.ReadWriteCloser != nil {
|
||||
if err := t.ReadWriteCloser.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_IP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer syscall.Close(s)
|
||||
|
||||
ifreq := ifreqDestroy{Name: t.deviceBytes()}
|
||||
|
||||
err = ioctl(uintptr(s), syscall.SIOCIFDESTROY, uintptr(unsafe.Pointer(&ifreq)))
|
||||
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newTunFromFd(_ *config.C, _ *logrus.Logger, _ int, _ []netip.Prefix) (*tun, error) {
|
||||
return nil, fmt.Errorf("newTunFromFd not supported in NetBSD")
|
||||
}
|
||||
|
||||
var deviceNameRE = regexp.MustCompile(`^tun[0-9]+$`)
|
||||
|
||||
func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (*tun, error) {
|
||||
// Try to open tun device
|
||||
var file *os.File
|
||||
var err error
|
||||
deviceName := c.GetString("tun.dev", "")
|
||||
if deviceName == "" {
|
||||
@@ -85,23 +76,17 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (
|
||||
return nil, fmt.Errorf("a device name in the format of /dev/tunN must be specified")
|
||||
}
|
||||
|
||||
fd, err := unix.Open("/dev/"+deviceName, os.O_RDWR, 0)
|
||||
file, err = os.OpenFile("/dev/"+deviceName, os.O_RDWR, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = unix.SetNonblock(fd, true)
|
||||
if err != nil {
|
||||
l.WithError(err).Warn("Failed to set the tun device as nonblocking")
|
||||
}
|
||||
|
||||
t := &tun{
|
||||
f: os.NewFile(uintptr(fd), ""),
|
||||
fd: fd,
|
||||
Device: deviceName,
|
||||
vpnNetworks: vpnNetworks,
|
||||
MTU: c.GetInt("tun.mtu", DefaultMTU),
|
||||
l: l,
|
||||
ReadWriteCloser: file,
|
||||
Device: deviceName,
|
||||
vpnNetworks: vpnNetworks,
|
||||
MTU: c.GetInt("tun.mtu", DefaultMTU),
|
||||
l: l,
|
||||
}
|
||||
|
||||
err = t.reload(c, true)
|
||||
@@ -119,225 +104,40 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (t *tun) Close() error {
|
||||
if t.f != nil {
|
||||
if err := t.f.Close(); err != nil {
|
||||
return fmt.Errorf("error closing tun file: %w", err)
|
||||
}
|
||||
|
||||
// t.f.Close should have handled it for us but let's be extra sure
|
||||
_ = unix.Close(t.fd)
|
||||
|
||||
s, err := syscall.Socket(syscall.AF_INET, syscall.SOCK_DGRAM, syscall.IPPROTO_IP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer syscall.Close(s)
|
||||
|
||||
ifr := ifreq{Name: t.deviceBytes()}
|
||||
err = ioctl(uintptr(s), syscall.SIOCIFDESTROY, uintptr(unsafe.Pointer(&ifr)))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tun) Read(to []byte) (int, error) {
|
||||
rc, err := t.f.SyscallConn()
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("failed to get syscall conn for tun: %w", err)
|
||||
}
|
||||
|
||||
var errno syscall.Errno
|
||||
var n uintptr
|
||||
err = rc.Read(func(fd uintptr) bool {
|
||||
// first 4 bytes is protocol family, in network byte order
|
||||
head := [4]byte{}
|
||||
iovecs := []syscall.Iovec{
|
||||
{&head[0], 4},
|
||||
{&to[0], uint64(len(to))},
|
||||
}
|
||||
|
||||
n, _, errno = syscall.Syscall(syscall.SYS_READV, fd, uintptr(unsafe.Pointer(&iovecs[0])), uintptr(2))
|
||||
if errno.Temporary() {
|
||||
// We got an EAGAIN, EINTR, or EWOULDBLOCK, go again
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
if err == syscall.EBADF || err.Error() == "use of closed file" {
|
||||
// Go doesn't export poll.ErrFileClosing but happily reports it to us so here we are
|
||||
// https://github.com/golang/go/blob/master/src/internal/poll/fd_poll_runtime.go#L121
|
||||
return 0, os.ErrClosed
|
||||
}
|
||||
return 0, fmt.Errorf("failed to make read call for tun: %w", err)
|
||||
}
|
||||
|
||||
if errno != 0 {
|
||||
return 0, fmt.Errorf("failed to make inner read call for tun: %w", errno)
|
||||
}
|
||||
|
||||
// fix bytes read number to exclude header
|
||||
bytesRead := int(n)
|
||||
if bytesRead < 0 {
|
||||
return bytesRead, nil
|
||||
} else if bytesRead < 4 {
|
||||
return 0, nil
|
||||
} else {
|
||||
return bytesRead - 4, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Write is only valid for single threaded use
|
||||
func (t *tun) Write(from []byte) (int, error) {
|
||||
if len(from) <= 1 {
|
||||
return 0, syscall.EIO
|
||||
}
|
||||
|
||||
ipVer := from[0] >> 4
|
||||
var head [4]byte
|
||||
// first 4 bytes is protocol family, in network byte order
|
||||
if ipVer == 4 {
|
||||
head[3] = syscall.AF_INET
|
||||
} else if ipVer == 6 {
|
||||
head[3] = syscall.AF_INET6
|
||||
} else {
|
||||
return 0, fmt.Errorf("unable to determine IP version from packet")
|
||||
}
|
||||
|
||||
rc, err := t.f.SyscallConn()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var errno syscall.Errno
|
||||
var n uintptr
|
||||
err = rc.Write(func(fd uintptr) bool {
|
||||
iovecs := []syscall.Iovec{
|
||||
{&head[0], 4},
|
||||
{&from[0], uint64(len(from))},
|
||||
}
|
||||
|
||||
n, _, errno = syscall.Syscall(syscall.SYS_WRITEV, fd, uintptr(unsafe.Pointer(&iovecs[0])), uintptr(2))
|
||||
// According to NetBSD documentation for TUN, writes will only return errors when
// this packet will never be delivered, so just go on living life.
|
||||
return true
|
||||
})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if errno != 0 {
|
||||
return 0, errno
|
||||
}
|
||||
|
||||
return int(n) - 4, err
|
||||
}
|
||||
|
||||
func (t *tun) addIp(cidr netip.Prefix) error {
|
||||
if cidr.Addr().Is4() {
|
||||
var req ifreqAlias4
|
||||
req.Name = t.deviceBytes()
|
||||
req.Addr = unix.RawSockaddrInet4{
|
||||
Len: unix.SizeofSockaddrInet4,
|
||||
Family: unix.AF_INET,
|
||||
Addr: cidr.Addr().As4(),
|
||||
}
|
||||
req.DstAddr = unix.RawSockaddrInet4{
|
||||
Len: unix.SizeofSockaddrInet4,
|
||||
Family: unix.AF_INET,
|
||||
Addr: cidr.Addr().As4(),
|
||||
}
|
||||
req.MaskAddr = unix.RawSockaddrInet4{
|
||||
Len: unix.SizeofSockaddrInet4,
|
||||
Family: unix.AF_INET,
|
||||
Addr: prefixToMask(cidr).As4(),
|
||||
}
|
||||
var err error
|
||||
|
||||
s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer syscall.Close(s)
|
||||
|
||||
if err := ioctl(uintptr(s), unix.SIOCAIFADDR, uintptr(unsafe.Pointer(&req))); err != nil {
|
||||
return fmt.Errorf("failed to set tun address %s: %s", cidr.Addr(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
// TODO use syscalls instead of exec.Command
|
||||
cmd := exec.Command("/sbin/ifconfig", t.Device, cidr.String(), cidr.Addr().String())
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err = cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to run 'ifconfig': %s", err)
|
||||
}
|
||||
|
||||
if cidr.Addr().Is6() {
|
||||
var req ifreqAlias6
|
||||
req.Name = t.deviceBytes()
|
||||
req.Addr = unix.RawSockaddrInet6{
|
||||
Len: unix.SizeofSockaddrInet6,
|
||||
Family: unix.AF_INET6,
|
||||
Addr: cidr.Addr().As16(),
|
||||
}
|
||||
req.PrefixMask = unix.RawSockaddrInet6{
|
||||
Len: unix.SizeofSockaddrInet6,
|
||||
Family: unix.AF_INET6,
|
||||
Addr: prefixToMask(cidr).As16(),
|
||||
}
|
||||
req.Lifetime = addrLifetime{
|
||||
Vltime: 0xffffffff,
|
||||
Pltime: 0xffffffff,
|
||||
}
|
||||
|
||||
s, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_IP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer syscall.Close(s)
|
||||
|
||||
if err := ioctl(uintptr(s), SIOCAIFADDR_IN6, uintptr(unsafe.Pointer(&req))); err != nil {
|
||||
return fmt.Errorf("failed to set tun address %s: %s", cidr.Addr().String(), err)
|
||||
}
|
||||
return nil
|
||||
cmd = exec.Command("/sbin/route", "-n", "add", "-net", cidr.String(), cidr.Addr().String())
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err = cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to run 'route add': %s", err)
|
||||
}
|
||||
|
||||
return fmt.Errorf("unknown address type %v", cidr)
|
||||
}
|
||||
|
||||
func (t *tun) Activate() error {
|
||||
mode := int32(unix.IFF_BROADCAST)
|
||||
err := ioctl(uintptr(t.fd), TUNSIFMODE, uintptr(unsafe.Pointer(&mode)))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set tun device mode: %w", err)
|
||||
}
|
||||
|
||||
v := 1
|
||||
err = ioctl(uintptr(t.fd), TUNSIFHEAD, uintptr(unsafe.Pointer(&v)))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set tun device head: %w", err)
|
||||
}
|
||||
|
||||
err = t.doIoctlByName(unix.SIOCSIFMTU, uint32(t.MTU))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set tun mtu: %w", err)
|
||||
}
|
||||
|
||||
for i := range t.vpnNetworks {
|
||||
err = t.addIp(t.vpnNetworks[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cmd = exec.Command("/sbin/ifconfig", t.Device, "mtu", strconv.Itoa(t.MTU))
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err = cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to run 'ifconfig': %s", err)
|
||||
}
|
||||
|
||||
// Unsafe path routes
|
||||
return t.addRoutes(false)
|
||||
}
|
||||
|
||||
func (t *tun) doIoctlByName(ctl uintptr, value uint32) error {
|
||||
s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
|
||||
if err != nil {
|
||||
return err
|
||||
func (t *tun) Activate() error {
|
||||
for i := range t.vpnNetworks {
|
||||
err := t.addIp(t.vpnNetworks[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
defer syscall.Close(s)
|
||||
|
||||
ir := ifreq{Name: t.deviceBytes(), data: int(value)}
|
||||
err = ioctl(uintptr(s), ctl, uintptr(unsafe.Pointer(&ir)))
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tun) reload(c *config.C, initial bool) error {
|
||||
@@ -377,7 +177,7 @@ func (t *tun) reload(c *config.C, initial bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tun) RoutesFor(ip netip.Addr) routing.Gateways {
|
||||
func (t *tun) RouteFor(ip netip.Addr) netip.Addr {
|
||||
r, _ := t.routeTree.Load().Lookup(ip)
|
||||
return r
|
||||
}
|
||||
@@ -396,23 +196,21 @@ func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
|
||||
|
||||
func (t *tun) addRoutes(logErrors bool) error {
|
||||
routes := *t.Routes.Load()
|
||||
|
||||
for _, r := range routes {
|
||||
if len(r.Via) == 0 || !r.Install {
|
||||
if !r.Via.IsValid() || !r.Install {
|
||||
// We don't allow route MTUs so only install routes with a via
|
||||
continue
|
||||
}
|
||||
|
||||
err := addRoute(r.Cidr, t.vpnNetworks)
|
||||
if err != nil {
|
||||
retErr := util.NewContextualError("Failed to add route", map[string]any{"route": r}, err)
|
||||
cmd := exec.Command("/sbin/route", "-n", "add", "-net", r.Cidr.String(), t.vpnNetworks[0].Addr().String())
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err := cmd.Run(); err != nil {
|
||||
retErr := util.NewContextualError("failed to run 'route add' for unsafe_route", map[string]interface{}{"route": r}, err)
|
||||
if logErrors {
|
||||
retErr.Log(t.l)
|
||||
} else {
|
||||
return retErr
|
||||
}
|
||||
} else {
|
||||
t.l.WithField("route", r).Info("Added route")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -425,8 +223,10 @@ func (t *tun) removeRoutes(routes []Route) error {
|
||||
continue
|
||||
}
|
||||
|
||||
err := delRoute(r.Cidr, t.vpnNetworks)
|
||||
if err != nil {
|
||||
//TODO: CERT-V2 is this right?
|
||||
cmd := exec.Command("/sbin/route", "-n", "delete", "-net", r.Cidr.String(), t.vpnNetworks[0].Addr().String())
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.l.WithError(err).WithField("route", r).Error("Failed to remove route")
|
||||
} else {
|
||||
t.l.WithField("route", r).Info("Removed route")
|
||||
@@ -441,109 +241,3 @@ func (t *tun) deviceBytes() (o [16]byte) {
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func addRoute(prefix netip.Prefix, gateways []netip.Prefix) error {
|
||||
sock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
|
||||
}
|
||||
defer unix.Close(sock)
|
||||
|
||||
route := &netroute.RouteMessage{
|
||||
Version: unix.RTM_VERSION,
|
||||
Type: unix.RTM_ADD,
|
||||
Flags: unix.RTF_UP | unix.RTF_GATEWAY,
|
||||
Seq: 1,
|
||||
}
|
||||
|
||||
if prefix.Addr().Is4() {
|
||||
gw, err := selectGateway(prefix, gateways)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
route.Addrs = []netroute.Addr{
|
||||
unix.RTAX_DST: &netroute.Inet4Addr{IP: prefix.Masked().Addr().As4()},
|
||||
unix.RTAX_NETMASK: &netroute.Inet4Addr{IP: prefixToMask(prefix).As4()},
|
||||
unix.RTAX_GATEWAY: &netroute.Inet4Addr{IP: gw.Addr().As4()},
|
||||
}
|
||||
} else {
|
||||
gw, err := selectGateway(prefix, gateways)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
route.Addrs = []netroute.Addr{
|
||||
unix.RTAX_DST: &netroute.Inet6Addr{IP: prefix.Masked().Addr().As16()},
|
||||
unix.RTAX_NETMASK: &netroute.Inet6Addr{IP: prefixToMask(prefix).As16()},
|
||||
unix.RTAX_GATEWAY: &netroute.Inet6Addr{IP: gw.Addr().As16()},
|
||||
}
|
||||
}
|
||||
|
||||
data, err := route.Marshal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create route.RouteMessage: %w", err)
|
||||
}
|
||||
|
||||
_, err = unix.Write(sock, data[:])
|
||||
if err != nil {
|
||||
if errors.Is(err, unix.EEXIST) {
|
||||
// Try to do a change
|
||||
route.Type = unix.RTM_CHANGE
|
||||
data, err = route.Marshal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create route.RouteMessage for change: %w", err)
|
||||
}
|
||||
_, err = unix.Write(sock, data[:])
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
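addRoute and delRoute both call a prefixToMask helper that is not shown in this hunk; a minimal sketch of what such a helper likely does, assuming it simply expands the prefix length into a full netmask address (requires the net and net/netip packages):

    // Sketch only: turn a netip.Prefix into its netmask expressed as a netip.Addr,
    // e.g. 192.168.0.0/24 -> 255.255.255.0, fd00::/64 -> ffff:ffff:ffff:ffff::.
    func prefixToMask(prefix netip.Prefix) netip.Addr {
        totalBits := 128
        if prefix.Addr().Is4() {
            totalBits = 32
        }
        // net.CIDRMask builds the raw mask bytes; netip.AddrFromSlice wraps them.
        mask, _ := netip.AddrFromSlice(net.CIDRMask(prefix.Bits(), totalBits))
        return mask
    }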
|
||||
|
||||
func delRoute(prefix netip.Prefix, gateways []netip.Prefix) error {
|
||||
sock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
|
||||
}
|
||||
defer unix.Close(sock)
|
||||
|
||||
route := netroute.RouteMessage{
|
||||
Version: unix.RTM_VERSION,
|
||||
Type: unix.RTM_DELETE,
|
||||
Seq: 1,
|
||||
}
|
||||
|
||||
if prefix.Addr().Is4() {
|
||||
gw, err := selectGateway(prefix, gateways)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
route.Addrs = []netroute.Addr{
|
||||
unix.RTAX_DST: &netroute.Inet4Addr{IP: prefix.Masked().Addr().As4()},
|
||||
unix.RTAX_NETMASK: &netroute.Inet4Addr{IP: prefixToMask(prefix).As4()},
|
||||
unix.RTAX_GATEWAY: &netroute.Inet4Addr{IP: gw.Addr().As4()},
|
||||
}
|
||||
} else {
|
||||
gw, err := selectGateway(prefix, gateways)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
route.Addrs = []netroute.Addr{
|
||||
unix.RTAX_DST: &netroute.Inet6Addr{IP: prefix.Masked().Addr().As16()},
|
||||
unix.RTAX_NETMASK: &netroute.Inet6Addr{IP: prefixToMask(prefix).As16()},
|
||||
unix.RTAX_GATEWAY: &netroute.Inet6Addr{IP: gw.Addr().As16()},
|
||||
}
|
||||
}
|
||||
|
||||
data, err := route.Marshal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create route.RouteMessage: %w", err)
|
||||
}
|
||||
_, err = unix.Write(sock, data[:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -4,97 +4,71 @@
|
||||
package overlay
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/netip"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/routing"
|
||||
"github.com/slackhq/nebula/util"
|
||||
netroute "golang.org/x/net/route"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
SIOCAIFADDR_IN6 = 0x8080691a
|
||||
)
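The constant above follows the BSD ioctl request encoding: the top bit marks an "in" (write) request, thirteen bits carry the size of the argument struct, then a group letter and a command number. 0x8080691a decodes to _IOW('i', 26) with a 128-byte in6_aliasreq argument. An illustrative sketch of that encoding (the helper name is not part of this change):

    // iowRequest mirrors the BSD _IOW macro: IOC_IN | size<<16 | group<<8 | num.
    func iowRequest(group byte, num uint8, size uint16) uint32 {
        const iocIn = 0x80000000
        return iocIn | uint32(size&0x1fff)<<16 | uint32(group)<<8 | uint32(num)
    }

    // iowRequest('i', 26, 128) == 0x8080691a == SIOCAIFADDR_IN6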
|
||||
|
||||
type ifreqAlias4 struct {
|
||||
Name [unix.IFNAMSIZ]byte
|
||||
Addr unix.RawSockaddrInet4
|
||||
DstAddr unix.RawSockaddrInet4
|
||||
MaskAddr unix.RawSockaddrInet4
|
||||
}
|
||||
|
||||
type ifreqAlias6 struct {
|
||||
Name [unix.IFNAMSIZ]byte
|
||||
Addr unix.RawSockaddrInet6
|
||||
DstAddr unix.RawSockaddrInet6
|
||||
PrefixMask unix.RawSockaddrInet6
|
||||
Flags uint32
|
||||
Lifetime [2]uint32
|
||||
}
|
||||
|
||||
type ifreq struct {
|
||||
Name [unix.IFNAMSIZ]byte
|
||||
data int
|
||||
}
|
||||
|
||||
type tun struct {
|
||||
Device string
|
||||
vpnNetworks []netip.Prefix
|
||||
MTU int
|
||||
Routes atomic.Pointer[[]Route]
|
||||
routeTree atomic.Pointer[bart.Table[routing.Gateways]]
|
||||
routeTree atomic.Pointer[bart.Table[netip.Addr]]
|
||||
l *logrus.Logger
|
||||
f *os.File
|
||||
fd int
|
||||
|
||||
io.ReadWriteCloser
|
||||
|
||||
// cache out buffer since we need to prepend 4 bytes for tun metadata
|
||||
out []byte
|
||||
}
|
||||
|
||||
var deviceNameRE = regexp.MustCompile(`^tun[0-9]+$`)
|
||||
func (t *tun) Close() error {
|
||||
if t.ReadWriteCloser != nil {
|
||||
return t.ReadWriteCloser.Close()
|
||||
}
|
||||
|
||||
func newTunFromFd(_ *config.C, _ *logrus.Logger, _ int, _ []netip.Prefix) (*tun, error) {
|
||||
return nil, fmt.Errorf("newTunFromFd not supported in openbsd")
|
||||
return nil
|
||||
}
|
||||
|
||||
func newTunFromFd(_ *config.C, _ *logrus.Logger, _ int, _ []netip.Prefix) (*tun, error) {
|
||||
return nil, fmt.Errorf("newTunFromFd not supported in OpenBSD")
|
||||
}
|
||||
|
||||
var deviceNameRE = regexp.MustCompile(`^tun[0-9]+$`)
|
||||
|
||||
func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (*tun, error) {
|
||||
// Try to open tun device
|
||||
var err error
|
||||
deviceName := c.GetString("tun.dev", "")
|
||||
if deviceName == "" {
|
||||
return nil, fmt.Errorf("a device name in the format of /dev/tunN must be specified")
|
||||
}
|
||||
if !deviceNameRE.MatchString(deviceName) {
|
||||
return nil, fmt.Errorf("a device name in the format of /dev/tunN must be specified")
|
||||
return nil, fmt.Errorf("a device name in the format of tunN must be specified")
|
||||
}
|
||||
|
||||
fd, err := unix.Open("/dev/"+deviceName, os.O_RDWR, 0)
|
||||
if !deviceNameRE.MatchString(deviceName) {
|
||||
return nil, fmt.Errorf("a device name in the format of tunN must be specified")
|
||||
}
|
||||
|
||||
file, err := os.OpenFile("/dev/"+deviceName, os.O_RDWR, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = unix.SetNonblock(fd, true)
|
||||
if err != nil {
|
||||
l.WithError(err).Warn("Failed to set the tun device as nonblocking")
|
||||
}
|
||||
|
||||
t := &tun{
|
||||
f: os.NewFile(uintptr(fd), ""),
|
||||
fd: fd,
|
||||
Device: deviceName,
|
||||
vpnNetworks: vpnNetworks,
|
||||
MTU: c.GetInt("tun.mtu", DefaultMTU),
|
||||
l: l,
|
||||
ReadWriteCloser: file,
|
||||
Device: deviceName,
|
||||
vpnNetworks: vpnNetworks,
|
||||
MTU: c.GetInt("tun.mtu", DefaultMTU),
|
||||
l: l,
|
||||
}
|
||||
|
||||
err = t.reload(c, true)
|
||||
@@ -112,154 +86,6 @@ func newTun(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, _ bool) (
|
||||
return t, nil
|
||||
}
|
||||
|
||||
func (t *tun) Close() error {
|
||||
if t.f != nil {
|
||||
if err := t.f.Close(); err != nil {
|
||||
return fmt.Errorf("error closing tun file: %w", err)
|
||||
}
|
||||
|
||||
// t.f.Close should have handled it for us but let's be extra sure
|
||||
_ = unix.Close(t.fd)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tun) Read(to []byte) (int, error) {
|
||||
buf := make([]byte, len(to)+4)
|
||||
|
||||
n, err := t.f.Read(buf)
|
||||
|
||||
copy(to, buf[4:])
|
||||
return n - 4, err
|
||||
}
|
||||
|
||||
// Write is only valid for single threaded use
|
||||
func (t *tun) Write(from []byte) (int, error) {
|
||||
buf := t.out
|
||||
if cap(buf) < len(from)+4 {
|
||||
buf = make([]byte, len(from)+4)
|
||||
t.out = buf
|
||||
}
|
||||
buf = buf[:len(from)+4]
|
||||
|
||||
if len(from) == 0 {
|
||||
return 0, syscall.EIO
|
||||
}
|
||||
|
||||
// Determine the IP Family for the NULL L2 Header
|
||||
ipVer := from[0] >> 4
|
||||
if ipVer == 4 {
|
||||
buf[3] = syscall.AF_INET
|
||||
} else if ipVer == 6 {
|
||||
buf[3] = syscall.AF_INET6
|
||||
} else {
|
||||
return 0, fmt.Errorf("unable to determine IP version from packet")
|
||||
}
|
||||
|
||||
copy(buf[4:], from)
|
||||
|
||||
n, err := t.f.Write(buf)
|
||||
return n - 4, err
|
||||
}
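The 4-byte prefix written here is the BSD tun "loopback family" header: a network-order uint32 holding the address family of the packet, which is why only buf[3] needs to be set for the small AF_INET and AF_INET6 values. An equivalent, more explicit form (a sketch, not code from this change; requires encoding/binary):

    // Write the address family as a big-endian uint32 in front of the IP packet.
    binary.BigEndian.PutUint32(buf[:4], uint32(syscall.AF_INET))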
|
||||
|
||||
func (t *tun) addIp(cidr netip.Prefix) error {
|
||||
if cidr.Addr().Is4() {
|
||||
var req ifreqAlias4
|
||||
req.Name = t.deviceBytes()
|
||||
req.Addr = unix.RawSockaddrInet4{
|
||||
Len: unix.SizeofSockaddrInet4,
|
||||
Family: unix.AF_INET,
|
||||
Addr: cidr.Addr().As4(),
|
||||
}
|
||||
req.DstAddr = unix.RawSockaddrInet4{
|
||||
Len: unix.SizeofSockaddrInet4,
|
||||
Family: unix.AF_INET,
|
||||
Addr: cidr.Addr().As4(),
|
||||
}
|
||||
req.MaskAddr = unix.RawSockaddrInet4{
|
||||
Len: unix.SizeofSockaddrInet4,
|
||||
Family: unix.AF_INET,
|
||||
Addr: prefixToMask(cidr).As4(),
|
||||
}
|
||||
|
||||
s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer syscall.Close(s)
|
||||
|
||||
if err := ioctl(uintptr(s), unix.SIOCAIFADDR, uintptr(unsafe.Pointer(&req))); err != nil {
|
||||
return fmt.Errorf("failed to set tun address %s: %s", cidr.Addr(), err)
|
||||
}
|
||||
|
||||
err = addRoute(cidr, t.vpnNetworks)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set route for vpn network %v: %w", cidr, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
if cidr.Addr().Is6() {
|
||||
var req ifreqAlias6
|
||||
req.Name = t.deviceBytes()
|
||||
req.Addr = unix.RawSockaddrInet6{
|
||||
Len: unix.SizeofSockaddrInet6,
|
||||
Family: unix.AF_INET6,
|
||||
Addr: cidr.Addr().As16(),
|
||||
}
|
||||
req.PrefixMask = unix.RawSockaddrInet6{
|
||||
Len: unix.SizeofSockaddrInet6,
|
||||
Family: unix.AF_INET6,
|
||||
Addr: prefixToMask(cidr).As16(),
|
||||
}
|
||||
req.Lifetime[0] = 0xffffffff
|
||||
req.Lifetime[1] = 0xffffffff
|
||||
|
||||
s, err := unix.Socket(unix.AF_INET6, unix.SOCK_DGRAM, unix.IPPROTO_IP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer syscall.Close(s)
|
||||
|
||||
if err := ioctl(uintptr(s), SIOCAIFADDR_IN6, uintptr(unsafe.Pointer(&req))); err != nil {
|
||||
return fmt.Errorf("failed to set tun address %s: %s", cidr.Addr().String(), err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unknown address type %v", cidr)
|
||||
}
|
||||
|
||||
func (t *tun) Activate() error {
|
||||
err := t.doIoctlByName(unix.SIOCSIFMTU, uint32(t.MTU))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to set tun mtu: %w", err)
|
||||
}
|
||||
|
||||
for i := range t.vpnNetworks {
|
||||
err = t.addIp(t.vpnNetworks[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return t.addRoutes(false)
|
||||
}
|
||||
|
||||
func (t *tun) doIoctlByName(ctl uintptr, value uint32) error {
|
||||
s, err := unix.Socket(unix.AF_INET, unix.SOCK_DGRAM, unix.IPPROTO_IP)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer syscall.Close(s)
|
||||
|
||||
ir := ifreq{Name: t.deviceBytes(), data: int(value)}
|
||||
err = ioctl(uintptr(s), ctl, uintptr(unsafe.Pointer(&ir)))
|
||||
return err
|
||||
}
|
||||
|
||||
func (t *tun) reload(c *config.C, initial bool) error {
|
||||
change, routes, err := getAllRoutesFromConfig(c, t.vpnNetworks, initial)
|
||||
if err != nil {
|
||||
@@ -297,42 +123,63 @@ func (t *tun) reload(c *config.C, initial bool) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tun) RoutesFor(ip netip.Addr) routing.Gateways {
|
||||
func (t *tun) addIp(cidr netip.Prefix) error {
|
||||
var err error
|
||||
// TODO use syscalls instead of exec.Command
|
||||
cmd := exec.Command("/sbin/ifconfig", t.Device, cidr.String(), cidr.Addr().String())
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err = cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to run 'ifconfig': %s", err)
|
||||
}
|
||||
|
||||
cmd = exec.Command("/sbin/ifconfig", t.Device, "mtu", strconv.Itoa(t.MTU))
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err = cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to run 'ifconfig': %s", err)
|
||||
}
|
||||
|
||||
cmd = exec.Command("/sbin/route", "-n", "add", "-inet", cidr.String(), cidr.Addr().String())
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err = cmd.Run(); err != nil {
|
||||
return fmt.Errorf("failed to run 'route add': %s", err)
|
||||
}
|
||||
|
||||
// Unsafe path routes
|
||||
return t.addRoutes(false)
|
||||
}
|
||||
|
||||
func (t *tun) Activate() error {
|
||||
for i := range t.vpnNetworks {
|
||||
err := t.addIp(t.vpnNetworks[i])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tun) RouteFor(ip netip.Addr) netip.Addr {
|
||||
r, _ := t.routeTree.Load().Lookup(ip)
|
||||
return r
|
||||
}
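routeTree here is a longest-prefix-match table from github.com/gaissmai/bart, keyed by unsafe-route CIDR and holding the gateway to forward to. A minimal sketch of how such a table could be populated and queried, assuming bart's Insert/Lookup API (the addresses are made up):

    tree := new(bart.Table[netip.Addr])
    tree.Insert(netip.MustParsePrefix("10.0.0.0/8"), netip.MustParseAddr("192.168.100.1"))
    gw, ok := tree.Lookup(netip.MustParseAddr("10.4.2.1")) // gw == 192.168.100.1, ok == true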
|
||||
|
||||
func (t *tun) Networks() []netip.Prefix {
|
||||
return t.vpnNetworks
|
||||
}
|
||||
|
||||
func (t *tun) Name() string {
|
||||
return t.Device
|
||||
}
|
||||
|
||||
func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
|
||||
return nil, fmt.Errorf("TODO: multiqueue not implemented for openbsd")
|
||||
}
|
||||
|
||||
func (t *tun) addRoutes(logErrors bool) error {
|
||||
routes := *t.Routes.Load()
|
||||
|
||||
for _, r := range routes {
|
||||
if len(r.Via) == 0 || !r.Install {
|
||||
if !r.Via.IsValid() || !r.Install {
|
||||
// We don't allow route MTUs so only install routes with a via
|
||||
continue
|
||||
}
|
||||
|
||||
err := addRoute(r.Cidr, t.vpnNetworks)
|
||||
if err != nil {
|
||||
retErr := util.NewContextualError("Failed to add route", map[string]any{"route": r}, err)
|
||||
//TODO: CERT-V2 is this right?
|
||||
cmd := exec.Command("/sbin/route", "-n", "add", "-inet", r.Cidr.String(), t.vpnNetworks[0].Addr().String())
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err := cmd.Run(); err != nil {
|
||||
retErr := util.NewContextualError("failed to run 'route add' for unsafe_route", map[string]interface{}{"route": r}, err)
|
||||
if logErrors {
|
||||
retErr.Log(t.l)
|
||||
} else {
|
||||
return retErr
|
||||
}
|
||||
} else {
|
||||
t.l.WithField("route", r).Info("Added route")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -344,9 +191,10 @@ func (t *tun) removeRoutes(routes []Route) error {
|
||||
if !r.Install {
|
||||
continue
|
||||
}
|
||||
|
||||
err := delRoute(r.Cidr, t.vpnNetworks)
|
||||
if err != nil {
|
||||
//TODO: CERT-V2 is this right?
|
||||
cmd := exec.Command("/sbin/route", "-n", "delete", "-inet", r.Cidr.String(), t.vpnNetworks[0].Addr().String())
|
||||
t.l.Debug("command: ", cmd.String())
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.l.WithError(err).WithField("route", r).Error("Failed to remove route")
|
||||
} else {
|
||||
t.l.WithField("route", r).Info("Removed route")
|
||||
@@ -355,115 +203,52 @@ func (t *tun) removeRoutes(routes []Route) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *tun) deviceBytes() (o [16]byte) {
|
||||
for i, c := range t.Device {
|
||||
o[i] = byte(c)
|
||||
}
|
||||
return
|
||||
func (t *tun) Networks() []netip.Prefix {
|
||||
return t.vpnNetworks
|
||||
}
|
||||
|
||||
func addRoute(prefix netip.Prefix, gateways []netip.Prefix) error {
|
||||
sock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
|
||||
}
|
||||
defer unix.Close(sock)
|
||||
func (t *tun) Name() string {
|
||||
return t.Device
|
||||
}
|
||||
|
||||
route := &netroute.RouteMessage{
|
||||
Version: unix.RTM_VERSION,
|
||||
Type: unix.RTM_ADD,
|
||||
Flags: unix.RTF_UP | unix.RTF_GATEWAY,
|
||||
Seq: 1,
|
||||
func (t *tun) NewMultiQueueReader() (io.ReadWriteCloser, error) {
|
||||
return nil, fmt.Errorf("TODO: multiqueue not implemented for freebsd")
|
||||
}
|
||||
|
||||
func (t *tun) Read(to []byte) (int, error) {
|
||||
buf := make([]byte, len(to)+4)
|
||||
|
||||
n, err := t.ReadWriteCloser.Read(buf)
|
||||
|
||||
copy(to, buf[4:])
|
||||
return n - 4, err
|
||||
}
|
||||
|
||||
// Write is only valid for single threaded use
|
||||
func (t *tun) Write(from []byte) (int, error) {
|
||||
buf := t.out
|
||||
if cap(buf) < len(from)+4 {
|
||||
buf = make([]byte, len(from)+4)
|
||||
t.out = buf
|
||||
}
|
||||
buf = buf[:len(from)+4]
|
||||
|
||||
if len(from) == 0 {
|
||||
return 0, syscall.EIO
|
||||
}
|
||||
|
||||
if prefix.Addr().Is4() {
|
||||
gw, err := selectGateway(prefix, gateways)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
route.Addrs = []netroute.Addr{
|
||||
unix.RTAX_DST: &netroute.Inet4Addr{IP: prefix.Masked().Addr().As4()},
|
||||
unix.RTAX_NETMASK: &netroute.Inet4Addr{IP: prefixToMask(prefix).As4()},
|
||||
unix.RTAX_GATEWAY: &netroute.Inet4Addr{IP: gw.Addr().As4()},
|
||||
}
|
||||
// Determine the IP Family for the NULL L2 Header
|
||||
ipVer := from[0] >> 4
|
||||
if ipVer == 4 {
|
||||
buf[3] = syscall.AF_INET
|
||||
} else if ipVer == 6 {
|
||||
buf[3] = syscall.AF_INET6
|
||||
} else {
|
||||
gw, err := selectGateway(prefix, gateways)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
route.Addrs = []netroute.Addr{
|
||||
unix.RTAX_DST: &netroute.Inet6Addr{IP: prefix.Masked().Addr().As16()},
|
||||
unix.RTAX_NETMASK: &netroute.Inet6Addr{IP: prefixToMask(prefix).As16()},
|
||||
unix.RTAX_GATEWAY: &netroute.Inet6Addr{IP: gw.Addr().As16()},
|
||||
}
|
||||
return 0, fmt.Errorf("unable to determine IP version from packet")
|
||||
}
|
||||
|
||||
data, err := route.Marshal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create route.RouteMessage: %w", err)
|
||||
}
|
||||
copy(buf[4:], from)
|
||||
|
||||
_, err = unix.Write(sock, data[:])
|
||||
if err != nil {
|
||||
if errors.Is(err, unix.EEXIST) {
|
||||
// Try to do a change
|
||||
route.Type = unix.RTM_CHANGE
|
||||
data, err = route.Marshal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create route.RouteMessage for change: %w", err)
|
||||
}
|
||||
_, err = unix.Write(sock, data[:])
|
||||
return err
|
||||
}
|
||||
return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func delRoute(prefix netip.Prefix, gateways []netip.Prefix) error {
|
||||
sock, err := unix.Socket(unix.AF_ROUTE, unix.SOCK_RAW, unix.AF_UNSPEC)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to create AF_ROUTE socket: %v", err)
|
||||
}
|
||||
defer unix.Close(sock)
|
||||
|
||||
route := netroute.RouteMessage{
|
||||
Version: unix.RTM_VERSION,
|
||||
Type: unix.RTM_DELETE,
|
||||
Seq: 1,
|
||||
}
|
||||
|
||||
if prefix.Addr().Is4() {
|
||||
gw, err := selectGateway(prefix, gateways)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
route.Addrs = []netroute.Addr{
|
||||
unix.RTAX_DST: &netroute.Inet4Addr{IP: prefix.Masked().Addr().As4()},
|
||||
unix.RTAX_NETMASK: &netroute.Inet4Addr{IP: prefixToMask(prefix).As4()},
|
||||
unix.RTAX_GATEWAY: &netroute.Inet4Addr{IP: gw.Addr().As4()},
|
||||
}
|
||||
} else {
|
||||
gw, err := selectGateway(prefix, gateways)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
route.Addrs = []netroute.Addr{
|
||||
unix.RTAX_DST: &netroute.Inet6Addr{IP: prefix.Masked().Addr().As16()},
|
||||
unix.RTAX_NETMASK: &netroute.Inet6Addr{IP: prefixToMask(prefix).As16()},
|
||||
unix.RTAX_GATEWAY: &netroute.Inet6Addr{IP: gw.Addr().As16()},
|
||||
}
|
||||
}
|
||||
|
||||
data, err := route.Marshal()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create route.RouteMessage: %w", err)
|
||||
}
|
||||
_, err = unix.Write(sock, data[:])
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write route.RouteMessage to socket: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
n, err := t.ReadWriteCloser.Write(buf)
|
||||
return n - 4, err
|
||||
}
|
||||
|
||||
@@ -13,14 +13,13 @@ import (
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/routing"
|
||||
)
|
||||
|
||||
type TestTun struct {
|
||||
Device string
|
||||
vpnNetworks []netip.Prefix
|
||||
Routes []Route
|
||||
routeTree *bart.Table[routing.Gateways]
|
||||
routeTree *bart.Table[netip.Addr]
|
||||
l *logrus.Logger
|
||||
|
||||
closed atomic.Bool
|
||||
@@ -87,7 +86,7 @@ func (t *TestTun) Get(block bool) []byte {
|
||||
// Below this is boilerplate implementation to make nebula actually work
|
||||
//********************************************************************************************************************//
|
||||
|
||||
func (t *TestTun) RoutesFor(ip netip.Addr) routing.Gateways {
|
||||
func (t *TestTun) RouteFor(ip netip.Addr) netip.Addr {
|
||||
r, _ := t.routeTree.Lookup(ip)
|
||||
return r
|
||||
}
|
||||
|
||||
@@ -18,7 +18,6 @@ import (
|
||||
"github.com/gaissmai/bart"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/routing"
|
||||
"github.com/slackhq/nebula/util"
|
||||
"github.com/slackhq/nebula/wintun"
|
||||
"golang.org/x/sys/windows"
|
||||
@@ -32,7 +31,7 @@ type winTun struct {
|
||||
vpnNetworks []netip.Prefix
|
||||
MTU int
|
||||
Routes atomic.Pointer[[]Route]
|
||||
routeTree atomic.Pointer[bart.Table[routing.Gateways]]
|
||||
routeTree atomic.Pointer[bart.Table[netip.Addr]]
|
||||
l *logrus.Logger
|
||||
|
||||
tun *wintun.NativeTun
|
||||
@@ -148,18 +147,15 @@ func (t *winTun) addRoutes(logErrors bool) error {
|
||||
foundDefault4 := false
|
||||
|
||||
for _, r := range routes {
|
||||
if len(r.Via) == 0 || !r.Install {
|
||||
if !r.Via.IsValid() || !r.Install {
|
||||
// We don't allow route MTUs so only install routes with a via
|
||||
continue
|
||||
}
|
||||
|
||||
// Add our unsafe route
|
||||
// Windows does not support multipath routes natively, so we install only a single route.
|
||||
// This is not a problem, as traffic is always sent to Nebula, which handles the multipath routing internally.
// In effect this provides multipath routing support on Windows, enabling load balancing and redundancy.
|
||||
err := luid.AddRoute(r.Cidr, r.Via[0].Addr(), uint32(r.Metric))
|
||||
err := luid.AddRoute(r.Cidr, r.Via, uint32(r.Metric))
|
||||
if err != nil {
|
||||
retErr := util.NewContextualError("Failed to add route", map[string]any{"route": r}, err)
|
||||
retErr := util.NewContextualError("Failed to add route", map[string]interface{}{"route": r}, err)
|
||||
if logErrors {
|
||||
retErr.Log(t.l)
|
||||
continue
|
||||
@@ -202,8 +198,7 @@ func (t *winTun) removeRoutes(routes []Route) error {
|
||||
continue
|
||||
}
|
||||
|
||||
// See comment on luid.AddRoute
|
||||
err := luid.DeleteRoute(r.Cidr, r.Via[0].Addr())
|
||||
err := luid.DeleteRoute(r.Cidr, r.Via)
|
||||
if err != nil {
|
||||
t.l.WithError(err).WithField("route", r).Error("Failed to remove route")
|
||||
} else {
|
||||
@@ -213,7 +208,7 @@ func (t *winTun) removeRoutes(routes []Route) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *winTun) RoutesFor(ip netip.Addr) routing.Gateways {
|
||||
func (t *winTun) RouteFor(ip netip.Addr) netip.Addr {
|
||||
r, _ := t.routeTree.Load().Lookup(ip)
|
||||
return r
|
||||
}
|
||||
|
||||
@@ -1,14 +1,11 @@
|
||||
package overlay
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/netip"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/packet"
|
||||
"github.com/slackhq/nebula/routing"
|
||||
)
|
||||
|
||||
func NewUserDeviceFromConfig(c *config.C, l *logrus.Logger, vpnNetworks []netip.Prefix, routines int) (Device, error) {
|
||||
@@ -38,21 +35,13 @@ type UserDevice struct {
|
||||
inboundWriter *io.PipeWriter
|
||||
}
|
||||
|
||||
func (d *UserDevice) RecycleRxSeg(pkt *packet.VirtIOPacket, kick bool, q int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *UserDevice) Activate() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *UserDevice) Networks() []netip.Prefix { return d.vpnNetworks }
|
||||
func (d *UserDevice) Name() string { return "faketun0" }
|
||||
func (d *UserDevice) RoutesFor(ip netip.Addr) routing.Gateways {
|
||||
return routing.Gateways{routing.NewGateway(ip, 1)}
|
||||
}
|
||||
|
||||
func (d *UserDevice) NewMultiQueueReader() (TunDev, error) {
|
||||
func (d *UserDevice) Networks() []netip.Prefix { return d.vpnNetworks }
|
||||
func (d *UserDevice) Name() string { return "faketun0" }
|
||||
func (d *UserDevice) RouteFor(ip netip.Addr) netip.Addr { return ip }
|
||||
func (d *UserDevice) NewMultiQueueReader() (io.ReadWriteCloser, error) {
|
||||
return d, nil
|
||||
}
|
||||
|
||||
@@ -71,19 +60,3 @@ func (d *UserDevice) Close() error {
|
||||
d.outboundWriter.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func (d *UserDevice) ReadMany(b []*packet.VirtIOPacket, _ int) (int, error) {
|
||||
return d.Read(b[0].Payload)
|
||||
}
|
||||
|
||||
func (d *UserDevice) AllocSeg(pkt *packet.OutPacket, q int) (int, error) {
|
||||
return 0, fmt.Errorf("user: AllocSeg not implemented")
|
||||
}
|
||||
|
||||
func (d *UserDevice) WriteOne(x *packet.OutPacket, kick bool, q int) (int, error) {
|
||||
return 0, fmt.Errorf("user: WriteOne not implemented")
|
||||
}
|
||||
|
||||
func (d *UserDevice) WriteMany(x []*packet.OutPacket, q int) (int, error) {
|
||||
return 0, fmt.Errorf("user: WriteMany not implemented")
|
||||
}
|
||||
|
||||
@@ -1,23 +0,0 @@
|
||||
Significant portions of this code are derived from https://pkg.go.dev/github.com/hetznercloud/virtio-go
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 Hetzner Cloud GmbH
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -1,4 +0,0 @@
|
||||
// Package vhost implements the basic ioctl requests needed to interact with the
|
||||
// kernel-level virtio server that provides accelerated virtio devices for
|
||||
// networking and more.
|
||||
package vhost
|
||||
@@ -1,218 +0,0 @@
|
||||
package vhost
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"github.com/slackhq/nebula/overlay/virtqueue"
|
||||
"github.com/slackhq/nebula/util/virtio"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
// vhostIoctlGetFeatures can be used to retrieve the features supported by
|
||||
// the vhost implementation in the kernel.
|
||||
//
|
||||
// Response payload: [virtio.Feature]
|
||||
// Kernel name: VHOST_GET_FEATURES
|
||||
vhostIoctlGetFeatures = 0x8008af00
|
||||
|
||||
// vhostIoctlSetFeatures can be used to communicate the features supported
|
||||
// by this virtio implementation to the kernel.
|
||||
//
|
||||
// Request payload: [virtio.Feature]
|
||||
// Kernel name: VHOST_SET_FEATURES
|
||||
vhostIoctlSetFeatures = 0x4008af00
|
||||
|
||||
// vhostIoctlSetOwner can be used to set the current process as the
|
||||
// exclusive owner of a control file descriptor.
|
||||
//
|
||||
// Request payload: none
|
||||
// Kernel name: VHOST_SET_OWNER
|
||||
vhostIoctlSetOwner = 0x0000af01
|
||||
|
||||
// vhostIoctlSetMemoryLayout can be used to set up or modify the memory
|
||||
// layout which describes the IOTLB mappings in the kernel.
|
||||
//
|
||||
// Request payload: [MemoryLayout] with custom serialization
|
||||
// Kernel name: VHOST_SET_MEM_TABLE
|
||||
vhostIoctlSetMemoryLayout = 0x4008af03
|
||||
|
||||
// vhostIoctlSetQueueSize can be used to set the size of the virtqueue.
|
||||
//
|
||||
// Request payload: [QueueState]
|
||||
// Kernel name: VHOST_SET_VRING_NUM
|
||||
vhostIoctlSetQueueSize = 0x4008af10
|
||||
|
||||
// vhostIoctlSetQueueAddress can be used to set the addresses of the
|
||||
// different parts of the virtqueue.
|
||||
//
|
||||
// Request payload: [QueueAddresses]
|
||||
// Kernel name: VHOST_SET_VRING_ADDR
|
||||
vhostIoctlSetQueueAddress = 0x4028af11
|
||||
|
||||
// vhostIoctlSetAvailableRingBase can be used to set the index of the next
|
||||
// available ring entry the device will process.
|
||||
//
|
||||
// Request payload: [QueueState]
|
||||
// Kernel name: VHOST_SET_VRING_BASE
|
||||
vhostIoctlSetAvailableRingBase = 0x4008af12
|
||||
|
||||
// vhostIoctlSetQueueKickEventFD can be used to set the event file
|
||||
// descriptor to signal the device when descriptor chains were added to the
|
||||
// available ring.
|
||||
//
|
||||
// Request payload: [QueueFile]
|
||||
// Kernel name: VHOST_SET_VRING_KICK
|
||||
vhostIoctlSetQueueKickEventFD = 0x4008af20
|
||||
|
||||
// vhostIoctlSetQueueCallEventFD can be used to set the event file
|
||||
// descriptor that gets signaled by the device when descriptor chains have
|
||||
// been used by it.
|
||||
//
|
||||
// Request payload: [QueueFile]
|
||||
// Kernel name: VHOST_SET_VRING_CALL
|
||||
vhostIoctlSetQueueCallEventFD = 0x4008af21
|
||||
)
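These request numbers follow the standard Linux ioctl encoding: two direction bits, a fourteen-bit payload size, the 0xAF vhost type byte, and the command number. For example 0x8008af00 is _IOR(0xAF, 0x00, __u64) and 0x4008af00 is _IOW(0xAF, 0x00, __u64). A short sketch of that encoding (illustrative only, not part of this change):

    // ioc mirrors the Linux _IOC macro: dir<<30 | size<<16 | typ<<8 | nr.
    const (
        iocWrite = 1 // _IOW: userspace passes the payload to the kernel
        iocRead  = 2 // _IOR: the kernel fills the payload for userspace
    )

    func ioc(dir, size, typ, nr uint32) uint32 {
        return dir<<30 | size<<16 | typ<<8 | nr
    }

    // ioc(iocRead, 8, 0xAF, 0x00) == 0x8008af00 == VHOST_GET_FEATURES
    // ioc(iocWrite, 8, 0xAF, 0x00) == 0x4008af00 == VHOST_SET_FEATURES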
|
||||
|
||||
// QueueState is an ioctl request payload that can hold a queue index and any
|
||||
// 32-bit number.
|
||||
//
|
||||
// Kernel name: vhost_vring_state
|
||||
type QueueState struct {
|
||||
// QueueIndex is the index of the virtqueue.
|
||||
QueueIndex uint32
|
||||
// Num is any 32-bit number, depending on the request.
|
||||
Num uint32
|
||||
}
|
||||
|
||||
// QueueAddresses is an ioctl request payload that can hold the addresses of the
|
||||
// different parts of a virtqueue.
|
||||
//
|
||||
// Kernel name: vhost_vring_addr
|
||||
type QueueAddresses struct {
|
||||
// QueueIndex is the index of the virtqueue.
|
||||
QueueIndex uint32
|
||||
// Flags that are not used in this implementation.
|
||||
Flags uint32
|
||||
// DescriptorTableAddress is the address of the descriptor table in user
|
||||
// space memory. It must be 16-byte aligned.
|
||||
DescriptorTableAddress uintptr
|
||||
// UsedRingAddress is the address of the used ring in user space memory. It
|
||||
// must be 4-byte aligned.
|
||||
UsedRingAddress uintptr
|
||||
// AvailableRingAddress is the address of the available ring in user space
|
||||
// memory. It must be 2-byte aligned.
|
||||
AvailableRingAddress uintptr
|
||||
// LogAddress is used for optional logging support, which is not supported by
// this implementation.
|
||||
LogAddress uintptr
|
||||
}
|
||||
|
||||
// QueueFile is an ioctl request payload that can hold a queue index and a file
|
||||
// descriptor.
|
||||
//
|
||||
// Kernel name: vhost_vring_file
|
||||
type QueueFile struct {
|
||||
// QueueIndex is the index of the virtqueue.
|
||||
QueueIndex uint32
|
||||
// FD is the file descriptor of the file. Pass -1 to unbind from a file.
|
||||
FD int32
|
||||
}
|
||||
|
||||
// IoctlPtr is a copy of the similarly named unexported function from the Go
|
||||
// unix package. This is needed to do custom ioctl requests not supported by the
|
||||
// standard library.
|
||||
func IoctlPtr(fd int, req uint, arg unsafe.Pointer) error {
|
||||
_, _, err := unix.Syscall(unix.SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg))
|
||||
if err != 0 {
|
||||
return fmt.Errorf("ioctl request %d: %w", req, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetFeatures requests the supported feature bits from the virtio device
|
||||
// associated with the given control file descriptor.
|
||||
func GetFeatures(controlFD int) (virtio.Feature, error) {
|
||||
var features virtio.Feature
|
||||
if err := IoctlPtr(controlFD, vhostIoctlGetFeatures, unsafe.Pointer(&features)); err != nil {
|
||||
return 0, fmt.Errorf("get features: %w", err)
|
||||
}
|
||||
return features, nil
|
||||
}
|
||||
|
||||
// SetFeatures communicates the feature bits supported by this implementation
|
||||
// to the virtio device associated with the given control file descriptor.
|
||||
func SetFeatures(controlFD int, features virtio.Feature) error {
|
||||
if err := IoctlPtr(controlFD, vhostIoctlSetFeatures, unsafe.Pointer(&features)); err != nil {
|
||||
return fmt.Errorf("set features: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OwnControlFD sets the current process as the exclusive owner for the
|
||||
// given control file descriptor. This must be called before interacting with
|
||||
// the control file descriptor in any other way.
|
||||
func OwnControlFD(controlFD int) error {
|
||||
if err := IoctlPtr(controlFD, vhostIoctlSetOwner, unsafe.Pointer(nil)); err != nil {
|
||||
return fmt.Errorf("set control file descriptor owner: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetMemoryLayout sets up or modifies the memory layout for the kernel-level
|
||||
// virtio device associated with the given control file descriptor.
|
||||
func SetMemoryLayout(controlFD int, layout MemoryLayout) error {
|
||||
payload := layout.serializePayload()
|
||||
if err := IoctlPtr(controlFD, vhostIoctlSetMemoryLayout, unsafe.Pointer(&payload[0])); err != nil {
|
||||
return fmt.Errorf("set memory layout: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RegisterQueue registers a virtio queue with the kernel-level virtio server.
|
||||
// The virtqueue will be linked to the given control file descriptor and will
|
||||
// have the given index. The kernel will use this queue until the control file
|
||||
// descriptor is closed.
|
||||
func RegisterQueue(controlFD int, queueIndex uint32, queue *virtqueue.SplitQueue) error {
|
||||
if err := IoctlPtr(controlFD, vhostIoctlSetQueueSize, unsafe.Pointer(&QueueState{
|
||||
QueueIndex: queueIndex,
|
||||
Num: uint32(queue.Size()),
|
||||
})); err != nil {
|
||||
return fmt.Errorf("set queue size: %w", err)
|
||||
}
|
||||
|
||||
if err := IoctlPtr(controlFD, vhostIoctlSetQueueAddress, unsafe.Pointer(&QueueAddresses{
|
||||
QueueIndex: queueIndex,
|
||||
Flags: 0,
|
||||
DescriptorTableAddress: queue.DescriptorTable().Address(),
|
||||
UsedRingAddress: queue.UsedRing().Address(),
|
||||
AvailableRingAddress: queue.AvailableRing().Address(),
|
||||
LogAddress: 0,
|
||||
})); err != nil {
|
||||
return fmt.Errorf("set queue addresses: %w", err)
|
||||
}
|
||||
|
||||
if err := IoctlPtr(controlFD, vhostIoctlSetAvailableRingBase, unsafe.Pointer(&QueueState{
|
||||
QueueIndex: queueIndex,
|
||||
Num: 0,
|
||||
})); err != nil {
|
||||
return fmt.Errorf("set available ring base: %w", err)
|
||||
}
|
||||
|
||||
if err := IoctlPtr(controlFD, vhostIoctlSetQueueKickEventFD, unsafe.Pointer(&QueueFile{
|
||||
QueueIndex: queueIndex,
|
||||
FD: int32(queue.KickEventFD()),
|
||||
})); err != nil {
|
||||
return fmt.Errorf("set kick event file descriptor: %w", err)
|
||||
}
|
||||
|
||||
if err := IoctlPtr(controlFD, vhostIoctlSetQueueCallEventFD, unsafe.Pointer(&QueueFile{
|
||||
QueueIndex: queueIndex,
|
||||
FD: int32(queue.CallEventFD()),
|
||||
})); err != nil {
|
||||
return fmt.Errorf("set call event file descriptor: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,21 +0,0 @@
|
||||
package vhost_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"github.com/slackhq/nebula/overlay/vhost"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestQueueState_Size(t *testing.T) {
|
||||
assert.EqualValues(t, 8, unsafe.Sizeof(vhost.QueueState{}))
|
||||
}
|
||||
|
||||
func TestQueueAddresses_Size(t *testing.T) {
|
||||
assert.EqualValues(t, 40, unsafe.Sizeof(vhost.QueueAddresses{}))
|
||||
}
|
||||
|
||||
func TestQueueFile_Size(t *testing.T) {
|
||||
assert.EqualValues(t, 8, unsafe.Sizeof(vhost.QueueFile{}))
|
||||
}
|
||||
@@ -1,73 +0,0 @@
|
||||
package vhost
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"unsafe"
|
||||
|
||||
"github.com/slackhq/nebula/overlay/virtqueue"
|
||||
)
|
||||
|
||||
// MemoryRegion describes a region of userspace memory which is being made
|
||||
// accessible to a vhost device.
|
||||
//
|
||||
// Kernel name: vhost_memory_region
|
||||
type MemoryRegion struct {
|
||||
// GuestPhysicalAddress is the physical address of the memory region within
|
||||
// the guest, when virtualization is used. When no virtualization is used,
|
||||
// this should be the same as UserspaceAddress.
|
||||
GuestPhysicalAddress uintptr
|
||||
// Size is the size of the memory region.
|
||||
Size uint64
|
||||
// UserspaceAddress is the virtual address in the userspace of the host
|
||||
// where the memory region can be found.
|
||||
UserspaceAddress uintptr
|
||||
// Padding and room for flags. Currently unused.
|
||||
_ uint64
|
||||
}
|
||||
|
||||
// MemoryLayout is a list of [MemoryRegion]s.
|
||||
type MemoryLayout []MemoryRegion
|
||||
|
||||
// NewMemoryLayoutForQueues returns a new [MemoryLayout] that describes the
|
||||
// memory pages used by the descriptor tables of the given queues.
|
||||
func NewMemoryLayoutForQueues(queues []*virtqueue.SplitQueue) MemoryLayout {
|
||||
regions := make([]MemoryRegion, 0)
|
||||
for _, queue := range queues {
|
||||
for address, size := range queue.DescriptorTable().BufferAddresses() {
|
||||
regions = append(regions, MemoryRegion{
|
||||
// There is no virtualization in play here, so the guest address
|
||||
// is the same as in the host's userspace.
|
||||
GuestPhysicalAddress: address,
|
||||
Size: uint64(size),
|
||||
UserspaceAddress: address,
|
||||
})
|
||||
}
|
||||
}
|
||||
return regions
|
||||
}
|
||||
|
||||
// serializePayload serializes the list of memory regions into a format that is
|
||||
// compatible to the vhost_memory kernel struct. The returned byte slice can be
|
||||
// used as a payload for the vhostIoctlSetMemoryLayout ioctl.
|
||||
func (regions MemoryLayout) serializePayload() []byte {
|
||||
regionCount := len(regions)
|
||||
regionSize := int(unsafe.Sizeof(MemoryRegion{}))
|
||||
payload := make([]byte, 8+regionCount*regionSize)
|
||||
|
||||
// The first 32 bits contain the number of memory regions. The following 32
|
||||
// bits are padding.
|
||||
binary.LittleEndian.PutUint32(payload[0:4], uint32(regionCount))
|
||||
|
||||
if regionCount > 0 {
|
||||
// The underlying byte array of the slice should already have the correct
|
||||
// format, so just copy that.
|
||||
copied := copy(payload[8:], unsafe.Slice((*byte)(unsafe.Pointer(®ions[0])), regionCount*regionSize))
|
||||
if copied != regionCount*regionSize {
|
||||
panic(fmt.Sprintf("copied only %d bytes of the memory regions, but expected %d",
|
||||
copied, regionCount*regionSize))
|
||||
}
|
||||
}
|
||||
|
||||
return payload
|
||||
}
|
||||
@@ -1,42 +0,0 @@
|
||||
package vhost
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestMemoryRegion_Size(t *testing.T) {
|
||||
assert.EqualValues(t, 32, unsafe.Sizeof(MemoryRegion{}))
|
||||
}
|
||||
|
||||
func TestMemoryLayout_SerializePayload(t *testing.T) {
|
||||
layout := MemoryLayout([]MemoryRegion{
|
||||
{
|
||||
GuestPhysicalAddress: 42,
|
||||
Size: 100,
|
||||
UserspaceAddress: 142,
|
||||
}, {
|
||||
GuestPhysicalAddress: 99,
|
||||
Size: 100,
|
||||
UserspaceAddress: 99,
|
||||
},
|
||||
})
|
||||
payload := layout.serializePayload()
|
||||
|
||||
assert.Equal(t, []byte{
|
||||
0x02, 0x00, 0x00, 0x00, // nregions
|
||||
0x00, 0x00, 0x00, 0x00, // padding
|
||||
// region 0
|
||||
0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // guest_phys_addr
|
||||
0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // memory_size
|
||||
0x8e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // userspace_addr
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // flags_padding
|
||||
// region 1
|
||||
0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // guest_phys_addr
|
||||
0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // memory_size
|
||||
0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // userspace_addr
|
||||
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // flags_padding
|
||||
}, payload)
|
||||
}
|
||||
@@ -1,23 +0,0 @@
|
||||
Significant portions of this code are derived from https://pkg.go.dev/github.com/hetznercloud/virtio-go
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 Hetzner Cloud GmbH
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -1,427 +0,0 @@
|
||||
package vhostnet
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
|
||||
"github.com/slackhq/nebula/overlay/vhost"
|
||||
"github.com/slackhq/nebula/overlay/virtqueue"
|
||||
"github.com/slackhq/nebula/packet"
|
||||
"github.com/slackhq/nebula/util/virtio"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// ErrDeviceClosed is returned when the [Device] is closed while operations are
|
||||
// still running.
|
||||
var ErrDeviceClosed = errors.New("device was closed")
|
||||
|
||||
// The indexes for the receive and transmit queues.
|
||||
const (
|
||||
receiveQueueIndex = 0
|
||||
transmitQueueIndex = 1
|
||||
)
|
||||
|
||||
// Device represents a vhost networking device within the kernel-level virtio
|
||||
// implementation and provides methods to interact with it.
|
||||
type Device struct {
|
||||
initialized bool
|
||||
controlFD int
|
||||
|
||||
fullTable bool
|
||||
ReceiveQueue *virtqueue.SplitQueue
|
||||
TransmitQueue *virtqueue.SplitQueue
|
||||
}
|
||||
|
||||
// NewDevice initializes a new vhost networking device within the
|
||||
// kernel-level virtio implementation, sets up the virtqueues and returns a
|
||||
// [Device] instance that can be used to communicate with that vhost device.
|
||||
//
|
||||
// There are multiple options that can be passed to this constructor to
|
||||
// influence device creation:
|
||||
// - [WithQueueSize]
|
||||
// - [WithBackendFD]
|
||||
// - [WithBackendDevice]
|
||||
//
|
||||
// Remember to call [Device.Close] after use to free up resources.
|
||||
func NewDevice(options ...Option) (*Device, error) {
|
||||
var err error
|
||||
opts := optionDefaults
|
||||
opts.apply(options)
|
||||
if err = opts.validate(); err != nil {
|
||||
return nil, fmt.Errorf("invalid options: %w", err)
|
||||
}
|
||||
|
||||
dev := Device{
|
||||
controlFD: -1,
|
||||
}
|
||||
|
||||
// Clean up a partially initialized device when something fails.
|
||||
defer func() {
|
||||
if err != nil {
|
||||
_ = dev.Close()
|
||||
}
|
||||
}()
|
||||
|
||||
// Retrieve a new control file descriptor. This will be used to configure
|
||||
// the vhost networking device in the kernel.
|
||||
dev.controlFD, err = unix.Open("/dev/vhost-net", os.O_RDWR, 0666)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get control file descriptor: %w", err)
|
||||
}
|
||||
if err = vhost.OwnControlFD(dev.controlFD); err != nil {
|
||||
return nil, fmt.Errorf("own control file descriptor: %w", err)
|
||||
}
|
||||
|
||||
// Advertise the supported features. This isn't much for now.
|
||||
// TODO: Add feature options and implement proper feature negotiation.
|
||||
getFeatures, err := vhost.GetFeatures(dev.controlFD) //0x1033D008000 but why
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("get features: %w", err)
|
||||
}
|
||||
if getFeatures == 0 {
|
||||
|
||||
}
|
||||
//const funky = virtio.Feature(1 << 27)
|
||||
//features := virtio.FeatureVersion1 | funky // | todo virtio.FeatureNetMergeRXBuffers
|
||||
features := virtio.FeatureVersion1 | virtio.FeatureNetMergeRXBuffers
|
||||
if err = vhost.SetFeatures(dev.controlFD, features); err != nil {
|
||||
return nil, fmt.Errorf("set features: %w", err)
|
||||
}
|
||||
|
||||
itemSize := os.Getpagesize() * 4 //todo config
|
||||
|
||||
// Initialize and register the queues needed for the networking device.
|
||||
if dev.ReceiveQueue, err = createQueue(dev.controlFD, receiveQueueIndex, opts.queueSize, itemSize); err != nil {
|
||||
return nil, fmt.Errorf("create receive queue: %w", err)
|
||||
}
|
||||
if dev.TransmitQueue, err = createQueue(dev.controlFD, transmitQueueIndex, opts.queueSize, itemSize); err != nil {
|
||||
return nil, fmt.Errorf("create transmit queue: %w", err)
|
||||
}
|
||||
|
||||
// Set up memory mappings for all buffers used by the queues. This has to
|
||||
// happen before a backend for the queues can be registered.
|
||||
memoryLayout := vhost.NewMemoryLayoutForQueues(
|
||||
[]*virtqueue.SplitQueue{dev.ReceiveQueue, dev.TransmitQueue},
|
||||
)
|
||||
if err = vhost.SetMemoryLayout(dev.controlFD, memoryLayout); err != nil {
|
||||
return nil, fmt.Errorf("setup memory layout: %w", err)
|
||||
}
|
||||
|
||||
// Set the queue backends. This activates the queues within the kernel.
|
||||
if err = SetQueueBackend(dev.controlFD, receiveQueueIndex, opts.backendFD); err != nil {
|
||||
return nil, fmt.Errorf("set receive queue backend: %w", err)
|
||||
}
|
||||
if err = SetQueueBackend(dev.controlFD, transmitQueueIndex, opts.backendFD); err != nil {
|
||||
return nil, fmt.Errorf("set transmit queue backend: %w", err)
|
||||
}
|
||||
|
||||
// Fully populate the receive queue with available buffers which the device
|
||||
// can write new packets into.
|
||||
if err = dev.refillReceiveQueue(); err != nil {
|
||||
return nil, fmt.Errorf("refill receive queue: %w", err)
|
||||
}
|
||||
if err = dev.refillTransmitQueue(); err != nil {
return nil, fmt.Errorf("refill transmit queue: %w", err)
|
||||
}
|
||||
|
||||
dev.initialized = true
|
||||
|
||||
// Make sure to clean up even when the device gets garbage collected without
|
||||
// Close being called first.
|
||||
devPtr := &dev
|
||||
runtime.SetFinalizer(devPtr, (*Device).Close)
|
||||
|
||||
return devPtr, nil
|
||||
}
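A hypothetical caller of this constructor, using only the options that appear in this change (the TAP/raw-socket descriptor tapFD is assumed to already exist):

    // Sketch: create a vhost-net device backed by an existing TAP file descriptor.
    dev, err := vhostnet.NewDevice(
        vhostnet.WithQueueSize(256), // must be a power of two, per WithQueueSize
        vhostnet.WithBackendFD(tapFD),
    )
    if err != nil {
        return err
    }
    defer dev.Close()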
|
||||
|
||||
// refillReceiveQueue offers as many new device-writable buffers to the device
|
||||
// as the queue can fit. The device will then use these to write received
|
||||
// packets.
|
||||
func (dev *Device) refillReceiveQueue() error {
|
||||
for {
|
||||
_, err := dev.ReceiveQueue.OfferInDescriptorChains()
|
||||
if err != nil {
|
||||
if errors.Is(err, virtqueue.ErrNotEnoughFreeDescriptors) {
|
||||
// Queue is full, job is done.
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("offer descriptor chain: %w", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (dev *Device) refillTransmitQueue() error {
|
||||
//for {
|
||||
// desc, err := dev.TransmitQueue.DescriptorTable().CreateDescriptorForOutputs()
|
||||
// if err != nil {
|
||||
// if errors.Is(err, virtqueue.ErrNotEnoughFreeDescriptors) {
|
||||
// // Queue is full, job is done.
|
||||
// return nil
|
||||
// }
|
||||
// return fmt.Errorf("offer descriptor chain: %w", err)
|
||||
// } else {
|
||||
// dev.TransmitQueue.UsedRing().InitOfferSingle(desc, 0)
|
||||
// }
|
||||
//}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close cleans up the vhost networking device within the kernel and releases
|
||||
// all resources used for it.
|
||||
// The implementation will try to release as many resources as possible and
|
||||
// collect potential errors before returning them.
|
||||
func (dev *Device) Close() error {
|
||||
dev.initialized = false
|
||||
|
||||
// Closing the control file descriptor will unregister all queues from the
|
||||
// kernel.
|
||||
if dev.controlFD >= 0 {
|
||||
if err := unix.Close(dev.controlFD); err != nil {
|
||||
// Return an error and do not continue, because the memory used for
|
||||
// the queues should not be released before they were unregistered
|
||||
// from the kernel.
|
||||
return fmt.Errorf("close control file descriptor: %w", err)
|
||||
}
|
||||
dev.controlFD = -1
|
||||
}
|
||||
|
||||
var errs []error
|
||||
|
||||
if dev.ReceiveQueue != nil {
|
||||
if err := dev.ReceiveQueue.Close(); err == nil {
|
||||
dev.ReceiveQueue = nil
|
||||
} else {
|
||||
errs = append(errs, fmt.Errorf("close receive queue: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
if dev.TransmitQueue != nil {
|
||||
if err := dev.TransmitQueue.Close(); err == nil {
|
||||
dev.TransmitQueue = nil
|
||||
} else {
|
||||
errs = append(errs, fmt.Errorf("close transmit queue: %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs) == 0 {
|
||||
// Everything was cleaned up. No need to run the finalizer anymore.
|
||||
runtime.SetFinalizer(dev, nil)
|
||||
}
|
||||
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
// ensureInitialized is used as a guard to prevent methods from being called on
// an uninitialized instance.
|
||||
func (dev *Device) ensureInitialized() {
|
||||
if !dev.initialized {
|
||||
panic("device is not initialized")
|
||||
}
|
||||
}
|
||||
|
||||
// createQueue creates a new virtqueue and registers it with the vhost device
|
||||
// using the given index.
|
||||
func createQueue(controlFD int, queueIndex int, queueSize int, itemSize int) (*virtqueue.SplitQueue, error) {
|
||||
var (
|
||||
queue *virtqueue.SplitQueue
|
||||
err error
|
||||
)
|
||||
if queue, err = virtqueue.NewSplitQueue(queueSize, itemSize); err != nil {
|
||||
return nil, fmt.Errorf("create virtqueue: %w", err)
|
||||
}
|
||||
if err = vhost.RegisterQueue(controlFD, uint32(queueIndex), queue); err != nil {
|
||||
return nil, fmt.Errorf("register virtqueue with index %d: %w", queueIndex, err)
|
||||
}
|
||||
return queue, nil
|
||||
}
|
||||
|
||||
// truncateBuffers returns a new list of buffers whose combined length matches
|
||||
// exactly the specified length. When the specified length exceeds the combined
// length of the buffers, this is an error. When it is smaller, the buffer list
// will be truncated accordingly.
|
||||
func truncateBuffers(buffers [][]byte, length int) (out [][]byte) {
|
||||
for _, buffer := range buffers {
|
||||
if length < len(buffer) {
|
||||
out = append(out, buffer[:length])
|
||||
return
|
||||
}
|
||||
out = append(out, buffer)
|
||||
length -= len(buffer)
|
||||
}
|
||||
if length > 0 {
|
||||
panic("length exceeds the combined length of all buffers")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (dev *Device) GetPacketForTx() (uint16, []byte, error) {
|
||||
var err error
|
||||
var idx uint16
|
||||
if !dev.fullTable {
|
||||
|
||||
idx, err = dev.TransmitQueue.DescriptorTable().CreateDescriptorForOutputs()
|
||||
if err == virtqueue.ErrNotEnoughFreeDescriptors {
|
||||
dev.fullTable = true
|
||||
idx, err = dev.TransmitQueue.TakeSingle(context.TODO())
|
||||
}
|
||||
} else {
|
||||
idx, err = dev.TransmitQueue.TakeSingle(context.TODO())
|
||||
}
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("transmit queue: %w", err)
|
||||
}
|
||||
buf, err := dev.TransmitQueue.GetDescriptorItem(idx)
|
||||
if err != nil {
|
||||
return 0, nil, fmt.Errorf("get descriptor chain: %w", err)
|
||||
}
|
||||
return idx, buf, nil
|
||||
}
|
||||
|
||||
func (dev *Device) TransmitPacket(pkt *packet.OutPacket, kick bool) error {
|
||||
if len(pkt.SegmentIDs) == 0 {
|
||||
return nil
|
||||
}
|
||||
for idx := range pkt.SegmentIDs {
|
||||
segmentID := pkt.SegmentIDs[idx]
|
||||
dev.TransmitQueue.SetDescSize(segmentID, len(pkt.Segments[idx]))
|
||||
}
|
||||
err := dev.TransmitQueue.OfferDescriptorChains(pkt.SegmentIDs, false)
|
||||
if err != nil {
|
||||
return fmt.Errorf("offer descriptor chains: %w", err)
|
||||
}
|
||||
pkt.Reset()
|
||||
if kick {
|
||||
if err := dev.TransmitQueue.Kick(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
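GetPacketForTx and TransmitPacket are meant to be used as a pair: borrow a descriptor-backed buffer, write the frame after the virtio-net header, record the segment on the outgoing packet, then offer the chain back to the device. A rough sketch of that flow; the exact OutPacket bookkeeping here is an assumption, not taken from this change:

    idx, buf, err := dev.GetPacketForTx()
    if err != nil {
        return err
    }
    n := copy(buf[virtio.NetHdrSize:], frame)      // leave room for the virtio-net header
    pkt.SegmentIDs = append(pkt.SegmentIDs, idx)   // assumed field usage
    pkt.Segments = append(pkt.Segments, buf[:virtio.NetHdrSize+n])
    return dev.TransmitPacket(pkt, true)           // kick once the chain is offered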
|
||||
|
||||
func (dev *Device) TransmitPackets(pkts []*packet.OutPacket) error {
|
||||
if len(pkts) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := range pkts {
|
||||
if err := dev.TransmitPacket(pkts[i], false); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := dev.TransmitQueue.Kick(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: Make above methods cancelable by taking a context.Context argument?
|
||||
// TODO: Implement zero-copy variants to transmit and receive packets?
|
||||
|
||||
// processChains processes as many chains as needed to create one packet. The number of processed chains is returned.
|
||||
func (dev *Device) processChains(pkt *packet.VirtIOPacket, chains []virtqueue.UsedElement) (int, error) {
|
||||
//read first element to see how many descriptors we need:
|
||||
pkt.Reset()
|
||||
|
||||
err := dev.ReceiveQueue.GetDescriptorInbuffers(uint16(chains[0].DescriptorIndex), &pkt.ChainRefs)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("get descriptor chain: %w", err)
|
||||
}
|
||||
if len(pkt.ChainRefs) == 0 {
|
||||
return 1, nil
|
||||
}
|
||||
|
||||
// The specification requires that the first descriptor chain starts
|
||||
// with a virtio-net header. It is not clear whether it is also
|
||||
// required to be fully contained in the first buffer of that
|
||||
// descriptor chain, but it is reasonable to assume that this is
|
||||
// always the case.
|
||||
// The decode method already does the buffer length check.
|
||||
if err = pkt.Header.Decode(pkt.ChainRefs[0][0:]); err != nil {
|
||||
// The device misbehaved. There is no way we can gracefully
|
||||
// recover from this, because we don't know how many of the
|
||||
// following descriptor chains belong to this packet.
|
||||
return 0, fmt.Errorf("decode vnethdr: %w", err)
|
||||
}
|
||||
|
||||
//we have the header now: what do we need to do?
|
||||
if int(pkt.Header.NumBuffers) > len(chains) {
|
||||
return 0, fmt.Errorf("number of buffers is greater than number of chains %d", len(chains))
|
||||
}
|
||||
if int(pkt.Header.NumBuffers) != 1 {
|
||||
return 0, fmt.Errorf("too smol-brain to handle more than one chain right now: %d chains", len(chains))
|
||||
}
|
||||
if chains[0].Length > 16000 {
|
||||
//todo!
|
||||
return 1, fmt.Errorf("too big packet length: %d", chains[0].Length)
|
||||
}
|
||||
|
||||
//shift the buffer out of out:
|
||||
pkt.Payload = pkt.ChainRefs[0][virtio.NetHdrSize:chains[0].Length]
|
||||
pkt.Chains = append(pkt.Chains, uint16(chains[0].DescriptorIndex))
|
||||
return 1, nil
|
||||
|
||||
//cursor := n - virtio.NetHdrSize
|
||||
//
|
||||
//if uint32(n) >= chains[0].Length && pkt.Header.NumBuffers == 1 {
|
||||
// pkt.Payload = pkt.Payload[:chains[0].Length-virtio.NetHdrSize]
|
||||
// return 1, nil
|
||||
//}
|
||||
//
|
||||
//i := 1
|
||||
//// we used chain 0 already
|
||||
//for i = 1; i < len(chains); i++ {
|
||||
// n, err = dev.ReceiveQueue.GetDescriptorChainContents(uint16(chains[i].DescriptorIndex), pkt.Payload[cursor:], int(chains[i].Length))
|
||||
// if err != nil {
|
||||
// // When this fails we may miss to free some descriptor chains. We
|
||||
// // could try to mitigate this by deferring the freeing somehow, but
|
||||
// // it's not worth the hassle. When this method fails, the queue will
|
||||
// // be in a broken state anyway.
|
||||
// return i, fmt.Errorf("get descriptor chain: %w", err)
|
||||
// }
|
||||
// cursor += n
|
||||
//}
|
||||
////todo this has to be wrong
|
||||
//pkt.Payload = pkt.Payload[:cursor]
|
||||
//return i, nil
|
||||
}
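For reference, the header that pkt.Header.Decode parses above is the standard virtio-net header; with mergeable RX buffers negotiated (FeatureNetMergeRXBuffers) it is 12 bytes and carries the num_buffers count used here. A sketch of its layout, with field names assumed rather than copied from util/virtio:

    type netHdr struct {
        Flags      uint8  // e.g. VIRTIO_NET_HDR_F_NEEDS_CSUM
        GSOType    uint8  // e.g. VIRTIO_NET_HDR_GSO_NONE
        HdrLen     uint16
        GSOSize    uint16
        CsumStart  uint16
        CsumOffset uint16
        NumBuffers uint16 // how many descriptor chains make up this packet
    }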
|
||||
|
||||
func (dev *Device) ReceivePackets(out []*packet.VirtIOPacket) (int, error) {
	//todo optimize?
	var chains []virtqueue.UsedElement
	var err error
	//if len(dev.extraRx) == 0 {
	chains, err = dev.ReceiveQueue.BlockAndGetHeadsCapped(context.TODO(), len(out))
	if err != nil {
		return 0, err
	}
	if len(chains) == 0 {
		return 0, nil
	}
	//} else {
	//	chains = dev.extraRx
	//}

	numPackets := 0
	chainsIdx := 0
	for numPackets = 0; chainsIdx < len(chains); numPackets++ {
		if numPackets >= len(out) {
			return numPackets, fmt.Errorf("dropping %d packets, no room", len(chains)-numPackets)
		}
		numChains, err := dev.processChains(out[numPackets], chains[chainsIdx:])
		if err != nil {
			return 0, err
		}
		chainsIdx += numChains
	}

	// Now that we have copied all buffers, we can recycle the used descriptor chains.
	//if err = dev.ReceiveQueue.OfferDescriptorChains(chains); err != nil {
	//	return 0, err
	//}

	return numPackets, nil
}
|
||||
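For orientation, a rough sketch of how a caller might drive ReceivePackets in a loop. This is not part of the diff: the pool construction, the assumption that packet.VirtIOPacket has a usable zero value, and the handlePayload helper are all illustrative.

// Sketch only: dev is an initialized *Device; handlePayload is a hypothetical consumer.
func receiveLoop(dev *Device) error {
	batch := make([]*packet.VirtIOPacket, 64)
	for i := range batch {
		batch[i] = new(packet.VirtIOPacket)
	}
	for {
		n, err := dev.ReceivePackets(batch)
		if err != nil {
			return err
		}
		for _, pkt := range batch[:n] {
			handlePayload(pkt.Payload)
		}
	}
}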
@@ -1,3 +0,0 @@
// Package vhostnet implements methods to initialize vhost networking devices
// within the kernel-level virtio implementation and communicate with them.
package vhostnet
@@ -1,31 +0,0 @@
package vhostnet

import (
	"fmt"
	"unsafe"

	"github.com/slackhq/nebula/overlay/vhost"
)

const (
	// vhostNetIoctlSetBackend can be used to attach a virtqueue to a RAW socket
	// or TAP device.
	//
	// Request payload: [vhost.QueueFile]
	// Kernel name: VHOST_NET_SET_BACKEND
	vhostNetIoctlSetBackend = 0x4008af30
)

// SetQueueBackend attaches a virtqueue of the vhost networking device
// described by controlFD to the given backend file descriptor.
// The backend file descriptor can either be a RAW socket or a TAP device. When
// it is -1, the queue will be detached.
func SetQueueBackend(controlFD int, queueIndex uint32, backendFD int) error {
	if err := vhost.IoctlPtr(controlFD, vhostNetIoctlSetBackend, unsafe.Pointer(&vhost.QueueFile{
		QueueIndex: queueIndex,
		FD:         int32(backendFD),
	})); err != nil {
		return fmt.Errorf("set queue backend file descriptor: %w", err)
	}
	return nil
}
|
||||
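As a sanity check on the magic number above: it matches the standard Linux _IOW encoding for VHOST_NET_SET_BACKEND with an 8-byte payload (a u32 queue index plus an s32 fd). A tiny standalone verification, with constant names invented for illustration:

package main

import "fmt"

// Linux ioctl requests encode dir(2 bits) | size(14 bits) | type(8 bits) | nr(8 bits).
const (
	iocWrite     = 1 // _IOC_WRITE
	iocNrShift   = 0
	iocTypeShift = 8
	iocSizeShift = 16
	iocDirShift  = 30
)

func iow(typ, nr, size uint32) uint32 {
	return iocWrite<<iocDirShift | size<<iocSizeShift | typ<<iocTypeShift | nr<<iocNrShift
}

func main() {
	fmt.Printf("%#x\n", iow(0xAF, 0x30, 8)) // 0x4008af30
}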
@@ -1,69 +0,0 @@
|
||||
package vhostnet
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/slackhq/nebula/overlay/virtqueue"
|
||||
)
|
||||
|
||||
type optionValues struct {
|
||||
queueSize int
|
||||
backendFD int
|
||||
}
|
||||
|
||||
func (o *optionValues) apply(options []Option) {
|
||||
for _, option := range options {
|
||||
option(o)
|
||||
}
|
||||
}
|
||||
|
||||
func (o *optionValues) validate() error {
|
||||
if o.queueSize == -1 {
|
||||
return errors.New("queue size is required")
|
||||
}
|
||||
if err := virtqueue.CheckQueueSize(o.queueSize); err != nil {
|
||||
return err
|
||||
}
|
||||
if o.backendFD == -1 {
|
||||
return errors.New("backend file descriptor is required")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var optionDefaults = optionValues{
|
||||
// Required.
|
||||
queueSize: -1,
|
||||
// Required.
|
||||
backendFD: -1,
|
||||
}
|
||||
|
||||
// Option can be passed to [NewDevice] to influence device creation.
|
||||
type Option func(*optionValues)
|
||||
|
||||
// WithQueueSize returns an [Option] that sets the size of the TX and RX queues
|
||||
// that are to be created for the device. It specifies the number of
|
||||
// entries/buffers each queue can hold. This also affects the memory
|
||||
// consumption.
|
||||
// This is required and must be an integer from 1 to 32768 that is also a power
|
||||
// of 2.
|
||||
func WithQueueSize(queueSize int) Option {
|
||||
return func(o *optionValues) { o.queueSize = queueSize }
|
||||
}
|
||||
|
||||
// WithBackendFD returns an [Option] that sets the file descriptor of the
|
||||
// backend that will be used for the queues of the device. The device will write
|
||||
// and read packets to/from that backend. The file descriptor can either be of a
|
||||
// RAW socket or TUN/TAP device.
|
||||
// Either this or [WithBackendDevice] is required.
|
||||
func WithBackendFD(backendFD int) Option {
|
||||
return func(o *optionValues) { o.backendFD = backendFD }
|
||||
}
|
||||
|
||||
//// WithBackendDevice returns an [Option] that sets the given TAP device as the
|
||||
//// backend that will be used for the queues of the device. The device will
|
||||
//// write and read packets to/from that backend. The TAP device should have been
|
||||
//// created with the [tuntap.WithVirtioNetHdr] option enabled.
|
||||
//// Either this or [WithBackendFD] is required.
|
||||
//func WithBackendDevice(dev *tuntap.Device) Option {
|
||||
// return func(o *optionValues) { o.backendFD = int(dev.File().Fd()) }
|
||||
//}
|
||||
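Pieced together from the definitions above, a minimal sketch of how these options are presumably combined by the device constructor referenced in the doc comments. NewDevice itself and the tapFD variable are not part of this diff and are assumed here purely for illustration:

dev, err := vhostnet.NewDevice(
	vhostnet.WithQueueSize(256),   // must be a power of two between 1 and 32768
	vhostnet.WithBackendFD(tapFD), // RAW socket or TUN/TAP file descriptor
)
if err != nil {
	return err
}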
@@ -1,23 +0,0 @@
|
||||
Significant portions of this code are derived from https://pkg.go.dev/github.com/hetznercloud/virtio-go
|
||||
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2025 Hetzner Cloud GmbH
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -1,140 +0,0 @@
|
||||
package virtqueue

import (
	"fmt"
	"unsafe"
)

// availableRingFlag is a flag that describes an [AvailableRing].
type availableRingFlag uint16

const (
	// availableRingFlagNoInterrupt is used by the guest to advise the host to
	// not interrupt it when consuming a buffer. It's unreliable, so it's simply
	// an optimization.
	availableRingFlagNoInterrupt availableRingFlag = 1 << iota
)

// availableRingSize is the number of bytes needed to store an [AvailableRing]
// with the given queue size in memory.
func availableRingSize(queueSize int) int {
	return 6 + 2*queueSize
}

// availableRingAlignment is the minimum alignment of an [AvailableRing]
// in memory, as required by the virtio spec.
const availableRingAlignment = 2
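To make the formula above concrete: the 6 fixed bytes are the flags field (2), the ring index (2), and the trailing used_event field (2), plus one 16-bit ring entry per queue slot. A small self-contained sketch of the resulting sizes, with queue sizes chosen arbitrarily:

package main

import "fmt"

// Mirrors availableRingSize above: flags(2) + index(2) + queueSize entries of
// 2 bytes each + used_event(2).
func availableRingSize(queueSize int) int {
	return 6 + 2*queueSize
}

func main() {
	for _, qs := range []int{8, 256, 32768} {
		fmt.Printf("queue size %5d -> %6d bytes\n", qs, availableRingSize(qs))
	}
	// queue size     8 ->     22 bytes
	// queue size   256 ->    518 bytes
	// queue size 32768 ->  65542 bytes
}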
|
||||
// AvailableRing is used by the driver to offer descriptor chains to the device.
|
||||
// Each ring entry refers to the head of a descriptor chain. It is only written
|
||||
// to by the driver and read by the device.
|
||||
//
|
||||
// Because the size of the ring depends on the queue size, we cannot define a
|
||||
// Go struct with a static size that maps to the memory of the ring. Instead,
|
||||
// this struct only contains pointers to the corresponding memory areas.
|
||||
type AvailableRing struct {
|
||||
initialized bool
|
||||
|
||||
// flags that describe this ring.
|
||||
flags *availableRingFlag
|
||||
// ringIndex indicates where the driver would put the next entry into the
|
||||
// ring (modulo the queue size).
|
||||
ringIndex *uint16
|
||||
// ring references buffers using the index of the head of the descriptor
|
||||
// chain in the [DescriptorTable]. It wraps around at queue size.
|
||||
ring []uint16
|
||||
// usedEvent is not used by this implementation, but we reserve it anyway to
|
||||
// avoid issues in case a device may try to access it, contrary to the
|
||||
// virtio specification.
|
||||
usedEvent *uint16
|
||||
}
|
||||
|
||||
// newAvailableRing creates an available ring that uses the given underlying
|
||||
// memory. The length of the memory slice must match the size needed for the
|
||||
// ring (see [availableRingSize]) for the given queue size.
|
||||
func newAvailableRing(queueSize int, mem []byte) *AvailableRing {
|
||||
ringSize := availableRingSize(queueSize)
|
||||
if len(mem) != ringSize {
|
||||
panic(fmt.Sprintf("memory size (%v) does not match required size "+
|
||||
"for available ring: %v", len(mem), ringSize))
|
||||
}
|
||||
|
||||
return &AvailableRing{
|
||||
initialized: true,
|
||||
flags: (*availableRingFlag)(unsafe.Pointer(&mem[0])),
|
||||
ringIndex: (*uint16)(unsafe.Pointer(&mem[2])),
|
||||
ring: unsafe.Slice((*uint16)(unsafe.Pointer(&mem[4])), queueSize),
|
||||
usedEvent: (*uint16)(unsafe.Pointer(&mem[ringSize-2])),
|
||||
}
|
||||
}
|
||||
|
||||
// Address returns the pointer to the beginning of the ring in memory.
|
||||
// Do not modify the memory directly to not interfere with this implementation.
|
||||
func (r *AvailableRing) Address() uintptr {
|
||||
if !r.initialized {
|
||||
panic("available ring is not initialized")
|
||||
}
|
||||
return uintptr(unsafe.Pointer(r.flags))
|
||||
}
|
||||
|
||||
// offer adds the given descriptor chain heads to the available ring and
|
||||
// advances the ring index accordingly to make the device process the new
|
||||
// descriptor chains.
|
||||
func (r *AvailableRing) offerElements(chains []UsedElement) {
|
||||
//always called under lock
|
||||
//r.mu.Lock()
|
||||
//defer r.mu.Unlock()
|
||||
|
||||
// Add descriptor chain heads to the ring.
|
||||
for offset, x := range chains {
|
||||
// The 16-bit ring index may overflow. This is expected and is not an
|
||||
// issue because the size of the ring array (which equals the queue
|
||||
// size) is always a power of 2 and smaller than the highest possible
|
||||
// 16-bit value.
|
||||
insertIndex := int(*r.ringIndex+uint16(offset)) % len(r.ring)
|
||||
r.ring[insertIndex] = x.GetHead()
|
||||
}
|
||||
|
||||
// Increase the ring index by the number of descriptor chains added to the
|
||||
// ring.
|
||||
*r.ringIndex += uint16(len(chains))
|
||||
}
|
||||
|
||||
func (r *AvailableRing) offer(chains []uint16) {
|
||||
//always called under lock
|
||||
//r.mu.Lock()
|
||||
//defer r.mu.Unlock()
|
||||
|
||||
// Add descriptor chain heads to the ring.
|
||||
for offset, x := range chains {
|
||||
// The 16-bit ring index may overflow. This is expected and is not an
|
||||
// issue because the size of the ring array (which equals the queue
|
||||
// size) is always a power of 2 and smaller than the highest possible
|
||||
// 16-bit value.
|
||||
insertIndex := int(*r.ringIndex+uint16(offset)) % len(r.ring)
|
||||
r.ring[insertIndex] = x
|
||||
}
|
||||
|
||||
// Increase the ring index by the number of descriptor chains added to the
|
||||
// ring.
|
||||
*r.ringIndex += uint16(len(chains))
|
||||
}
|
||||
|
||||
func (r *AvailableRing) offerSingle(x uint16) {
|
||||
//always called under lock
|
||||
//r.mu.Lock()
|
||||
//defer r.mu.Unlock()
|
||||
|
||||
offset := 0
|
||||
// Add descriptor chain heads to the ring.
|
||||
|
||||
// The 16-bit ring index may overflow. This is expected and is not an
|
||||
// issue because the size of the ring array (which equals the queue
|
||||
// size) is always a power of 2 and smaller than the highest possible
|
||||
// 16-bit value.
|
||||
insertIndex := int(*r.ringIndex+uint16(offset)) % len(r.ring)
|
||||
r.ring[insertIndex] = x
|
||||
|
||||
// Increase the ring index by the number of descriptor chains added to the ring.
|
||||
*r.ringIndex += 1
|
||||
}
|
||||
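The comments in the offer methods lean on the fact that a wrapping 16-bit ring index still lands on the right slot as long as the queue size is a power of two (65536 is then a multiple of the queue size, so the wrap does not skew the modulo). A standalone illustration of that property, with values picked arbitrarily:

package main

import "fmt"

func main() {
	const queueSize = 8 // always a power of two

	// The available-ring index is a free-running uint16 that is allowed to
	// wrap around; the slot is always taken modulo the queue size.
	var ringIndex uint16 = 65534
	for offset := 0; offset < 4; offset++ {
		insertIndex := int(ringIndex+uint16(offset)) % queueSize
		fmt.Printf("ringIndex+offset = %5d -> slot %d\n", ringIndex+uint16(offset), insertIndex)
	}
	// ringIndex+offset = 65534 -> slot 6
	// ringIndex+offset = 65535 -> slot 7
	// ringIndex+offset =     0 -> slot 0
	// ringIndex+offset =     1 -> slot 1
}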
@@ -1,71 +0,0 @@
|
||||
package virtqueue
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAvailableRing_MemoryLayout(t *testing.T) {
|
||||
const queueSize = 2
|
||||
|
||||
memory := make([]byte, availableRingSize(queueSize))
|
||||
r := newAvailableRing(queueSize, memory)
|
||||
|
||||
*r.flags = 0x01ff
|
||||
*r.ringIndex = 1
|
||||
r.ring[0] = 0x1234
|
||||
r.ring[1] = 0x5678
|
||||
|
||||
assert.Equal(t, []byte{
|
||||
0xff, 0x01,
|
||||
0x01, 0x00,
|
||||
0x34, 0x12,
|
||||
0x78, 0x56,
|
||||
0x00, 0x00,
|
||||
}, memory)
|
||||
}
|
||||
|
||||
func TestAvailableRing_Offer(t *testing.T) {
|
||||
const queueSize = 8
|
||||
|
||||
chainHeads := []uint16{42, 33, 69}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
startRingIndex uint16
|
||||
expectedRingIndex uint16
|
||||
expectedRing []uint16
|
||||
}{
|
||||
{
|
||||
name: "no overflow",
|
||||
startRingIndex: 0,
|
||||
expectedRingIndex: 3,
|
||||
expectedRing: []uint16{42, 33, 69, 0, 0, 0, 0, 0},
|
||||
},
|
||||
{
|
||||
name: "ring overflow",
|
||||
startRingIndex: 6,
|
||||
expectedRingIndex: 9,
|
||||
expectedRing: []uint16{69, 0, 0, 0, 0, 0, 42, 33},
|
||||
},
|
||||
{
|
||||
name: "index overflow",
|
||||
startRingIndex: 65535,
|
||||
expectedRingIndex: 2,
|
||||
expectedRing: []uint16{33, 69, 0, 0, 0, 0, 0, 42},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
memory := make([]byte, availableRingSize(queueSize))
|
||||
r := newAvailableRing(queueSize, memory)
|
||||
*r.ringIndex = tt.startRingIndex
|
||||
|
||||
r.offer(chainHeads)
|
||||
|
||||
assert.Equal(t, tt.expectedRingIndex, *r.ringIndex)
|
||||
assert.Equal(t, tt.expectedRing, r.ring)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,43 +0,0 @@
|
||||
package virtqueue
|
||||
|
||||
// descriptorFlag is a flag that describes a [Descriptor].
|
||||
type descriptorFlag uint16
|
||||
|
||||
const (
|
||||
// descriptorFlagHasNext marks a descriptor chain as continuing via the next
|
||||
// field.
|
||||
descriptorFlagHasNext descriptorFlag = 1 << iota
|
||||
// descriptorFlagWritable marks a buffer as device write-only (otherwise
|
||||
// device read-only).
|
||||
descriptorFlagWritable
|
||||
// descriptorFlagIndirect means the buffer contains a list of buffer
|
||||
// descriptors to provide an additional layer of indirection.
|
||||
// Only allowed when the [virtio.FeatureIndirectDescriptors] feature was
|
||||
// negotiated.
|
||||
descriptorFlagIndirect
|
||||
)
|
||||
|
||||
// descriptorSize is the number of bytes needed to store a [Descriptor] in
|
||||
// memory.
|
||||
const descriptorSize = 16
|
||||
|
||||
// Descriptor describes (a part of) a buffer which is either read-only for the
|
||||
// device or write-only for the device (depending on [descriptorFlagWritable]).
|
||||
// Multiple descriptors can be chained to produce a "descriptor chain" that can
|
||||
// contain both device-readable and device-writable buffers. Device-readable
|
||||
// descriptors always come first in a chain. A single, large buffer may be
|
||||
// split up by chaining multiple similar descriptors that reference different
|
||||
// memory pages. This is required because buffers may exceed a single page size
// and the memory accessed by the device is expected to be contiguous.
|
||||
type Descriptor struct {
|
||||
// address is the address to the continuous memory holding the data for this
|
||||
// descriptor.
|
||||
address uintptr
|
||||
// length is the amount of bytes stored at address.
|
||||
length uint32
|
||||
// flags that describe this descriptor.
|
||||
flags descriptorFlag
|
||||
// next contains the index of the next descriptor continuing this descriptor
|
||||
// chain when the [descriptorFlagHasNext] flag is set.
|
||||
next uint16
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
package virtqueue
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDescriptor_Size(t *testing.T) {
|
||||
assert.EqualValues(t, descriptorSize, unsafe.Sizeof(Descriptor{}))
|
||||
}
|
||||
@@ -1,641 +0,0 @@
|
||||
package virtqueue
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrDescriptorChainEmpty is returned when a descriptor chain would contain
|
||||
// no buffers, which is not allowed.
|
||||
ErrDescriptorChainEmpty = errors.New("empty descriptor chains are not allowed")
|
||||
|
||||
// ErrNotEnoughFreeDescriptors is returned when the free descriptors are
|
||||
// exhausted, meaning that the queue is full.
|
||||
ErrNotEnoughFreeDescriptors = errors.New("not enough free descriptors, queue is full")
|
||||
|
||||
// ErrInvalidDescriptorChain is returned when a descriptor chain is not
|
||||
// valid for a given operation.
|
||||
ErrInvalidDescriptorChain = errors.New("invalid descriptor chain")
|
||||
)
|
||||
|
||||
// noFreeHead is used to mark when all descriptors are in use and we have no
|
||||
// free chain. This value is impossible to occur as an index naturally, because
|
||||
// it exceeds the maximum queue size.
|
||||
const noFreeHead = uint16(math.MaxUint16)
|
||||
|
||||
// descriptorTableSize is the number of bytes needed to store a
|
||||
// [DescriptorTable] with the given queue size in memory.
|
||||
func descriptorTableSize(queueSize int) int {
|
||||
return descriptorSize * queueSize
|
||||
}
|
||||
|
||||
// descriptorTableAlignment is the minimum alignment of a [DescriptorTable]
|
||||
// in memory, as required by the virtio spec.
|
||||
const descriptorTableAlignment = 16
|
||||
|
||||
// DescriptorTable is a table that holds [Descriptor]s, addressed via their
|
||||
// index in the slice.
|
||||
type DescriptorTable struct {
|
||||
descriptors []Descriptor
|
||||
|
||||
// freeHeadIndex is the index of the head of the descriptor chain which
|
||||
// contains all currently unused descriptors. When all descriptors are in
|
||||
// use, this has the special value of noFreeHead.
|
||||
freeHeadIndex uint16
|
||||
// freeNum tracks the number of descriptors which are currently not in use.
|
||||
freeNum uint16
|
||||
|
||||
bufferBase uintptr
|
||||
bufferSize int
|
||||
itemSize int
|
||||
}
|
||||
|
||||
// newDescriptorTable creates a descriptor table that uses the given underlying
|
||||
// memory. The length of the memory slice must match the size needed for the
|
||||
// descriptor table (see [descriptorTableSize]) for the given queue size.
|
||||
//
|
||||
// Before this descriptor table can be used, [initialize] must be called.
|
||||
func newDescriptorTable(queueSize int, mem []byte, itemSize int) *DescriptorTable {
|
||||
dtSize := descriptorTableSize(queueSize)
|
||||
if len(mem) != dtSize {
|
||||
panic(fmt.Sprintf("memory size (%v) does not match required size "+
|
||||
"for descriptor table: %v", len(mem), dtSize))
|
||||
}
|
||||
|
||||
return &DescriptorTable{
|
||||
descriptors: unsafe.Slice((*Descriptor)(unsafe.Pointer(&mem[0])), queueSize),
|
||||
// We have no free descriptors until they were initialized.
|
||||
freeHeadIndex: noFreeHead,
|
||||
freeNum: 0,
|
||||
itemSize: itemSize, //todo configurable? needs to be page-aligned
|
||||
}
|
||||
}
|
||||
|
||||
// Address returns the pointer to the beginning of the descriptor table in
|
||||
// memory. Do not modify the memory directly to not interfere with this
|
||||
// implementation.
|
||||
func (dt *DescriptorTable) Address() uintptr {
|
||||
if dt.descriptors == nil {
|
||||
panic("descriptor table is not initialized")
|
||||
}
|
||||
//should be same as dt.bufferBase
|
||||
return uintptr(unsafe.Pointer(&dt.descriptors[0]))
|
||||
}
|
||||
|
||||
func (dt *DescriptorTable) Size() uintptr {
|
||||
if dt.descriptors == nil {
|
||||
panic("descriptor table is not initialized")
|
||||
}
|
||||
return uintptr(dt.bufferSize)
|
||||
}
|
||||
|
||||
// BufferAddresses returns a map of pointer->size for all allocations used by the table
|
||||
func (dt *DescriptorTable) BufferAddresses() map[uintptr]int {
|
||||
if dt.descriptors == nil {
|
||||
panic("descriptor table is not initialized")
|
||||
}
|
||||
|
||||
return map[uintptr]int{dt.bufferBase: dt.bufferSize}
|
||||
}
|
||||
|
||||
// initializeDescriptors allocates buffers with the size of a full memory page
|
||||
// for each descriptor in the table. While this may be a bit wasteful, it makes
|
||||
// dealing with descriptors way easier. Without this preallocation, we would
|
||||
// have to allocate and free memory on demand, increasing complexity.
|
||||
//
|
||||
// All descriptors will be marked as free and will form a free chain. The
|
||||
// addresses of all descriptors will be populated while their length remains
|
||||
// zero.
|
||||
func (dt *DescriptorTable) initializeDescriptors() error {
|
||||
numDescriptors := len(dt.descriptors)
|
||||
|
||||
// Allocate ONE large region for all buffers
|
||||
totalSize := dt.itemSize * numDescriptors
|
||||
basePtr, err := unix.MmapPtr(-1, 0, nil, uintptr(totalSize),
|
||||
unix.PROT_READ|unix.PROT_WRITE,
|
||||
unix.MAP_PRIVATE|unix.MAP_ANONYMOUS)
|
||||
if err != nil {
|
||||
return fmt.Errorf("allocate buffer memory for descriptors: %w", err)
|
||||
}
|
||||
|
||||
// Store the base for cleanup later
|
||||
dt.bufferBase = uintptr(basePtr)
|
||||
dt.bufferSize = totalSize
|
||||
|
||||
for i := range dt.descriptors {
|
||||
dt.descriptors[i] = Descriptor{
|
||||
address: dt.bufferBase + uintptr(i*dt.itemSize),
|
||||
length: 0,
|
||||
// All descriptors should form a free chain that loops around.
|
||||
flags: descriptorFlagHasNext,
|
||||
next: uint16((i + 1) % len(dt.descriptors)),
|
||||
}
|
||||
}
|
||||
|
||||
// All descriptors are free to use now.
|
||||
dt.freeHeadIndex = 0
|
||||
dt.freeNum = uint16(len(dt.descriptors))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
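For a feel of what this single allocation costs, a small worked example. The item size and queue size below are assumptions for illustration only; the real values come from the queue configuration:

package main

import "fmt"

func main() {
	// Assumed example values: 256 descriptors, one page-sized (4 KiB) buffer each.
	const (
		queueSize = 256
		itemSize  = 4096
	)
	fmt.Printf("single mapping: %d bytes (%d KiB)\n", queueSize*itemSize, queueSize*itemSize/1024)
	// single mapping: 1048576 bytes (1024 KiB)

	// The buffer for descriptor i starts at bufferBase + i*itemSize:
	for _, i := range []int{0, 1, 255} {
		fmt.Printf("descriptor %3d -> offset %#x\n", i, i*itemSize)
	}
	// descriptor   0 -> offset 0x0
	// descriptor   1 -> offset 0x1000
	// descriptor 255 -> offset 0xff000
}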
// releaseBuffers releases all allocated buffers for this descriptor table.
|
||||
// The implementation will try to release as many buffers as possible and
|
||||
// collect potential errors before returning them.
|
||||
// The descriptor table should no longer be used after calling this.
|
||||
func (dt *DescriptorTable) releaseBuffers() error {
|
||||
for i := range dt.descriptors {
|
||||
descriptor := &dt.descriptors[i]
|
||||
descriptor.address = 0
|
||||
}
|
||||
|
||||
// As a safety measure, make sure no descriptors can be used anymore.
|
||||
dt.freeHeadIndex = noFreeHead
|
||||
dt.freeNum = 0
|
||||
|
||||
	if dt.bufferBase != 0 {
		// The pointer points to memory not managed by Go, so this conversion
		// is safe. See https://github.com/golang/go/issues/58625
		//goland:noinspection GoVetUnsafePointer
		err := unix.MunmapPtr(unsafe.Pointer(dt.bufferBase), uintptr(dt.bufferSize))
		if err != nil {
			return fmt.Errorf("release buffer memory: %w", err)
		}
		// Only clear the base address once the mapping has actually been released.
		dt.bufferBase = 0
	}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// createDescriptorChain creates a new descriptor chain within the descriptor
|
||||
// table which contains a number of device-readable buffers (out buffers) and
|
||||
// device-writable buffers (in buffers).
|
||||
//
|
||||
// All buffers in the outBuffers slice will be concatenated by chaining
|
||||
// descriptors, one for each buffer in the slice. The size of the single buffers
|
||||
// must not exceed the size of a memory page (see [os.Getpagesize]).
|
||||
// When numInBuffers is greater than zero, the given number of device-writable
|
||||
// descriptors will be appended to the end of the chain, each referencing a
|
||||
// whole memory page.
|
||||
//
|
||||
// The index of the head of the new descriptor chain will be returned. Callers
|
||||
// should make sure to free the descriptor chain using [freeDescriptorChain]
|
||||
// after it was used by the device.
|
||||
//
|
||||
// When there are not enough free descriptors to hold the given number of
|
||||
// buffers, an [ErrNotEnoughFreeDescriptors] will be returned. In this case, the
|
||||
// caller should try again after some descriptor chains were used by the device
|
||||
// and returned back into the free chain.
|
||||
func (dt *DescriptorTable) createDescriptorChain(outBuffers [][]byte, numInBuffers int) (uint16, error) {
|
||||
// Calculate the number of descriptors needed to build the chain.
|
||||
numDesc := uint16(len(outBuffers) + numInBuffers)
|
||||
|
||||
// Descriptor chains must always contain at least one descriptor.
|
||||
if numDesc < 1 {
|
||||
return 0, ErrDescriptorChainEmpty
|
||||
}
|
||||
|
||||
// Do we still have enough free descriptors?
|
||||
if numDesc > dt.freeNum {
|
||||
return 0, ErrNotEnoughFreeDescriptors
|
||||
}
|
||||
|
||||
// Above validation ensured that there is at least one free descriptor, so
|
||||
// the free descriptor chain head should be valid.
|
||||
if dt.freeHeadIndex == noFreeHead {
|
||||
panic("free descriptor chain head is unset but there should be free descriptors")
|
||||
}
|
||||
|
||||
// To avoid having to iterate over the whole table to find the descriptor
|
||||
// pointing to the head just to replace the free head, we instead always
|
||||
// create descriptor chains from the descriptors coming after the head.
|
||||
// This way we only have to touch the head as a last resort, when all other
|
||||
// descriptors are already used.
|
||||
head := dt.descriptors[dt.freeHeadIndex].next
|
||||
next := head
|
||||
tail := head
|
||||
for i, buffer := range outBuffers {
|
||||
desc := &dt.descriptors[next]
|
||||
checkUnusedDescriptorLength(next, desc)
|
||||
|
||||
if len(buffer) > dt.itemSize {
|
||||
// The caller should already prevent that from happening.
|
||||
panic(fmt.Sprintf("out buffer %d has size %d which exceeds desc length %d", i, len(buffer), dt.itemSize))
|
||||
}
|
||||
|
||||
// Copy the buffer to the memory referenced by the descriptor.
|
||||
// The descriptor address points to memory not managed by Go, so this
|
||||
// conversion is safe. See https://github.com/golang/go/issues/58625
|
||||
//goland:noinspection GoVetUnsafePointer
|
||||
copy(unsafe.Slice((*byte)(unsafe.Pointer(desc.address)), dt.itemSize), buffer)
|
||||
desc.length = uint32(len(buffer))
|
||||
|
||||
// Clear the flags in case there were any others set.
|
||||
desc.flags = descriptorFlagHasNext
|
||||
|
||||
tail = next
|
||||
next = desc.next
|
||||
}
|
||||
for range numInBuffers {
|
||||
desc := &dt.descriptors[next]
|
||||
checkUnusedDescriptorLength(next, desc)
|
||||
|
||||
// Give the device the maximum available number of bytes to write into.
|
||||
desc.length = uint32(dt.itemSize)
|
||||
|
||||
// Mark the descriptor as device-writable.
|
||||
desc.flags = descriptorFlagHasNext | descriptorFlagWritable
|
||||
|
||||
tail = next
|
||||
next = desc.next
|
||||
}
|
||||
|
||||
// The last descriptor should end the chain.
|
||||
tailDesc := &dt.descriptors[tail]
|
||||
tailDesc.flags &= ^descriptorFlagHasNext
|
||||
tailDesc.next = 0 // Not necessary to clear this, it's just for looks.
|
||||
|
||||
dt.freeNum -= numDesc
|
||||
|
||||
if dt.freeNum == 0 {
|
||||
// The last descriptor in the chain should be the free chain head
|
||||
// itself.
|
||||
if tail != dt.freeHeadIndex {
|
||||
panic("descriptor chain takes up all free descriptors but does not end with the free chain head")
|
||||
}
|
||||
|
||||
// When this new chain takes up all remaining descriptors, we no longer
|
||||
// have a free chain.
|
||||
dt.freeHeadIndex = noFreeHead
|
||||
} else {
|
||||
// We took some descriptors out of the free chain, so make sure to close
|
||||
// the circle again.
|
||||
dt.descriptors[dt.freeHeadIndex].next = next
|
||||
}
|
||||
|
||||
return head, nil
|
||||
}
|
||||
|
||||
func (dt *DescriptorTable) CreateDescriptorForOutputs() (uint16, error) {
|
||||
// TODO: consider filling the whole table up front instead of allocating one descriptor at a time.
|
||||
// Do we still have enough free descriptors?
|
||||
|
||||
if 1 > dt.freeNum {
|
||||
return 0, ErrNotEnoughFreeDescriptors
|
||||
}
|
||||
|
||||
// Above validation ensured that there is at least one free descriptor, so
|
||||
// the free descriptor chain head should be valid.
|
||||
if dt.freeHeadIndex == noFreeHead {
|
||||
panic("free descriptor chain head is unset but there should be free descriptors")
|
||||
}
|
||||
|
||||
// To avoid having to iterate over the whole table to find the descriptor
|
||||
// pointing to the head just to replace the free head, we instead always
|
||||
// create descriptor chains from the descriptors coming after the head.
|
||||
// This way we only have to touch the head as a last resort, when all other
|
||||
// descriptors are already used.
|
||||
head := dt.descriptors[dt.freeHeadIndex].next
|
||||
desc := &dt.descriptors[head]
|
||||
next := desc.next
|
||||
|
||||
checkUnusedDescriptorLength(head, desc)
|
||||
|
||||
// Give the device the maximum available number of bytes to write into.
|
||||
desc.length = uint32(dt.itemSize)
|
||||
desc.flags = 0 // descriptorFlagWritable
|
||||
desc.next = 0 // Not necessary to clear this, it's just for looks.
|
||||
|
||||
dt.freeNum -= 1
|
||||
|
||||
if dt.freeNum == 0 {
|
||||
// The last descriptor in the chain should be the free chain head
|
||||
// itself.
|
||||
if next != dt.freeHeadIndex {
|
||||
panic("descriptor chain takes up all free descriptors but does not end with the free chain head")
|
||||
}
|
||||
|
||||
// When this new chain takes up all remaining descriptors, we no longer
|
||||
// have a free chain.
|
||||
dt.freeHeadIndex = noFreeHead
|
||||
} else {
|
||||
// We took some descriptors out of the free chain, so make sure to close
|
||||
// the circle again.
|
||||
dt.descriptors[dt.freeHeadIndex].next = next
|
||||
}
|
||||
|
||||
return head, nil
|
||||
}
|
||||
|
||||
func (dt *DescriptorTable) createDescriptorForInputs() (uint16, error) {
|
||||
// Do we still have enough free descriptors?
|
||||
if 1 > dt.freeNum {
|
||||
return 0, ErrNotEnoughFreeDescriptors
|
||||
}
|
||||
|
||||
// Above validation ensured that there is at least one free descriptor, so
|
||||
// the free descriptor chain head should be valid.
|
||||
if dt.freeHeadIndex == noFreeHead {
|
||||
panic("free descriptor chain head is unset but there should be free descriptors")
|
||||
}
|
||||
|
||||
// To avoid having to iterate over the whole table to find the descriptor
|
||||
// pointing to the head just to replace the free head, we instead always
|
||||
// create descriptor chains from the descriptors coming after the head.
|
||||
// This way we only have to touch the head as a last resort, when all other
|
||||
// descriptors are already used.
|
||||
head := dt.descriptors[dt.freeHeadIndex].next
|
||||
desc := &dt.descriptors[head]
|
||||
next := desc.next
|
||||
|
||||
checkUnusedDescriptorLength(head, desc)
|
||||
|
||||
// Give the device the maximum available number of bytes to write into.
|
||||
desc.length = uint32(dt.itemSize)
|
||||
desc.flags = descriptorFlagWritable
|
||||
desc.next = 0 // Not necessary to clear this, it's just for looks.
|
||||
|
||||
dt.freeNum -= 1
|
||||
|
||||
if dt.freeNum == 0 {
|
||||
// The last descriptor in the chain should be the free chain head
|
||||
// itself.
|
||||
if next != dt.freeHeadIndex {
|
||||
panic("descriptor chain takes up all free descriptors but does not end with the free chain head")
|
||||
}
|
||||
|
||||
// When this new chain takes up all remaining descriptors, we no longer
|
||||
// have a free chain.
|
||||
dt.freeHeadIndex = noFreeHead
|
||||
} else {
|
||||
// We took some descriptors out of the free chain, so make sure to close
|
||||
// the circle again.
|
||||
dt.descriptors[dt.freeHeadIndex].next = next
|
||||
}
|
||||
|
||||
return head, nil
|
||||
}
|
||||
|
||||
// TODO: Implement a zero-copy variant of createDescriptorChain?
|
||||
|
||||
// getDescriptorChain returns the device-readable buffers (out buffers) and
|
||||
// device-writable buffers (in buffers) of the descriptor chain that starts with
|
||||
// the given head index. The descriptor chain must have been created using
|
||||
// [createDescriptorChain] and must not have been freed yet (meaning that the
|
||||
// head index must not be contained in the free chain).
|
||||
//
|
||||
// Be careful to only access the returned buffer slices when the device has not
|
||||
// yet or is no longer using them. They must not be accessed after
|
||||
// [freeDescriptorChain] has been called.
|
||||
func (dt *DescriptorTable) getDescriptorChain(head uint16) (outBuffers, inBuffers [][]byte, err error) {
|
||||
if int(head) >= len(dt.descriptors) {
|
||||
return nil, nil, fmt.Errorf("%w: index out of range", ErrInvalidDescriptorChain)
|
||||
}
|
||||
|
||||
// Iterate over the chain. The iteration is limited to the queue size to
|
||||
// avoid ending up in an endless loop when things go very wrong.
|
||||
next := head
|
||||
for range len(dt.descriptors) {
|
||||
if next == dt.freeHeadIndex {
|
||||
return nil, nil, fmt.Errorf("%w: must not be part of the free chain", ErrInvalidDescriptorChain)
|
||||
}
|
||||
|
||||
desc := &dt.descriptors[next]
|
||||
|
||||
// The descriptor address points to memory not managed by Go, so this
|
||||
// conversion is safe. See https://github.com/golang/go/issues/58625
|
||||
//goland:noinspection GoVetUnsafePointer
|
||||
bs := unsafe.Slice((*byte)(unsafe.Pointer(desc.address)), desc.length)
|
||||
|
||||
if desc.flags&descriptorFlagWritable == 0 {
|
||||
outBuffers = append(outBuffers, bs)
|
||||
} else {
|
||||
inBuffers = append(inBuffers, bs)
|
||||
}
|
||||
|
||||
// Is this the tail of the chain?
|
||||
if desc.flags&descriptorFlagHasNext == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Detect loops.
|
||||
if desc.next == head {
|
||||
return nil, nil, fmt.Errorf("%w: contains a loop", ErrInvalidDescriptorChain)
|
||||
}
|
||||
|
||||
next = desc.next
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (dt *DescriptorTable) getDescriptorItem(head uint16) ([]byte, error) {
|
||||
if int(head) >= len(dt.descriptors) {
|
||||
return nil, fmt.Errorf("%w: index out of range", ErrInvalidDescriptorChain)
|
||||
}
|
||||
|
||||
desc := &dt.descriptors[head] //todo this is a pretty nasty hack with no checks
|
||||
|
||||
// The descriptor address points to memory not managed by Go, so this
|
||||
// conversion is safe. See https://github.com/golang/go/issues/58625
|
||||
//goland:noinspection GoVetUnsafePointer
|
||||
bs := unsafe.Slice((*byte)(unsafe.Pointer(desc.address)), desc.length)
|
||||
return bs, nil
|
||||
}
|
||||
|
||||
func (dt *DescriptorTable) getDescriptorInbuffers(head uint16, inBuffers *[][]byte) error {
|
||||
if int(head) >= len(dt.descriptors) {
|
||||
return fmt.Errorf("%w: index out of range", ErrInvalidDescriptorChain)
|
||||
}
|
||||
|
||||
// Iterate over the chain. The iteration is limited to the queue size to
|
||||
// avoid ending up in an endless loop when things go very wrong.
|
||||
next := head
|
||||
for range len(dt.descriptors) {
|
||||
if next == dt.freeHeadIndex {
|
||||
return fmt.Errorf("%w: must not be part of the free chain", ErrInvalidDescriptorChain)
|
||||
}
|
||||
|
||||
desc := &dt.descriptors[next]
|
||||
|
||||
// The descriptor address points to memory not managed by Go, so this
|
||||
// conversion is safe. See https://github.com/golang/go/issues/58625
|
||||
//goland:noinspection GoVetUnsafePointer
|
||||
bs := unsafe.Slice((*byte)(unsafe.Pointer(desc.address)), desc.length)
|
||||
|
||||
if desc.flags&descriptorFlagWritable == 0 {
|
||||
return fmt.Errorf("there should not be an outbuffer in %d", head)
|
||||
} else {
|
||||
*inBuffers = append(*inBuffers, bs)
|
||||
}
|
||||
|
||||
// Is this the tail of the chain?
|
||||
if desc.flags&descriptorFlagHasNext == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Detect loops.
|
||||
if desc.next == head {
|
||||
return fmt.Errorf("%w: contains a loop", ErrInvalidDescriptorChain)
|
||||
}
|
||||
|
||||
next = desc.next
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dt *DescriptorTable) getDescriptorChainContents(head uint16, out []byte, maxLen int) (int, error) {
|
||||
if int(head) >= len(dt.descriptors) {
|
||||
return 0, fmt.Errorf("%w: index out of range", ErrInvalidDescriptorChain)
|
||||
}
|
||||
|
||||
// Iterate over the chain. The iteration is limited to the queue size to
|
||||
// avoid ending up in an endless loop when things go very wrong.
|
||||
|
||||
length := 0
|
||||
//find length
|
||||
next := head
|
||||
for range len(dt.descriptors) {
|
||||
if next == dt.freeHeadIndex {
|
||||
return 0, fmt.Errorf("%w: must not be part of the free chain", ErrInvalidDescriptorChain)
|
||||
}
|
||||
|
||||
desc := &dt.descriptors[next]
|
||||
|
||||
if desc.flags&descriptorFlagWritable == 0 {
|
||||
return 0, fmt.Errorf("receive queue contains device-readable buffer")
|
||||
}
|
||||
length += int(desc.length)
|
||||
|
||||
// Is this the tail of the chain?
|
||||
if desc.flags&descriptorFlagHasNext == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// Detect loops.
|
||||
if desc.next == head {
|
||||
return 0, fmt.Errorf("%w: contains a loop", ErrInvalidDescriptorChain)
|
||||
}
|
||||
|
||||
next = desc.next
|
||||
}
|
||||
	if maxLen > 0 {
		length = min(maxLen, length)
	}
	// Cap the output slice to the number of bytes we are about to copy.
	out = out[:length]

	// Now do the copying, starting again from the head of the chain.
	next = head
	copied := 0
	for range len(dt.descriptors) {
|
||||
desc := &dt.descriptors[next]
|
||||
|
||||
// The descriptor address points to memory not managed by Go, so this
|
||||
// conversion is safe. See https://github.com/golang/go/issues/58625
|
||||
//goland:noinspection GoVetUnsafePointer
|
||||
bs := unsafe.Slice((*byte)(unsafe.Pointer(desc.address)), min(uint32(length-copied), desc.length))
|
||||
copied += copy(out[copied:], bs)
|
||||
|
||||
// Is this the tail of the chain?
|
||||
if desc.flags&descriptorFlagHasNext == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
// we did this already, no need to detect loops.
|
||||
next = desc.next
|
||||
}
|
||||
if copied != length {
|
||||
panic(fmt.Sprintf("expected to copy %d bytes but only copied %d bytes", length, copied))
|
||||
}
|
||||
|
||||
return length, nil
|
||||
}
|
||||
|
||||
// freeDescriptorChain can be used to free a descriptor chain when it is no
|
||||
// longer in use. The descriptor chain that starts with the given index will be
|
||||
// put back into the free chain, so the descriptors can be used for later calls
|
||||
// of [createDescriptorChain].
|
||||
// The descriptor chain must have been created using [createDescriptorChain] and
|
||||
// must not have been freed yet (meaning that the head index must not be
|
||||
// contained in the free chain).
|
||||
func (dt *DescriptorTable) freeDescriptorChain(head uint16) error {
|
||||
if int(head) >= len(dt.descriptors) {
|
||||
return fmt.Errorf("%w: index out of range", ErrInvalidDescriptorChain)
|
||||
}
|
||||
|
||||
// Iterate over the chain. The iteration is limited to the queue size to
|
||||
// avoid ending up in an endless loop when things go very wrong.
|
||||
next := head
|
||||
var tailDesc *Descriptor
|
||||
var chainLen uint16
|
||||
for range len(dt.descriptors) {
|
||||
if next == dt.freeHeadIndex {
|
||||
return fmt.Errorf("%w: must not be part of the free chain", ErrInvalidDescriptorChain)
|
||||
}
|
||||
|
||||
desc := &dt.descriptors[next]
|
||||
chainLen++
|
||||
|
||||
// Set the length of all unused descriptors back to zero.
|
||||
desc.length = 0
|
||||
|
||||
// Unset all flags except the next flag.
|
||||
desc.flags &= descriptorFlagHasNext
|
||||
|
||||
// Is this the tail of the chain?
|
||||
if desc.flags&descriptorFlagHasNext == 0 {
|
||||
tailDesc = desc
|
||||
break
|
||||
}
|
||||
|
||||
// Detect loops.
|
||||
if desc.next == head {
|
||||
return fmt.Errorf("%w: contains a loop", ErrInvalidDescriptorChain)
|
||||
}
|
||||
|
||||
next = desc.next
|
||||
}
|
||||
if tailDesc == nil {
|
||||
// A descriptor chain longer than the queue size but without loops
|
||||
// should be impossible.
|
||||
panic(fmt.Sprintf("could not find a tail for descriptor chain starting at %d", head))
|
||||
}
|
||||
|
||||
// The tail descriptor does not have the next flag set, but when it comes
|
||||
// back into the free chain, it should have.
|
||||
tailDesc.flags = descriptorFlagHasNext
|
||||
|
||||
if dt.freeHeadIndex == noFreeHead {
|
||||
// The whole free chain was used up, so we turn this returned descriptor
|
||||
// chain into the new free chain by completing the circle and using its
|
||||
// head.
|
||||
tailDesc.next = head
|
||||
dt.freeHeadIndex = head
|
||||
} else {
|
||||
// Attach the returned chain at the beginning of the free chain but
|
||||
// right after the free chain head.
|
||||
freeHeadDesc := &dt.descriptors[dt.freeHeadIndex]
|
||||
tailDesc.next = freeHeadDesc.next
|
||||
freeHeadDesc.next = head
|
||||
}
|
||||
|
||||
dt.freeNum += chainLen
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkUnusedDescriptorLength asserts that the length of an unused descriptor
|
||||
// is zero, as it should be.
|
||||
// This is not a requirement by the virtio spec but rather a thing we do to
|
||||
// notice when our algorithm goes sideways.
|
||||
func checkUnusedDescriptorLength(index uint16, desc *Descriptor) {
|
||||
if desc.length != 0 {
|
||||
panic(fmt.Sprintf("descriptor %d should be unused but has a non-zero length", index))
|
||||
}
|
||||
}
|
||||
@@ -1,407 +0,0 @@
|
||||
package virtqueue
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
"unsafe"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDescriptorTable_InitializeDescriptors(t *testing.T) {
|
||||
const queueSize = 32
|
||||
|
||||
dt := DescriptorTable{
|
||||
descriptors: make([]Descriptor, queueSize),
|
||||
}
|
||||
|
||||
assert.NoError(t, dt.initializeDescriptors())
|
||||
t.Cleanup(func() {
|
||||
assert.NoError(t, dt.releaseBuffers())
|
||||
})
|
||||
|
||||
for i, descriptor := range dt.descriptors {
|
||||
assert.NotZero(t, descriptor.address)
|
||||
assert.Zero(t, descriptor.length)
|
||||
assert.EqualValues(t, descriptorFlagHasNext, descriptor.flags)
|
||||
assert.EqualValues(t, (i+1)%queueSize, descriptor.next)
|
||||
}
|
||||
}
|
||||
|
||||
func TestDescriptorTable_DescriptorChains(t *testing.T) {
|
||||
// Use a very short queue size to not make this test overly verbose.
|
||||
const queueSize = 8
|
||||
|
||||
pageSize := os.Getpagesize() * 2
|
||||
|
||||
// Initialize descriptor table.
|
||||
dt := DescriptorTable{
|
||||
descriptors: make([]Descriptor, queueSize),
|
||||
}
|
||||
assert.NoError(t, dt.initializeDescriptors())
|
||||
t.Cleanup(func() {
|
||||
assert.NoError(t, dt.releaseBuffers())
|
||||
})
|
||||
|
||||
// Some utilities for easier checking if the descriptor table looks as
|
||||
// expected.
|
||||
type desc struct {
|
||||
buffer []byte
|
||||
flags descriptorFlag
|
||||
next uint16
|
||||
}
|
||||
assertDescriptorTable := func(expected [queueSize]desc) {
|
||||
for i := 0; i < queueSize; i++ {
|
||||
actualDesc := &dt.descriptors[i]
|
||||
expectedDesc := &expected[i]
|
||||
assert.Equal(t, uint32(len(expectedDesc.buffer)), actualDesc.length)
|
||||
if len(expectedDesc.buffer) > 0 {
|
||||
//goland:noinspection GoVetUnsafePointer
|
||||
assert.EqualValues(t,
|
||||
unsafe.Slice((*byte)(unsafe.Pointer(actualDesc.address)), actualDesc.length),
|
||||
expectedDesc.buffer)
|
||||
}
|
||||
assert.Equal(t, expectedDesc.flags, actualDesc.flags)
|
||||
if expectedDesc.flags&descriptorFlagHasNext != 0 {
|
||||
assert.Equal(t, expectedDesc.next, actualDesc.next)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Initial state: All descriptors are in the free chain.
|
||||
assert.Equal(t, uint16(0), dt.freeHeadIndex)
|
||||
assert.Equal(t, uint16(8), dt.freeNum)
|
||||
assertDescriptorTable([queueSize]desc{
|
||||
{
|
||||
// Free head.
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 1,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 2,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 3,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 4,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 5,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 6,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 7,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 0,
|
||||
},
|
||||
})
|
||||
|
||||
// Create the first chain.
|
||||
firstChain, err := dt.createDescriptorChain([][]byte{
|
||||
makeTestBuffer(t, 26),
|
||||
makeTestBuffer(t, 256),
|
||||
}, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint16(1), firstChain)
|
||||
|
||||
// Now there should be a new chain next to the free chain.
|
||||
assert.Equal(t, uint16(0), dt.freeHeadIndex)
|
||||
assert.Equal(t, uint16(5), dt.freeNum)
|
||||
assertDescriptorTable([queueSize]desc{
|
||||
{
|
||||
// Free head.
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 4,
|
||||
},
|
||||
{
|
||||
// Head of first chain.
|
||||
buffer: makeTestBuffer(t, 26),
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 2,
|
||||
},
|
||||
{
|
||||
buffer: makeTestBuffer(t, 256),
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 3,
|
||||
},
|
||||
{
|
||||
// Tail of first chain.
|
||||
buffer: make([]byte, pageSize),
|
||||
flags: descriptorFlagWritable,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 5,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 6,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 7,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 0,
|
||||
},
|
||||
})
|
||||
|
||||
// Create a second chain with only a single in buffer.
|
||||
secondChain, err := dt.createDescriptorChain(nil, 1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint16(4), secondChain)
|
||||
|
||||
// Now there should be two chains next to the free chain.
|
||||
assert.Equal(t, uint16(0), dt.freeHeadIndex)
|
||||
assert.Equal(t, uint16(4), dt.freeNum)
|
||||
assertDescriptorTable([queueSize]desc{
|
||||
{
|
||||
// Free head.
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 5,
|
||||
},
|
||||
{
|
||||
// Head of the first chain.
|
||||
buffer: makeTestBuffer(t, 26),
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 2,
|
||||
},
|
||||
{
|
||||
buffer: makeTestBuffer(t, 256),
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 3,
|
||||
},
|
||||
{
|
||||
// Tail of the first chain.
|
||||
buffer: make([]byte, pageSize),
|
||||
flags: descriptorFlagWritable,
|
||||
},
|
||||
{
|
||||
// Head and tail of the second chain.
|
||||
buffer: make([]byte, pageSize),
|
||||
flags: descriptorFlagWritable,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 6,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 7,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 0,
|
||||
},
|
||||
})
|
||||
|
||||
// Create a third chain taking up all remaining descriptors.
|
||||
thirdChain, err := dt.createDescriptorChain([][]byte{
|
||||
makeTestBuffer(t, 42),
|
||||
makeTestBuffer(t, 96),
|
||||
makeTestBuffer(t, 33),
|
||||
makeTestBuffer(t, 222),
|
||||
}, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint16(5), thirdChain)
|
||||
|
||||
// Now there should be three chains and no free chain.
|
||||
assert.Equal(t, noFreeHead, dt.freeHeadIndex)
|
||||
assert.Equal(t, uint16(0), dt.freeNum)
|
||||
assertDescriptorTable([queueSize]desc{
|
||||
{
|
||||
// Tail of the third chain.
|
||||
buffer: makeTestBuffer(t, 222),
|
||||
},
|
||||
{
|
||||
// Head of the first chain.
|
||||
buffer: makeTestBuffer(t, 26),
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 2,
|
||||
},
|
||||
{
|
||||
buffer: makeTestBuffer(t, 256),
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 3,
|
||||
},
|
||||
{
|
||||
// Tail of the first chain.
|
||||
buffer: make([]byte, pageSize),
|
||||
flags: descriptorFlagWritable,
|
||||
},
|
||||
{
|
||||
// Head and tail of the second chain.
|
||||
buffer: make([]byte, pageSize),
|
||||
flags: descriptorFlagWritable,
|
||||
},
|
||||
{
|
||||
// Head of the third chain.
|
||||
buffer: makeTestBuffer(t, 42),
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 6,
|
||||
},
|
||||
{
|
||||
buffer: makeTestBuffer(t, 96),
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 7,
|
||||
},
|
||||
{
|
||||
buffer: makeTestBuffer(t, 33),
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 0,
|
||||
},
|
||||
})
|
||||
|
||||
// Free the third chain.
|
||||
assert.NoError(t, dt.freeDescriptorChain(thirdChain))
|
||||
|
||||
// Now there should be two chains and a free chain again.
|
||||
assert.Equal(t, uint16(5), dt.freeHeadIndex)
|
||||
assert.Equal(t, uint16(4), dt.freeNum)
|
||||
assertDescriptorTable([queueSize]desc{
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 5,
|
||||
},
|
||||
{
|
||||
// Head of the first chain.
|
||||
buffer: makeTestBuffer(t, 26),
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 2,
|
||||
},
|
||||
{
|
||||
buffer: makeTestBuffer(t, 256),
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 3,
|
||||
},
|
||||
{
|
||||
// Tail of the first chain.
|
||||
buffer: make([]byte, pageSize),
|
||||
flags: descriptorFlagWritable,
|
||||
},
|
||||
{
|
||||
// Head and tail of the second chain.
|
||||
buffer: make([]byte, pageSize),
|
||||
flags: descriptorFlagWritable,
|
||||
},
|
||||
{
|
||||
// Free head.
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 6,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 7,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 0,
|
||||
},
|
||||
})
|
||||
|
||||
// Free the first chain.
|
||||
assert.NoError(t, dt.freeDescriptorChain(firstChain))
|
||||
|
||||
// Now there should be only a single chain next to the free chain.
|
||||
assert.Equal(t, uint16(5), dt.freeHeadIndex)
|
||||
assert.Equal(t, uint16(7), dt.freeNum)
|
||||
assertDescriptorTable([queueSize]desc{
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 5,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 2,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 3,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 6,
|
||||
},
|
||||
{
|
||||
// Head and tail of the second chain.
|
||||
buffer: make([]byte, pageSize),
|
||||
flags: descriptorFlagWritable,
|
||||
},
|
||||
{
|
||||
// Free head.
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 1,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 7,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 0,
|
||||
},
|
||||
})
|
||||
|
||||
// Free the second chain.
|
||||
assert.NoError(t, dt.freeDescriptorChain(secondChain))
|
||||
|
||||
// Now all descriptors should be in the free chain again.
|
||||
assert.Equal(t, uint16(5), dt.freeHeadIndex)
|
||||
assert.Equal(t, uint16(8), dt.freeNum)
|
||||
assertDescriptorTable([queueSize]desc{
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 5,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 2,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 3,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 6,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 1,
|
||||
},
|
||||
{
|
||||
// Free head.
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 4,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 7,
|
||||
},
|
||||
{
|
||||
flags: descriptorFlagHasNext,
|
||||
next: 0,
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
func makeTestBuffer(t *testing.T, length int) []byte {
|
||||
t.Helper()
|
||||
buf := make([]byte, length)
|
||||
for i := 0; i < length; i++ {
|
||||
buf[i] = byte(length - i)
|
||||
}
|
||||
return buf
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
// Package virtqueue implements the driver-side for a virtio queue as described
// in the specification:
// https://docs.oasis-open.org/virtio/virtio/v1.2/csd01/virtio-v1.2-csd01.html#x1-270006
// This package does not make assumptions about the device that consumes the
// queue. It rather just allocates the queue structures in memory and provides
// methods to interact with it.
package virtqueue