Mirror of https://github.com/slackhq/nebula.git (synced 2025-11-22 00:15:37 +01:00)
Compare commits
131 Commits
| SHA1 |
|---|
| a92056a7db |
| 4eb1da0958 |
| 50b24c102e |
| c0130f8161 |
| f19a28645e |
| fd1906b16f |
| d6e4b88bb5 |
| 18f69af455 |
| aa18d7fa4f |
| b5c3486796 |
| f39bfbb7fa |
| 4f4941e187 |
| 5f17db5dfa |
| f31bab5f1a |
| 9cd944d320 |
| f7db0eb5cc |
| 7e7d5e00ca |
| 24f336ec56 |
| d7f52dec41 |
| e54f9dd206 |
| df78158cfa |
| 8b55caa15e |
| 7ed9f2a688 |
| 3aca576b07 |
| a99618e95c |
| 8e94eb974e |
| 41e2e1de02 |
| d95fb4a314 |
| cdcea00669 |
| 9bd92a7fc2 |
| a5a07cc760 |
| c1711bc9c5 |
| 7efa750aef |
| a390125935 |
| bbb15f8cb1 |
| 8b68a08723 |
| f8fb9759e9 |
| 1f1d660200 |
| 279265058f |
| 2a778de07e |
| 2affd371e3 |
| cc8b3cc961 |
| f346cf4109 |
| 8f44f22c37 |
| 8822f1366c |
| e3f5a129c1 |
| 0f0534d739 |
| c5a403b7a8 |
| f23d328561 |
| a977ee653d |
| 1f83d1758d |
| 3210198276 |
| 0cef634635 |
| 637dc18bf8 |
| ea36949d8a |
| 0564d0a2cf |
| b22ba6eb49 |
| 3a221812f6 |
| 927ff4cc03 |
| e5945a60aa |
| 072edd56b3 |
| beb5f6bddc |
| 8be9792059 |
| af2fc48378 |
| 1d2f95e718 |
| 3a8743d511 |
| 0209402942 |
| fb55f5b762 |
| 01cddb8013 |
| 1083279a45 |
| fe16ea566d |
| 3356e03d85 |
| f41db52560 |
| 5181cb0474 |
| a44e1b8b05 |
| 276978377a |
| 777eb96aea |
| 0912ef14f4 |
| 77a8ce1712 |
| 87b628ba24 |
| 50d6a1e8ca |
| e78fe0b9ef |
| 5fccbb8676 |
| c289c7a7ca |
| e3fbfbfd4d |
| 282ca4368e |
| 280fa026ea |
| dbdb48f182 |
| f7e392995a |
| d271df8da8 |
| eea5e6a5df |
| 790268a176 |
| 06b480e177 |
| 076ebc6c6e |
| 7edcf620c0 |
| 5a131b2975 |
| 223cc6e660 |
| 5671c6607c |
| 7ecafbe61d |
| 546eb3bfbc |
| 7364d99e34 |
| 83b6dc7b16 |
| 3d0da7c859 |
| ed00f5d530 |
| 38e56a4858 |
| fce93ccb54 |
| 0d715effbc |
| 0c003b64f1 |
| 14d0106716 |
| 959b015b3b |
| 0bffa76b5e |
| 03e70210a5 |
| 9c6592b159 |
| e5af94e27a |
| 96f51f78ea |
| a10baeee92 |
| 52c9e360e7 |
| 8caaff7109 |
| 1e3c155896 |
| f5db03c834 |
| c5ce945852 |
| 7e380bde7e |
| a3e59a38ef |
| 8ba5d64dbc |
| 3bbf5f4e67 |
| 928731acfe |
| 57eb80e9fb |
| 96f4dcaab8 |
| 6d8c5f437c |
| 165b671e70 |
| 6be0bad68a |
.github/ISSUE_TEMPLATE/bug-report.yml (vendored, 20 changes)

@@ -14,7 +14,7 @@ body:
   - type: input
     id: version
     attributes:
-      label: What version of `nebula` are you using?
+      label: What version of `nebula` are you using? (`nebula -version`)
       placeholder: 0.0.0
     validations:
       required: true
@@ -41,10 +41,17 @@ body:
     attributes:
       label: Logs from affected hosts
       description: |
-        Provide logs from all affected hosts during the time of the issue.
+        Please provide logs from ALL affected hosts during the time of the issue. If you do not provide logs we will be unable to assist you!
+
+        [Learn how to find Nebula logs here.](https://nebula.defined.net/docs/guides/viewing-nebula-logs/)
+
        Improve formatting by using <code>```</code> at the beginning and end of each log block.
+      value: |
+        ```
+
+        ```
     validations:
-      required: false
+      required: true

   - type: textarea
     id: configs
@@ -52,6 +59,11 @@ body:
       label: Config files from affected hosts
       description: |
         Provide config files for all affected hosts.
+
        Improve formatting by using <code>```</code> at the beginning and end of each config file.
+      value: |
+        ```
+
+        ```
     validations:
-      required: false
+      required: true

.github/dependabot.yml (vendored, new file, 22 lines)

@@ -0,0 +1,22 @@
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+    groups:
+      golang-x-dependencies:
+        patterns:
+          - "golang.org/x/*"
+      zx2c4-dependencies:
+        patterns:
+          - "golang.zx2c4.com/*"
+      protobuf-dependencies:
+        patterns:
+          - "github.com/golang/protobuf"
+          - "google.golang.org/protobuf"

.github/workflows/gofmt.yml (vendored, 17 changes)

@@ -14,21 +14,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:

-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
-        id: go
+      - uses: actions/checkout@v4

-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-
-      - uses: actions/cache@v2
+      - uses: actions/setup-go@v5
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-gofmt1.20-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-gofmt1.20-
+          go-version: '1.22'
+          check-latest: true

       - name: Install goimports
         run: |

.github/workflows/release.yml (vendored, 297 changes)

@@ -7,25 +7,24 @@ name: Create release and upload binaries

 jobs:
   build-linux:
-    name: Build Linux All
+    name: Build Linux/BSD All
     runs-on: ubuntu-latest
     steps:
-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
+      - uses: actions/checkout@v4

-      - name: Checkout code
-        uses: actions/checkout@v2
+      - uses: actions/setup-go@v5
+        with:
+          go-version: '1.22'
+          check-latest: true

       - name: Build
         run: |
-          make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd
+          make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd release-openbsd release-netbsd
           mkdir release
           mv build/*.tar.gz release

       - name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
         with:
           name: linux-latest
           path: release
@@ -34,13 +33,12 @@ jobs:
     name: Build Windows
     runs-on: windows-latest
     steps:
-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
+      - uses: actions/checkout@v4

-      - name: Checkout code
-        uses: actions/checkout@v2
+      - uses: actions/setup-go@v5
+        with:
+          go-version: '1.22'
+          check-latest: true

       - name: Build
         run: |
@@ -57,7 +55,7 @@ jobs:
           mv dist\windows\wintun build\dist\windows\

       - name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
         with:
           name: windows-latest
           path: build
@@ -68,17 +66,16 @@ jobs:
       HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }}
     runs-on: macos-11
     steps:
-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
+      - uses: actions/checkout@v4

-      - name: Checkout code
-        uses: actions/checkout@v2
+      - uses: actions/setup-go@v5
+        with:
+          go-version: '1.22'
+          check-latest: true

       - name: Import certificates
         if: env.HAS_SIGNING_CREDS == 'true'
-        uses: Apple-Actions/import-codesign-certs@v1
+        uses: Apple-Actions/import-codesign-certs@v2
         with:
           p12-file-base64: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_P12_BASE64 }}
           p12-password: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_PASSWORD }}
@@ -107,22 +104,72 @@ jobs:
           fi

       - name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
         with:
           name: darwin-latest
           path: ./release/*

+  build-docker:
+    name: Create and Upload Docker Images
+    # Technically we only need build-linux to succeed, but if any platforms fail we'll
+    # want to investigate and restart the build
+    needs: [build-linux, build-darwin, build-windows]
+    runs-on: ubuntu-latest
+    env:
+      HAS_DOCKER_CREDS: ${{ vars.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
+    # XXX It's not possible to write a conditional here, so instead we do it on every step
+    #if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
+    steps:
+      # Be sure to checkout the code before downloading artifacts, or they will
+      # be overwritten
+      - name: Checkout code
+        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
+        uses: actions/checkout@v4
+
+      - name: Download artifacts
+        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
+        uses: actions/download-artifact@v4
+        with:
+          name: linux-latest
+          path: artifacts
+
+      - name: Login to Docker Hub
+        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
+        uses: docker/login-action@v3
+        with:
+          username: ${{ vars.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Set up Docker Buildx
+        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
+        uses: docker/setup-buildx-action@v3
+
+      - name: Build and push images
+        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
+        env:
+          DOCKER_IMAGE_REPO: ${{ vars.DOCKER_IMAGE_REPO || 'nebulaoss/nebula' }}
+          DOCKER_IMAGE_TAG: ${{ vars.DOCKER_IMAGE_TAG || 'latest' }}
+        run: |
+          mkdir -p build/linux-{amd64,arm64}
+          tar -zxvf artifacts/nebula-linux-amd64.tar.gz -C build/linux-amd64/
+          tar -zxvf artifacts/nebula-linux-arm64.tar.gz -C build/linux-arm64/
+          docker buildx build . --push -f docker/Dockerfile --platform linux/amd64,linux/arm64 --tag "${DOCKER_IMAGE_REPO}:${DOCKER_IMAGE_TAG}" --tag "${DOCKER_IMAGE_REPO}:${GITHUB_REF#refs/tags/v}"
+
   release:
     name: Create and Upload Release
     needs: [build-linux, build-darwin, build-windows]
     runs-on: ubuntu-latest
     steps:
+      - uses: actions/checkout@v4
+
       - name: Download artifacts
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v4
         with:
           path: artifacts

       - name: Zip Windows
         run: |
-          cd windows-latest
+          cd artifacts/windows-latest
           cp windows-amd64/* .
           zip -r nebula-windows-amd64.zip nebula.exe nebula-cert.exe dist
           cp windows-arm64/* .
@@ -130,6 +177,7 @@ jobs:

       - name: Create sha256sum
         run: |
+          cd artifacts
           for dir in linux-latest darwin-latest windows-latest
           do
           (
@@ -159,195 +207,12 @@ jobs:

       - name: Create Release
-        id: create_release
-        uses: actions/create-release@v1
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          tag_name: ${{ github.ref }}
-          release_name: Release ${{ github.ref }}
-          draft: false
-          prerelease: false
-
-      ##
-      ## Upload assets (I wish we could just upload the whole folder at once...
-      ##
-
-      - name: Upload SHASUM256.txt
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./SHASUM256.txt
-          asset_name: SHASUM256.txt
-          asset_content_type: text/plain
-
-      - name: Upload darwin zip
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./darwin-latest/nebula-darwin.zip
-          asset_name: nebula-darwin.zip
-          asset_content_type: application/zip
-
-      - name: Upload windows-amd64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./windows-latest/nebula-windows-amd64.zip
-          asset_name: nebula-windows-amd64.zip
-          asset_content_type: application/zip
-
-      - name: Upload windows-arm64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./windows-latest/nebula-windows-arm64.zip
-          asset_name: nebula-windows-arm64.zip
-          asset_content_type: application/zip
-
-      - name: Upload linux-amd64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-amd64.tar.gz
-          asset_name: nebula-linux-amd64.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-386
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-386.tar.gz
-          asset_name: nebula-linux-386.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-ppc64le
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-ppc64le.tar.gz
-          asset_name: nebula-linux-ppc64le.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-arm-5
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-arm-5.tar.gz
-          asset_name: nebula-linux-arm-5.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-arm-6
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-arm-6.tar.gz
-          asset_name: nebula-linux-arm-6.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-arm-7
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-arm-7.tar.gz
-          asset_name: nebula-linux-arm-7.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-arm64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-arm64.tar.gz
-          asset_name: nebula-linux-arm64.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mips
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mips.tar.gz
-          asset_name: nebula-linux-mips.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mipsle
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mipsle.tar.gz
-          asset_name: nebula-linux-mipsle.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mips64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mips64.tar.gz
-          asset_name: nebula-linux-mips64.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mips64le
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mips64le.tar.gz
-          asset_name: nebula-linux-mips64le.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mips-softfloat
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mips-softfloat.tar.gz
-          asset_name: nebula-linux-mips-softfloat.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-riscv64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-riscv64.tar.gz
-          asset_name: nebula-linux-riscv64.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload freebsd-amd64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-freebsd-amd64.tar.gz
-          asset_name: nebula-freebsd-amd64.tar.gz
-          asset_content_type: application/gzip
+        run: |
+          cd artifacts
+          gh release create \
+            --verify-tag \
+            --title "Release ${{ github.ref_name }}" \
+            "${{ github.ref_name }}" \
+            SHASUM256.txt *-latest/*.zip *-latest/*.tar.gz

.github/workflows/smoke-extra.yml (vendored, new file, 48 lines)

@@ -0,0 +1,48 @@
+name: smoke-extra
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    types: [opened, synchronize, labeled, reopened]
+    paths:
+      - '.github/workflows/smoke**'
+      - '**Makefile'
+      - '**.go'
+      - '**.proto'
+      - 'go.mod'
+      - 'go.sum'
+jobs:
+
+  smoke-extra:
+    if: github.ref == 'refs/heads/master' || contains(github.event.pull_request.labels.*.name, 'smoke-test-extra')
+    name: Run extra smoke tests
+    runs-on: ubuntu-latest
+    steps:
+
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-go@v5
+        with:
+          go-version-file: 'go.mod'
+          check-latest: true
+
+      - name: install vagrant
+        run: sudo apt-get update && sudo apt-get install -y vagrant virtualbox
+
+      - name: freebsd-amd64
+        run: make smoke-vagrant/freebsd-amd64
+
+      - name: openbsd-amd64
+        run: make smoke-vagrant/openbsd-amd64
+
+      - name: netbsd-amd64
+        run: make smoke-vagrant/netbsd-amd64
+
+      - name: linux-386
+        run: make smoke-vagrant/linux-386
+
+      - name: linux-amd64-ipv6disable
+        run: make smoke-vagrant/linux-amd64-ipv6disable
+
+    timeout-minutes: 30

.github/workflows/smoke.yml (vendored, 19 changes)

@@ -18,24 +18,15 @@ jobs:
     runs-on: ubuntu-latest
     steps:

-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
-        id: go
+      - uses: actions/checkout@v4

-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-
-      - uses: actions/cache@v2
+      - uses: actions/setup-go@v5
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go1.20-
+          go-version: '1.22'
+          check-latest: true

       - name: build
-        run: make bin-docker
+        run: make bin-docker CGO_ENABLED=1 BUILD_ARGS=-race

       - name: setup docker image
         working-directory: ./.github/workflows/smoke

.github/workflows/smoke/build-relay.sh (vendored, 2 changes)

@@ -41,4 +41,4 @@ EOF
     ../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
 )

-sudo docker build -t nebula:smoke-relay .
+docker build -t nebula:smoke-relay .

.github/workflows/smoke/build.sh (vendored, 7 changes)

@@ -11,6 +11,11 @@ mkdir ./build
     cp ../../../../build/linux-amd64/nebula .
     cp ../../../../build/linux-amd64/nebula-cert .

+    if [ "$1" ]
+    then
+        cp "../../../../build/$1/nebula" "$1-nebula"
+    fi
+
     HOST="lighthouse1" \
         AM_LIGHTHOUSE=true \
         ../genconfig.sh >lighthouse1.yml
@@ -36,4 +41,4 @@ mkdir ./build
     ../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
 )

-sudo docker build -t "nebula:${NAME:-smoke}" .
+docker build -t "nebula:${NAME:-smoke}" .

.github/workflows/smoke/genconfig.sh (vendored, 2 changes)

@@ -47,7 +47,7 @@ listen:
   port: ${LISTEN_PORT:-4242}

 tun:
-  dev: ${TUN_DEV:-nebula1}
+  dev: ${TUN_DEV:-tun0}

 firewall:
   inbound_action: reject

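The new `tun0` default lines up with the BSD smoke targets added elsewhere in this change set, since the BSD kernels generally accept only `tunN`-style device names. In a host's `config.yml` the equivalent setting would look roughly like the sketch below (the value is illustrative, not taken from this diff):

```yaml
tun:
  # BSDs generally require tunN-style names; Linux also accepts
  # arbitrary names such as nebula1
  dev: tun0
```
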
.github/workflows/smoke/smoke-relay.sh (vendored, 52 changes)

@@ -14,24 +14,24 @@ cleanup() {
     set +e
     if [ "$(jobs -r)" ]
     then
-        sudo docker kill lighthouse1 host2 host3 host4
+        docker kill lighthouse1 host2 host3 host4
     fi
 }

 trap cleanup EXIT

-sudo docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test
-sudo docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test
-sudo docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test
-sudo docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test
+docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test
+docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test
+docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test
+docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test

-sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
+docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
 sleep 1
-sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
+docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
 sleep 1
-sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
+docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
 sleep 1
-sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
+docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
 sleep 1

 set +x
@@ -39,44 +39,44 @@ echo
 echo " *** Testing ping from lighthouse1"
 echo
 set -x
-sudo docker exec lighthouse1 ping -c1 192.168.100.2
-sudo docker exec lighthouse1 ping -c1 192.168.100.3
-sudo docker exec lighthouse1 ping -c1 192.168.100.4
+docker exec lighthouse1 ping -c1 192.168.100.2
+docker exec lighthouse1 ping -c1 192.168.100.3
+docker exec lighthouse1 ping -c1 192.168.100.4

 set +x
 echo
 echo " *** Testing ping from host2"
 echo
 set -x
-sudo docker exec host2 ping -c1 192.168.100.1
+docker exec host2 ping -c1 192.168.100.1
 # Should fail because no relay configured in this direction
-! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
-! sudo docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1
+! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
+! docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1

 set +x
 echo
 echo " *** Testing ping from host3"
 echo
 set -x
-sudo docker exec host3 ping -c1 192.168.100.1
-sudo docker exec host3 ping -c1 192.168.100.2
-sudo docker exec host3 ping -c1 192.168.100.4
+docker exec host3 ping -c1 192.168.100.1
+docker exec host3 ping -c1 192.168.100.2
+docker exec host3 ping -c1 192.168.100.4

 set +x
 echo
 echo " *** Testing ping from host4"
 echo
 set -x
-sudo docker exec host4 ping -c1 192.168.100.1
+docker exec host4 ping -c1 192.168.100.1
 # Should fail because relays not allowed
-! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
-sudo docker exec host4 ping -c1 192.168.100.3
+! docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
+docker exec host4 ping -c1 192.168.100.3

-sudo docker exec host4 sh -c 'kill 1'
-sudo docker exec host3 sh -c 'kill 1'
-sudo docker exec host2 sh -c 'kill 1'
-sudo docker exec lighthouse1 sh -c 'kill 1'
-sleep 1
+docker exec host4 sh -c 'kill 1'
+docker exec host3 sh -c 'kill 1'
+docker exec host2 sh -c 'kill 1'
+docker exec lighthouse1 sh -c 'kill 1'
+sleep 5

 if [ "$(jobs -r)" ]
 then

.github/workflows/smoke/smoke-vagrant.sh (vendored, new executable file, 105 lines)

@@ -0,0 +1,105 @@
+#!/bin/bash
+
+set -e -x
+
+set -o pipefail
+
+export VAGRANT_CWD="$PWD/vagrant-$1"
+
+mkdir -p logs
+
+cleanup() {
+    echo
+    echo " *** cleanup"
+    echo
+
+    set +e
+    if [ "$(jobs -r)" ]
+    then
+        docker kill lighthouse1 host2
+    fi
+    vagrant destroy -f
+}
+
+trap cleanup EXIT
+
+CONTAINER="nebula:${NAME:-smoke}"
+
+docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
+docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
+
+vagrant up
+vagrant ssh -c "cd /nebula && /nebula/$1-nebula -config host3.yml -test"
+
+docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
+sleep 1
+docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
+sleep 1
+vagrant ssh -c "cd /nebula && sudo sh -c 'echo \$\$ >/nebula/pid && exec /nebula/$1-nebula -config host3.yml'" &
+sleep 15
+
+# grab tcpdump pcaps for debugging
+docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
+docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
+docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
+docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
+# vagrant ssh -c "tcpdump -i nebula1 -q -w - -U" 2>logs/host3.inside.log >logs/host3.inside.pcap &
+# vagrant ssh -c "tcpdump -i eth0 -q -w - -U" 2>logs/host3.outside.log >logs/host3.outside.pcap &
+
+docker exec host2 ncat -nklv 0.0.0.0 2000 &
+vagrant ssh -c "ncat -nklv 0.0.0.0 2000" &
+#docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
+#vagrant ssh -c "ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000" &
+
+set +x
+echo
+echo " *** Testing ping from lighthouse1"
+echo
+set -x
+docker exec lighthouse1 ping -c1 192.168.100.2
+docker exec lighthouse1 ping -c1 192.168.100.3
+
+set +x
+echo
+echo " *** Testing ping from host2"
+echo
+set -x
+docker exec host2 ping -c1 192.168.100.1
+# Should fail because not allowed by host3 inbound firewall
+! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
+
+set +x
+echo
+echo " *** Testing ncat from host2"
+echo
+set -x
+# Should fail because not allowed by host3 inbound firewall
+#! docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
+#! docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
+
+set +x
+echo
+echo " *** Testing ping from host3"
+echo
+set -x
+vagrant ssh -c "ping -c1 192.168.100.1"
+vagrant ssh -c "ping -c1 192.168.100.2"
+
+set +x
+echo
+echo " *** Testing ncat from host3"
+echo
+set -x
+#vagrant ssh -c "ncat -nzv -w5 192.168.100.2 2000"
+#vagrant ssh -c "ncat -nzuv -w5 192.168.100.2 3000" | grep -q host2
+
+vagrant ssh -c "sudo xargs kill </nebula/pid"
+docker exec host2 sh -c 'kill 1'
+docker exec lighthouse1 sh -c 'kill 1'
+sleep 1
+
+if [ "$(jobs -r)" ]
+then
+    echo "nebula still running after SIGTERM sent" >&2
+    exit 1
+fi

.github/workflows/smoke/smoke.sh (vendored, 92 changes)

@@ -14,7 +14,7 @@ cleanup() {
     set +e
     if [ "$(jobs -r)" ]
     then
-        sudo docker kill lighthouse1 host2 host3 host4
+        docker kill lighthouse1 host2 host3 host4
     fi
 }

@@ -22,51 +22,51 @@ trap cleanup EXIT

 CONTAINER="nebula:${NAME:-smoke}"

-sudo docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
-sudo docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
-sudo docker run --name host3 --rm "$CONTAINER" -config host3.yml -test
-sudo docker run --name host4 --rm "$CONTAINER" -config host4.yml -test
+docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
+docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
+docker run --name host3 --rm "$CONTAINER" -config host3.yml -test
+docker run --name host4 --rm "$CONTAINER" -config host4.yml -test

-sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
+docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
 sleep 1
-sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
+docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
 sleep 1
-sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
+docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
 sleep 1
-sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
+docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
 sleep 1

 # grab tcpdump pcaps for debugging
-sudo docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
-sudo docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
-sudo docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
-sudo docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
-sudo docker exec host3 tcpdump -i nebula1 -q -w - -U 2>logs/host3.inside.log >logs/host3.inside.pcap &
-sudo docker exec host3 tcpdump -i eth0 -q -w - -U 2>logs/host3.outside.log >logs/host3.outside.pcap &
-sudo docker exec host4 tcpdump -i nebula1 -q -w - -U 2>logs/host4.inside.log >logs/host4.inside.pcap &
-sudo docker exec host4 tcpdump -i eth0 -q -w - -U 2>logs/host4.outside.log >logs/host4.outside.pcap &
+docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
+docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
+docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
+docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
+docker exec host3 tcpdump -i nebula1 -q -w - -U 2>logs/host3.inside.log >logs/host3.inside.pcap &
+docker exec host3 tcpdump -i eth0 -q -w - -U 2>logs/host3.outside.log >logs/host3.outside.pcap &
+docker exec host4 tcpdump -i nebula1 -q -w - -U 2>logs/host4.inside.log >logs/host4.inside.pcap &
+docker exec host4 tcpdump -i eth0 -q -w - -U 2>logs/host4.outside.log >logs/host4.outside.pcap &

-sudo docker exec host2 ncat -nklv 0.0.0.0 2000 &
-sudo docker exec host3 ncat -nklv 0.0.0.0 2000 &
-sudo docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
-sudo docker exec host3 ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000 &
+docker exec host2 ncat -nklv 0.0.0.0 2000 &
+docker exec host3 ncat -nklv 0.0.0.0 2000 &
+docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
+docker exec host3 ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000 &

 set +x
 echo
 echo " *** Testing ping from lighthouse1"
 echo
 set -x
-sudo docker exec lighthouse1 ping -c1 192.168.100.2
-sudo docker exec lighthouse1 ping -c1 192.168.100.3
+docker exec lighthouse1 ping -c1 192.168.100.2
+docker exec lighthouse1 ping -c1 192.168.100.3

 set +x
 echo
 echo " *** Testing ping from host2"
 echo
 set -x
-sudo docker exec host2 ping -c1 192.168.100.1
+docker exec host2 ping -c1 192.168.100.1
 # Should fail because not allowed by host3 inbound firewall
-! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
+! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1

 set +x
 echo
@@ -74,34 +74,34 @@ echo " *** Testing ncat from host2"
 echo
 set -x
 # Should fail because not allowed by host3 inbound firewall
-! sudo docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
-! sudo docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
+! docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
+! docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1

 set +x
 echo
 echo " *** Testing ping from host3"
 echo
 set -x
-sudo docker exec host3 ping -c1 192.168.100.1
-sudo docker exec host3 ping -c1 192.168.100.2
+docker exec host3 ping -c1 192.168.100.1
+docker exec host3 ping -c1 192.168.100.2

 set +x
 echo
 echo " *** Testing ncat from host3"
 echo
 set -x
-sudo docker exec host3 ncat -nzv -w5 192.168.100.2 2000
-sudo docker exec host3 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2
+docker exec host3 ncat -nzv -w5 192.168.100.2 2000
+docker exec host3 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2

 set +x
 echo
 echo " *** Testing ping from host4"
 echo
 set -x
-sudo docker exec host4 ping -c1 192.168.100.1
+docker exec host4 ping -c1 192.168.100.1
 # Should fail because not allowed by host4 outbound firewall
-! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
-! sudo docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
+! docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
+! docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1

 set +x
 echo
@@ -109,10 +109,10 @@ echo " *** Testing ncat from host4"
 echo
 set -x
 # Should fail because not allowed by host4 outbound firewall
-! sudo docker exec host4 ncat -nzv -w5 192.168.100.2 2000 || exit 1
-! sudo docker exec host4 ncat -nzv -w5 192.168.100.3 2000 || exit 1
-! sudo docker exec host4 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 || exit 1
-! sudo docker exec host4 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
+! docker exec host4 ncat -nzv -w5 192.168.100.2 2000 || exit 1
+! docker exec host4 ncat -nzv -w5 192.168.100.3 2000 || exit 1
+! docker exec host4 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 || exit 1
+! docker exec host4 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1

 set +x
 echo
@@ -120,16 +120,16 @@ echo " *** Testing conntrack"
 echo
 set -x
 # host2 can ping host3 now that host3 pinged it first
-sudo docker exec host2 ping -c1 192.168.100.3
+docker exec host2 ping -c1 192.168.100.3
 # host4 can ping host2 once conntrack established
-sudo docker exec host2 ping -c1 192.168.100.4
-sudo docker exec host4 ping -c1 192.168.100.2
+docker exec host2 ping -c1 192.168.100.4
+docker exec host4 ping -c1 192.168.100.2

-sudo docker exec host4 sh -c 'kill 1'
-sudo docker exec host3 sh -c 'kill 1'
-sudo docker exec host2 sh -c 'kill 1'
-sudo docker exec lighthouse1 sh -c 'kill 1'
-sleep 1
+docker exec host4 sh -c 'kill 1'
+docker exec host3 sh -c 'kill 1'
+docker exec host2 sh -c 'kill 1'
+docker exec lighthouse1 sh -c 'kill 1'
+sleep 5

 if [ "$(jobs -r)" ]
 then

.github/workflows/smoke/vagrant-freebsd-amd64/Vagrantfile (vendored, new file, 7 lines)

@@ -0,0 +1,7 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+Vagrant.configure("2") do |config|
+  config.vm.box = "generic/freebsd14"
+
+  config.vm.synced_folder "../build", "/nebula", type: "rsync"
+end

.github/workflows/smoke/vagrant-linux-386/Vagrantfile (vendored, new file, 7 lines)

@@ -0,0 +1,7 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+Vagrant.configure("2") do |config|
+  config.vm.box = "ubuntu/xenial32"
+
+  config.vm.synced_folder "../build", "/nebula"
+end

.github/workflows/smoke/vagrant-linux-amd64-ipv6disable/Vagrantfile (vendored, new file, 16 lines)

@@ -0,0 +1,16 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+Vagrant.configure("2") do |config|
+  config.vm.box = "ubuntu/jammy64"
+
+  config.vm.synced_folder "../build", "/nebula"
+
+  config.vm.provision :shell do |shell|
+    shell.inline = <<-EOF
+      sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="ipv6.disable=1"/' /etc/default/grub
+      update-grub
+    EOF
+    shell.privileged = true
+    shell.reboot = true
+  end
+end

.github/workflows/smoke/vagrant-netbsd-amd64/Vagrantfile (vendored, new file, 7 lines)

@@ -0,0 +1,7 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+Vagrant.configure("2") do |config|
+  config.vm.box = "generic/netbsd9"
+
+  config.vm.synced_folder "../build", "/nebula", type: "rsync"
+end

.github/workflows/smoke/vagrant-openbsd-amd64/Vagrantfile (vendored, new file, 7 lines)

@@ -0,0 +1,7 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+Vagrant.configure("2") do |config|
+  config.vm.box = "generic/openbsd7"
+
+  config.vm.synced_folder "../build", "/nebula", type: "rsync"
+end

.github/workflows/test.yml (vendored, 66 changes)

@@ -18,21 +18,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:

-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
-        id: go
+      - uses: actions/checkout@v4

-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-
-      - uses: actions/cache@v2
+      - uses: actions/setup-go@v5
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go1.20-
+          go-version: '1.22'
+          check-latest: true

       - name: Build
         run: make all
@@ -46,10 +37,13 @@ jobs:
       - name: End 2 end
         run: make e2evv

-      - uses: actions/upload-artifact@v3
+      - name: Build test mobile
+        run: make build-test-mobile
+
+      - uses: actions/upload-artifact@v4
         with:
-          name: e2e packet flow
-          path: e2e/mermaid/
+          name: e2e packet flow linux-latest
+          path: e2e/mermaid/linux-latest
           if-no-files-found: warn

   test-linux-boringcrypto:
@@ -57,21 +51,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:

-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
-        id: go
+      - uses: actions/checkout@v4

-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-
-      - uses: actions/cache@v2
+      - uses: actions/setup-go@v5
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go1.20-
+          go-version: '1.22'
+          check-latest: true

       - name: Build
         run: make bin-boringcrypto
@@ -90,21 +75,12 @@ jobs:
       os: [windows-latest, macos-11]
     steps:

-      - name: Set up Go 1.20
-        uses: actions/setup-go@v2
-        with:
-          go-version: "1.20"
-        id: go
+      - uses: actions/checkout@v4

-      - name: Check out code into the Go module directory
-        uses: actions/checkout@v2
-
-      - uses: actions/cache@v2
+      - uses: actions/setup-go@v5
         with:
-          path: ~/go/pkg/mod
-          key: ${{ runner.os }}-go1.20-${{ hashFiles('**/go.sum') }}
-          restore-keys: |
-            ${{ runner.os }}-go1.20-
+          go-version: '1.22'
+          check-latest: true

       - name: Build nebula
         run: go build ./cmd/nebula
@@ -121,8 +97,8 @@ jobs:
       - name: End 2 end
         run: make e2evv

-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
         with:
-          name: e2e packet flow
-          path: e2e/mermaid/
+          name: e2e packet flow ${{ matrix.os }}
+          path: e2e/mermaid/${{ matrix.os }}
           if-no-files-found: warn

CHANGELOG.md (166 changes)

@@ -7,6 +7,163 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [Unreleased]

+## [1.9.1] - 2024-05-29
+
+### Fixed
+
+- Fixed a potential deadlock in GetOrHandshake. (#1151)
+
+## [1.9.0] - 2024-05-07
+
+### Deprecated
+
+- This release adds a new setting `default_local_cidr_any` that defaults to
+  true to match previous behavior, but will default to false in the next
+  release (1.10). When set to false, `local_cidr` is matched correctly for
+  firewall rules on hosts acting as unsafe routers, and should be set for any
+  firewall rules you want to allow unsafe route hosts to access. See the issue
+  and example config for more details. (#1071, #1099)
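
As a rough illustration of that deprecation note, a minimal config sketch of opting into the stricter matching early; this assumes the setting sits under the `firewall` section, and the rule and CIDR below are invented for the example:

```yaml
firewall:
  # Assumed placement; opt in to the future (1.10) default early
  default_local_cidr_any: false
  inbound:
    # Hypothetical rule: with strict matching, traffic destined for an
    # unsafe route must be allowed explicitly via local_cidr
    - port: any
      proto: any
      group: laptop
      local_cidr: 172.16.1.0/24
```
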
+
+### Added
+
+- Nebula now has an official Docker image `nebulaoss/nebula` that is
+  distroless and contains just the `nebula` and `nebula-cert` binaries. You
+  can find it here: https://hub.docker.com/r/nebulaoss/nebula (#1037)
+
+- Experimental binaries for `loong64` are now provided. (#1003)
+
+- Added example service script for OpenRC. (#711)
+
+- The SSH daemon now supports inlined host keys. (#1054)
+
+- The SSH daemon now supports certificates with `sshd.trusted_cas`. (#1098)
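
A hedged sketch of how those two SSH daemon additions might sit together in a config file; the key bodies are placeholders, and the exact accepted formats are not spelled out in this diff:

```yaml
sshd:
  enabled: true
  listen: 127.0.0.1:2222
  # Host key inlined rather than referenced by file path (placeholder body)
  host_key: |
    -----BEGIN OPENSSH PRIVATE KEY-----
    ...
    -----END OPENSSH PRIVATE KEY-----
  # Trust an SSH certificate authority instead of listing user keys one by one
  trusted_cas:
    - "ssh-ed25519 AAAA... ca@example.com"
```
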
+
+### Changed
+
+- Config setting `tun.unsafe_routes` is now reloadable. (#1083)
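
For reference, a minimal sketch of an `unsafe_routes` entry (subnet and gateway are illustrative); with this change such entries can be edited and applied via a config reload instead of a restart:

```yaml
tun:
  unsafe_routes:
    # Route a non-nebula subnet via a nebula host that can reach it
    - route: 172.16.1.0/24
      via: 192.168.100.99
```
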
+
+- Small documentation and internal improvements. (#1065, #1067, #1069, #1108,
+  #1109, #1111, #1135)
+
+- Various dependency updates. (#1139, #1138, #1134, #1133, #1126, #1123, #1110,
+  #1094, #1092, #1087, #1086, #1085, #1072, #1063, #1059, #1055, #1053, #1047,
+  #1046, #1034, #1022)
+
+### Removed
+
+- Support for the deprecated `local_range` option has been removed. Please
+  change to `preferred_ranges` (which is also now reloadable). (#1043)
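
A minimal before and after sketch of that rename, with an illustrative range:

```yaml
# Removed in 1.9.0:
# local_range: "10.0.0.0/8"

# Replacement (a list, and reloadable):
preferred_ranges: ["10.0.0.0/8"]
```
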
+
+- We are now building with go1.22, which means that for Windows you need at
+  least Windows 10 or Windows Server 2016. This is because support for earlier
+  versions was removed in Go 1.21. See https://go.dev/doc/go1.21#windows (#981)
+
+- Removed vagrant example, as it was unmaintained. (#1129)
+
+- Removed Fedora and Arch nebula.service files, as they are maintained in the
+  upstream repos. (#1128, #1132)
+
+- Remove the TCP round trip tracking metrics, as they never had correct data
+  and were an experiment to begin with. (#1114)
+
+### Fixed
+
+- Fixed a potential deadlock introduced in 1.8.1. (#1112)
+
+- Fixed support for Linux when IPv6 has been disabled at the OS level. (#787)
+
+- DNS will return NXDOMAIN now when there are no results. (#845)
+
+- Allow `::` in `lighthouse.dns.host`. (#1115)
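
In config terms, that fix permits binding the lighthouse DNS responder to the IPv6 wildcard, along these lines (a sketch, with an assumed port):

```yaml
lighthouse:
  serve_dns: true
  dns:
    host: "::"   # previously rejected; binds the DNS listener to all addresses
    port: 53
```
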
+
+- Capitalization of `NotAfter` fixed in DNS TXT response. (#1127)
+
+- Don't log invalid certificates. It is untrusted data and can cause a large
+  volume of logs. (#1116)
+
+## [1.8.2] - 2024-01-08
+
+### Fixed
+
+- Fix multiple routines when listen.port is zero. This was a regression
+  introduced in v1.6.0. (#1057)
+
+### Changed
+
+- Small dependency update for Noise. (#1038)
+
+## [1.8.1] - 2023-12-19
+
+### Security
+
+- Update `golang.org/x/crypto`, which includes a fix for CVE-2023-48795. (#1048)
+
+### Fixed
+
+- Fix a deadlock introduced in v1.8.0 that could occur during handshakes. (#1044)
+
+- Fix mobile builds. (#1035)
+
+## [1.8.0] - 2023-12-06
+
+### Deprecated
+
+- The next minor release of Nebula, 1.9.0, will require at least Windows 10 or
+  Windows Server 2016. This is because support for earlier versions was removed
+  in Go 1.21. See https://go.dev/doc/go1.21#windows
+
+### Added
+
+- Linux: Notify systemd of service readiness. This should resolve timing issues
+  with services that depend on Nebula being active. For an example of how to
+  enable this, see: `examples/service_scripts/nebula.service`. (#929)
+
+- Windows: Use Registered IO (RIO) when possible. Testing on a Windows 11
+  machine shows ~50x improvement in throughput. (#905)
+
+- NetBSD, OpenBSD: Added rudimentary support. (#916, #812)
+
+- FreeBSD: Add support for naming tun devices. (#903)
+
+### Changed
+
+- `pki.disconnect_invalid` will now default to true. This means that once a
+  certificate expires, the tunnel will be disconnected. If you use SIGHUP to
+  reload certificates without restarting Nebula, you should ensure all of your
+  clients are on 1.7.0 or newer before you enable this feature. (#859)
+
+- Limit how often a busy tunnel can requery the lighthouse. The new config
+  option `timers.requery_wait_duration` defaults to `60s`. (#940)
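
A sketch of those two settings in config form with the stated defaults, assuming `timers` is a top-level section as the option name above suggests:

```yaml
pki:
  # Defaults to true as of 1.8.0: drop the tunnel once a peer's cert expires
  disconnect_invalid: true

timers:
  # Minimum wait between lighthouse requeries for a busy tunnel
  requery_wait_duration: 60s
```
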
+
+- The internal structures for hostmaps were refactored to reduce memory usage
+  and the potential for subtle bugs. (#843, #938, #953, #954, #955)
+
+- Lots of dependency updates.
+
+### Fixed
+
+- Windows: Retry wintun device creation if it fails the first time. (#985)
+
+- Fix issues with firewall reject packets that could cause panics. (#957)
+
+- Fix relay migration during re-handshakes. (#964)
+
+- Various other refactors and fixes. (#935, #952, #972, #961, #996, #1002,
+  #987, #1004, #1030, #1032, ...)
+
+## [1.7.2] - 2023-06-01
+
+### Fixed
+
+- Fix a freeze during config reload if the `static_host_map` config was changed. (#886)
+
+## [1.7.1] - 2023-05-18
+
+### Fixed
+
+- Fix IPv4 addresses returned by `static_host_map` DNS lookup queries being
+  treated as IPv6 addresses. (#877)
+
 ## [1.7.0] - 2023-05-17

 ### Added
@@ -475,7 +632,14 @@ created.)

 - Initial public release.

-[Unreleased]: https://github.com/slackhq/nebula/compare/v1.7.0...HEAD
+[Unreleased]: https://github.com/slackhq/nebula/compare/v1.9.1...HEAD
+[1.9.1]: https://github.com/slackhq/nebula/releases/tag/v1.9.1
+[1.9.0]: https://github.com/slackhq/nebula/releases/tag/v1.9.0
+[1.8.2]: https://github.com/slackhq/nebula/releases/tag/v1.8.2
+[1.8.1]: https://github.com/slackhq/nebula/releases/tag/v1.8.1
+[1.8.0]: https://github.com/slackhq/nebula/releases/tag/v1.8.0
+[1.7.2]: https://github.com/slackhq/nebula/releases/tag/v1.7.2
+[1.7.1]: https://github.com/slackhq/nebula/releases/tag/v1.7.1
 [1.7.0]: https://github.com/slackhq/nebula/releases/tag/v1.7.0
 [1.6.1]: https://github.com/slackhq/nebula/releases/tag/v1.6.1
 [1.6.0]: https://github.com/slackhq/nebula/releases/tag/v1.6.0

@@ -33,6 +33,5 @@ l.WithError(err).
   WithField("vpnIp", IntIp(hostinfo.hostId)).
   WithField("udpAddr", addr).
   WithField("handshake", m{"stage": 1, "style": "ix"}).
-  WithField("cert", remoteCert).
   Info("Invalid certificate from host")
 ```

Makefile (60 changes)

@@ -1,20 +1,14 @@
-GOMINVERSION = 1.20
 NEBULA_CMD_PATH = "./cmd/nebula"
-GO111MODULE = on
-export GO111MODULE
 CGO_ENABLED = 0
 export CGO_ENABLED

 # Set up OS specific bits
 ifeq ($(OS),Windows_NT)
-	#TODO: we should be able to ditch awk as well
-	GOVERSION := $(shell go version | awk "{print substr($$3, 3)}")
-	GOISMIN := $(shell IF "$(GOVERSION)" GEQ "$(GOMINVERSION)" ECHO 1)
 	NEBULA_CMD_SUFFIX = .exe
 	NULL_FILE = nul
+	# RIO on windows does pointer stuff that makes go vet angry
+	VET_FLAGS = -unsafeptr=false
 else
-	GOVERSION := $(shell go version | awk '{print substr($$3, 3)}')
-	GOISMIN := $(shell expr "$(GOVERSION)" ">=" "$(GOMINVERSION)")
 	NEBULA_CMD_SUFFIX =
 	NULL_FILE = /dev/null
 endif
@@ -28,6 +22,9 @@ ifndef BUILD_NUMBER
 	endif
 endif

+DOCKER_IMAGE_REPO ?= nebulaoss/nebula
+DOCKER_IMAGE_TAG ?= latest
+
 LDFLAGS = -X main.Build=$(BUILD_NUMBER)

 ALL_LINUX = linux-amd64 \
@@ -42,12 +39,24 @@ ALL_LINUX = linux-amd64 \
 	linux-mips64 \
 	linux-mips64le \
 	linux-mips-softfloat \
-	linux-riscv64
+	linux-riscv64 \
+	linux-loong64
+
+ALL_FREEBSD = freebsd-amd64 \
+	freebsd-arm64
+
+ALL_OPENBSD = openbsd-amd64 \
+	openbsd-arm64
+
+ALL_NETBSD = netbsd-amd64 \
+	netbsd-arm64

 ALL = $(ALL_LINUX) \
+	$(ALL_FREEBSD) \
+	$(ALL_OPENBSD) \
+	$(ALL_NETBSD) \
 	darwin-amd64 \
 	darwin-arm64 \
-	freebsd-amd64 \
 	windows-amd64 \
 	windows-arm64
@@ -69,13 +78,21 @@ e2evvvv: e2ev
 e2e-bench: TEST_FLAGS = -bench=. -benchmem -run=^$
 e2e-bench: e2e

+DOCKER_BIN = build/linux-amd64/nebula build/linux-amd64/nebula-cert
+
 all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert)

+docker: docker/linux-$(shell go env GOARCH)
+
 release: $(ALL:%=build/nebula-%.tar.gz)

 release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz)

-release-freebsd: build/nebula-freebsd-amd64.tar.gz
+release-freebsd: $(ALL_FREEBSD:%=build/nebula-%.tar.gz)
+
+release-openbsd: $(ALL_OPENBSD:%=build/nebula-%.tar.gz)
+
+release-netbsd: $(ALL_NETBSD:%=build/nebula-%.tar.gz)

 release-boringcrypto: build/nebula-linux-$(shell go env GOARCH)-boringcrypto.tar.gz
@@ -93,6 +110,9 @@ bin-darwin: build/darwin-amd64/nebula build/darwin-amd64/nebula-cert
 bin-freebsd: build/freebsd-amd64/nebula build/freebsd-amd64/nebula-cert
 	mv $? .

+bin-freebsd-arm64: build/freebsd-arm64/nebula build/freebsd-arm64/nebula-cert
+	mv $? .
+
 bin-boringcrypto: build/linux-$(shell go env GOARCH)-boringcrypto/nebula build/linux-$(shell go env GOARCH)-boringcrypto/nebula-cert
 	mv $? .
@@ -136,8 +156,11 @@ build/nebula-%.tar.gz: build/%/nebula build/%/nebula-cert
 build/nebula-%.zip: build/%/nebula.exe build/%/nebula-cert.exe
 	cd build/$* && zip ../nebula-$*.zip nebula.exe nebula-cert.exe

+docker/%: build/%/nebula build/%/nebula-cert
+	docker build . $(DOCKER_BUILD_ARGS) -f docker/Dockerfile --platform "$(subst -,/,$*)" --tag "${DOCKER_IMAGE_REPO}:${DOCKER_IMAGE_TAG}" --tag "${DOCKER_IMAGE_REPO}:$(BUILD_NUMBER)"
+
 vet:
-	go vet -v ./...
+	go vet $(VET_FLAGS) -v ./...

 test:
 	go test -v ./...
@@ -149,6 +172,12 @@ test-cov-html:
 	go test -coverprofile=coverage.out
 	go tool cover -html=coverage.out

+build-test-mobile:
+	GOARCH=amd64 GOOS=ios go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
+	GOARCH=arm64 GOOS=ios go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
+	GOARCH=amd64 GOOS=android go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
+	GOARCH=arm64 GOOS=android go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
+
 bench:
 	go test -bench=.
@@ -190,8 +219,13 @@ smoke-relay-docker: bin-docker
 	cd .github/workflows/smoke/ && ./smoke-relay.sh

 smoke-docker-race: BUILD_ARGS = -race
+smoke-docker-race: CGO_ENABLED = 1
 smoke-docker-race: smoke-docker

+smoke-vagrant/%: bin-docker build/%/nebula
+	cd .github/workflows/smoke/ && ./build.sh $*
+	cd .github/workflows/smoke/ && ./smoke-vagrant.sh $*
+
 .FORCE:
-.PHONY: e2e e2ev e2evv e2evvv e2evvvv test test-cov-html bench bench-cpu bench-cpu-long bin proto release service smoke-docker smoke-docker-race
+.PHONY: bench bench-cpu bench-cpu-long bin build-test-mobile e2e e2ev e2evv e2evvv e2evvvv proto release service smoke-docker smoke-docker-race test test-cov-html smoke-vagrant/%
 .DEFAULT_GOAL := bin

README.md
@@ -27,20 +27,36 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for

#### Distribution Packages

- [Arch Linux](https://archlinux.org/packages/community/x86_64/nebula/)
- [Arch Linux](https://archlinux.org/packages/extra/x86_64/nebula/)
  ```
  $ sudo pacman -S nebula
  ```

- [Fedora Linux](https://src.fedoraproject.org/rpms/nebula)
  ```
  $ sudo dnf install nebula
  ```

- [Debian Linux](https://packages.debian.org/source/stable/nebula)
  ```
  $ sudo apt install nebula
  ```

- [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=nebula)
  ```
  $ sudo apk add nebula
  ```

- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/nebula.rb)
  ```
  $ brew install nebula
  ```

- [Docker](https://hub.docker.com/r/nebulaoss/nebula)
  ```
  $ docker pull nebulaoss/nebula
  ```

#### Mobile

- [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&itscg=30200)

@@ -108,7 +124,7 @@ For each host, copy the nebula binary to the host, along with `config.yml` from

## Building Nebula from source

Download go and clone this repo. Change to the nebula directory.
Make sure you have [go](https://go.dev/doc/install) installed and clone this repo. Change to the nebula directory.

To build nebula for all platforms:
`make all`
@@ -12,7 +12,7 @@ import (

type AllowList struct {
	// The values of this cidrTree are `bool`, signifying allow/deny
	cidrTree *cidr.Tree6
	cidrTree *cidr.Tree6[bool]
}

type RemoteAllowList struct {

@@ -20,7 +20,7 @@ type RemoteAllowList struct {

	// Inside Range Specific, keys of this tree are inside CIDRs and values
	// are *AllowList
	insideAllowLists *cidr.Tree6
	insideAllowLists *cidr.Tree6[*AllowList]
}

type LocalAllowList struct {

@@ -88,7 +88,7 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
		return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw)
	}

	tree := cidr.NewTree6()
	tree := cidr.NewTree6[bool]()

	// Keep track of the rules we have added for both ipv4 and ipv6
	type allowListRules struct {

@@ -218,13 +218,13 @@ func getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error
	return nameRules, nil
}

func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6, error) {
func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6[*AllowList], error) {
	value := c.Get(k)
	if value == nil {
		return nil, nil
	}

	remoteAllowRanges := cidr.NewTree6()
	remoteAllowRanges := cidr.NewTree6[*AllowList]()

	rawMap, ok := value.(map[interface{}]interface{})
	if !ok {

@@ -257,13 +257,8 @@ func (al *AllowList) Allow(ip net.IP) bool {
		return true
	}

	result := al.cidrTree.MostSpecificContains(ip)
	switch v := result.(type) {
	case bool:
		return v
	default:
		panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
	}
	_, result := al.cidrTree.MostSpecificContains(ip)
	return result
}

func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool {

@@ -271,13 +266,8 @@ func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool {
		return true
	}

	result := al.cidrTree.MostSpecificContainsIpV4(ip)
	switch v := result.(type) {
	case bool:
		return v
	default:
		panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
	}
	_, result := al.cidrTree.MostSpecificContainsIpV4(ip)
	return result
}

func (al *AllowList) AllowIpV6(hi, lo uint64) bool {

@@ -285,13 +275,8 @@ func (al *AllowList) AllowIpV6(hi, lo uint64) bool {
		return true
	}

	result := al.cidrTree.MostSpecificContainsIpV6(hi, lo)
	switch v := result.(type) {
	case bool:
		return v
	default:
		panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
	}
	_, result := al.cidrTree.MostSpecificContainsIpV6(hi, lo)
	return result
}

func (al *LocalAllowList) Allow(ip net.IP) bool {

@@ -352,9 +337,9 @@ func (al *RemoteAllowList) AllowIpV6(vpnIp iputil.VpnIp, hi, lo uint64) bool {

func (al *RemoteAllowList) getInsideAllowList(vpnIp iputil.VpnIp) *AllowList {
	if al.insideAllowLists != nil {
		inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp)
		if inside != nil {
			return inside.(*AllowList)
		ok, inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp)
		if ok {
			return inside
		}
	}
	return nil
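With the tree parameterized on its value type, `Allow` no longer needs the type switch or the panic path shown above; a miss is reported through the extra `ok` return rather than an untyped nil. A minimal sketch of the new lookup pattern, assuming `cidr.Parse` returns a `*net.IPNet` as its use in the tests below suggests:

```go
package main

import (
	"fmt"
	"net"

	"github.com/slackhq/nebula/cidr"
)

func main() {
	// Values are plain bools now, so no type assertion on lookup.
	tree := cidr.NewTree6[bool]()
	tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true)   // default allow
	tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false) // deny this range

	ok, allowed := tree.MostSpecificContains(net.ParseIP("10.1.2.3"))
	fmt.Println(ok, allowed) // true false: matched the /8 deny rule
}
```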
@@ -100,7 +100,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
func TestAllowList_Allow(t *testing.T) {
	assert.Equal(t, true, ((*AllowList)(nil)).Allow(net.ParseIP("1.1.1.1")))

	tree := cidr.NewTree6()
	tree := cidr.NewTree6[bool]()
	tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true)
	tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false)
	tree.AddCIDR(cidr.Parse("10.42.42.42/32"), true)
@@ -51,13 +51,13 @@ func (c *calculatedRemote) Apply(ip iputil.VpnIp) *Ip4AndPort {
	return &Ip4AndPort{Ip: uint32(masked), Port: c.port}
}

func NewCalculatedRemotesFromConfig(c *config.C, k string) (*cidr.Tree4, error) {
func NewCalculatedRemotesFromConfig(c *config.C, k string) (*cidr.Tree4[[]*calculatedRemote], error) {
	value := c.Get(k)
	if value == nil {
		return nil, nil
	}

	calculatedRemotes := cidr.NewTree4()
	calculatedRemotes := cidr.NewTree4[[]*calculatedRemote]()

	rawMap, ok := value.(map[any]any)
	if !ok {
cert.go (deleted)
@@ -1,163 +0,0 @@
package nebula

import (
	"errors"
	"fmt"
	"io/ioutil"
	"strings"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/cert"
	"github.com/slackhq/nebula/config"
)

type CertState struct {
	certificate         *cert.NebulaCertificate
	rawCertificate      []byte
	rawCertificateNoKey []byte
	publicKey           []byte
	privateKey          []byte
}

func NewCertState(certificate *cert.NebulaCertificate, privateKey []byte) (*CertState, error) {
	// Marshal the certificate to ensure it is valid
	rawCertificate, err := certificate.Marshal()
	if err != nil {
		return nil, fmt.Errorf("invalid nebula certificate on interface: %s", err)
	}

	publicKey := certificate.Details.PublicKey
	cs := &CertState{
		rawCertificate: rawCertificate,
		certificate:    certificate, // PublicKey has been set to nil above
		privateKey:     privateKey,
		publicKey:      publicKey,
	}

	cs.certificate.Details.PublicKey = nil
	rawCertNoKey, err := cs.certificate.Marshal()
	if err != nil {
		return nil, fmt.Errorf("error marshalling certificate no key: %s", err)
	}
	cs.rawCertificateNoKey = rawCertNoKey
	// put public key back
	cs.certificate.Details.PublicKey = cs.publicKey
	return cs, nil
}

func NewCertStateFromConfig(c *config.C) (*CertState, error) {
	var pemPrivateKey []byte
	var err error

	privPathOrPEM := c.GetString("pki.key", "")

	if privPathOrPEM == "" {
		return nil, errors.New("no pki.key path or PEM data provided")
	}

	if strings.Contains(privPathOrPEM, "-----BEGIN") {
		pemPrivateKey = []byte(privPathOrPEM)
		privPathOrPEM = "<inline>"
	} else {
		pemPrivateKey, err = ioutil.ReadFile(privPathOrPEM)
		if err != nil {
			return nil, fmt.Errorf("unable to read pki.key file %s: %s", privPathOrPEM, err)
		}
	}

	rawKey, _, curve, err := cert.UnmarshalPrivateKey(pemPrivateKey)
	if err != nil {
		return nil, fmt.Errorf("error while unmarshaling pki.key %s: %s", privPathOrPEM, err)
	}

	var rawCert []byte

	pubPathOrPEM := c.GetString("pki.cert", "")

	if pubPathOrPEM == "" {
		return nil, errors.New("no pki.cert path or PEM data provided")
	}

	if strings.Contains(pubPathOrPEM, "-----BEGIN") {
		rawCert = []byte(pubPathOrPEM)
		pubPathOrPEM = "<inline>"
	} else {
		rawCert, err = ioutil.ReadFile(pubPathOrPEM)
		if err != nil {
			return nil, fmt.Errorf("unable to read pki.cert file %s: %s", pubPathOrPEM, err)
		}
	}

	nebulaCert, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCert)
	if err != nil {
		return nil, fmt.Errorf("error while unmarshaling pki.cert %s: %s", pubPathOrPEM, err)
	}

	if nebulaCert.Expired(time.Now()) {
		return nil, fmt.Errorf("nebula certificate for this host is expired")
	}

	if len(nebulaCert.Details.Ips) == 0 {
		return nil, fmt.Errorf("no IPs encoded in certificate")
	}

	if err = nebulaCert.VerifyPrivateKey(curve, rawKey); err != nil {
		return nil, fmt.Errorf("private key is not a pair with public key in nebula cert")
	}

	return NewCertState(nebulaCert, rawKey)
}

func loadCAFromConfig(l *logrus.Logger, c *config.C) (*cert.NebulaCAPool, error) {
	var rawCA []byte
	var err error

	caPathOrPEM := c.GetString("pki.ca", "")
	if caPathOrPEM == "" {
		return nil, errors.New("no pki.ca path or PEM data provided")
	}

	if strings.Contains(caPathOrPEM, "-----BEGIN") {
		rawCA = []byte(caPathOrPEM)

	} else {
		rawCA, err = ioutil.ReadFile(caPathOrPEM)
		if err != nil {
			return nil, fmt.Errorf("unable to read pki.ca file %s: %s", caPathOrPEM, err)
		}
	}

	CAs, err := cert.NewCAPoolFromBytes(rawCA)
	if errors.Is(err, cert.ErrExpired) {
		var expired int
		for _, cert := range CAs.CAs {
			if cert.Expired(time.Now()) {
				expired++
				l.WithField("cert", cert).Warn("expired certificate present in CA pool")
			}
		}

		if expired >= len(CAs.CAs) {
			return nil, errors.New("no valid CA certificates present")
		}

	} else if err != nil {
		return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err)
	}

	for _, fp := range c.GetStringSlice("pki.blocklist", []string{}) {
		l.WithField("fingerprint", fp).Info("Blocklisting cert")
		CAs.BlocklistFingerprint(fp)
	}

	// Support deprecated config for at least one minor release to allow for migrations
	//TODO: remove in 2022 or later
	for _, fp := range c.GetStringSlice("pki.blacklist", []string{}) {
		l.WithField("fingerprint", fp).Info("Blocklisting cert")
		l.Warn("pki.blacklist is deprecated and will not be supported in a future release. Please migrate your config to use pki.blocklist")
		CAs.BlocklistFingerprint(fp)
	}

	return CAs, nil
}
@@ -272,6 +272,9 @@ func EncryptAndMarshalSigningPrivateKey(curve Curve, b []byte, passphrase []byte
		},
		Ciphertext: ciphertext,
	})
	if err != nil {
		return nil, err
	}

	switch curve {
	case Curve_CURVE25519:

@@ -321,7 +324,7 @@ func UnmarshalEd25519PrivateKey(b []byte) (ed25519.PrivateKey, []byte, error) {
	return k.Bytes, r, nil
}

// UnmarshalNebulaCertificate will unmarshal a protobuf byte representation of a nebula cert into its
// UnmarshalNebulaEncryptedData will unmarshal a protobuf byte representation of a nebula cert into its
// protobuf-generated struct.
func UnmarshalNebulaEncryptedData(b []byte) (*NebulaEncryptedData, error) {
	if len(b) == 0 {

@@ -77,6 +77,9 @@ func aes256Decrypt(passphrase []byte, kdfParams *Argon2Parameters, data []byte)
	}

	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}

	nonce, ciphertext, err := splitNonceCiphertext(data, gcm.NonceSize())
	if err != nil {
cidr/tree4.go
@@ -6,35 +6,36 @@ import (
	"github.com/slackhq/nebula/iputil"
)

type Node struct {
	left   *Node
	right  *Node
	parent *Node
	value  interface{}
type Node[T any] struct {
	left     *Node[T]
	right    *Node[T]
	parent   *Node[T]
	hasValue bool
	value    T
}

type entry struct {
type entry[T any] struct {
	CIDR  *net.IPNet
	Value *interface{}
	Value T
}

type Tree4 struct {
	root *Node
	list []entry
type Tree4[T any] struct {
	root *Node[T]
	list []entry[T]
}

const (
	startbit = iputil.VpnIp(0x80000000)
)

func NewTree4() *Tree4 {
	tree := new(Tree4)
	tree.root = &Node{}
	tree.list = []entry{}
func NewTree4[T any]() *Tree4[T] {
	tree := new(Tree4[T])
	tree.root = &Node[T]{}
	tree.list = []entry[T]{}
	return tree
}

func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
func (tree *Tree4[T]) AddCIDR(cidr *net.IPNet, val T) {
	bit := startbit
	node := tree.root
	next := tree.root

@@ -68,14 +69,15 @@ func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
		}
	}

	tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
	tree.list = append(tree.list, entry[T]{CIDR: cidr, Value: val})
	node.value = val
	node.hasValue = true
	return
	}

	// Build up the rest of the tree we don't already have
	for bit&mask != 0 {
		next = &Node{}
		next = &Node[T]{}
		next.parent = node

		if ip&bit != 0 {

@@ -90,17 +92,18 @@ func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {

	// Final node marks our cidr, set the value
	node.value = val
	tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
	node.hasValue = true
	tree.list = append(tree.list, entry[T]{CIDR: cidr, Value: val})
}

// Contains finds the first match, which may be the least specific
func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {
func (tree *Tree4[T]) Contains(ip iputil.VpnIp) (ok bool, value T) {
	bit := startbit
	node := tree.root

	for node != nil {
		if node.value != nil {
			return node.value
		if node.hasValue {
			return true, node.value
		}

		if ip&bit != 0 {

@@ -113,17 +116,18 @@ func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {

	}

	return value
	return false, value
}

// MostSpecificContains finds the most specific match
func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
func (tree *Tree4[T]) MostSpecificContains(ip iputil.VpnIp) (ok bool, value T) {
	bit := startbit
	node := tree.root

	for node != nil {
		if node.value != nil {
		if node.hasValue {
			value = node.value
			ok = true
		}

		if ip&bit != 0 {

@@ -135,17 +139,25 @@ func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
		bit >>= 1
	}

	return value
	return ok, value
}

// Match finds the most specific match
func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
type eachFunc[T any] func(T) bool

// EachContains will call a function, passing the value, for each entry until the function returns true or the search is complete
// The final return value will be true if the provided function returned true
func (tree *Tree4[T]) EachContains(ip iputil.VpnIp, each eachFunc[T]) bool {
	bit := startbit
	node := tree.root
	lastNode := node

	for node != nil {
		lastNode = node
		if node.hasValue {
			// If the each func returns true then we can exit the loop
			if each(node.value) {
				return true
			}
		}

		if ip&bit != 0 {
			node = node.right
		} else {

@@ -155,13 +167,37 @@ func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
		bit >>= 1
	}

	if bit == 0 && lastNode != nil {
		value = lastNode.value
	return false
}

// GetCIDR returns the entry added by the most recent matching AddCIDR call
func (tree *Tree4[T]) GetCIDR(cidr *net.IPNet) (ok bool, value T) {
	bit := startbit
	node := tree.root

	ip := iputil.Ip2VpnIp(cidr.IP)
	mask := iputil.Ip2VpnIp(cidr.Mask)

	// Find our last ancestor in the tree
	for node != nil && bit&mask != 0 {
		if ip&bit != 0 {
			node = node.right
		} else {
			node = node.left
		}

		bit = bit >> 1
	}
	return value

	if bit&mask == 0 && node != nil {
		value = node.value
		ok = node.hasValue
	}

	return ok, value
}

// List will return all CIDRs and their current values. Do not modify the contents!
func (tree *Tree4) List() []entry {
func (tree *Tree4[T]) List() []entry[T] {
	return tree.list
}
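The signatures above are the whole of the new generic API: the value type is fixed at construction and every lookup reports hits through an explicit `ok` boolean. A minimal usage sketch; the tree methods and `iputil.Ip2VpnIp` are taken directly from this diff, while the addresses are made up:

```go
package main

import (
	"fmt"
	"net"

	"github.com/slackhq/nebula/cidr"
	"github.com/slackhq/nebula/iputil"
)

func main() {
	// Lookups return (ok, T) instead of a bare interface{}.
	tree := cidr.NewTree4[string]()
	_, ipNet, _ := net.ParseCIDR("10.0.0.0/8")
	tree.AddCIDR(ipNet, "private")

	ip := iputil.Ip2VpnIp(net.ParseIP("10.1.2.3"))
	if ok, v := tree.MostSpecificContains(ip); ok {
		fmt.Println(v) // private
	}

	// GetCIDR looks up the value stored for an exact prefix.
	ok, v := tree.GetCIDR(ipNet)
	fmt.Println(ok, v) // true private
}
```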
@@ -9,7 +9,7 @@ import (
)

func TestCIDRTree_List(t *testing.T) {
	tree := NewTree4()
	tree := NewTree4[string]()
	tree.AddCIDR(Parse("1.0.0.0/16"), "1")
	tree.AddCIDR(Parse("1.0.0.0/8"), "2")
	tree.AddCIDR(Parse("1.0.0.0/16"), "3")

@@ -17,13 +17,13 @@ func TestCIDRTree_List(t *testing.T) {
	list := tree.List()
	assert.Len(t, list, 2)
	assert.Equal(t, "1.0.0.0/8", list[0].CIDR.String())
	assert.Equal(t, "2", *list[0].Value)
	assert.Equal(t, "2", list[0].Value)
	assert.Equal(t, "1.0.0.0/16", list[1].CIDR.String())
	assert.Equal(t, "4", *list[1].Value)
	assert.Equal(t, "4", list[1].Value)
}

func TestCIDRTree_Contains(t *testing.T) {
	tree := NewTree4()
	tree := NewTree4[string]()
	tree.AddCIDR(Parse("1.0.0.0/8"), "1")
	tree.AddCIDR(Parse("2.1.0.0/16"), "2")
	tree.AddCIDR(Parse("3.1.1.0/24"), "3")

@@ -33,35 +33,43 @@ func TestCIDRTree_Contains(t *testing.T) {
	tree.AddCIDR(Parse("254.0.0.0/4"), "5")

	tests := []struct {
		Found  bool
		Result interface{}
		IP     string
	}{
		{"1", "1.0.0.0"},
		{"1", "1.255.255.255"},
		{"2", "2.1.0.0"},
		{"2", "2.1.255.255"},
		{"3", "3.1.1.0"},
		{"3", "3.1.1.255"},
		{"4a", "4.1.1.255"},
		{"4a", "4.1.1.1"},
		{"5", "240.0.0.0"},
		{"5", "255.255.255.255"},
		{nil, "239.0.0.0"},
		{nil, "4.1.2.2"},
		{true, "1", "1.0.0.0"},
		{true, "1", "1.255.255.255"},
		{true, "2", "2.1.0.0"},
		{true, "2", "2.1.255.255"},
		{true, "3", "3.1.1.0"},
		{true, "3", "3.1.1.255"},
		{true, "4a", "4.1.1.255"},
		{true, "4a", "4.1.1.1"},
		{true, "5", "240.0.0.0"},
		{true, "5", "255.255.255.255"},
		{false, "", "239.0.0.0"},
		{false, "", "4.1.2.2"},
	}

	for _, tt := range tests {
		assert.Equal(t, tt.Result, tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
		ok, r := tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP)))
		assert.Equal(t, tt.Found, ok)
		assert.Equal(t, tt.Result, r)
	}

	tree = NewTree4()
	tree = NewTree4[string]()
	tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
	assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
	assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
	ok, r := tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0")))
	assert.True(t, ok)
	assert.Equal(t, "cool", r)

	ok, r = tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255")))
	assert.True(t, ok)
	assert.Equal(t, "cool", r)
}

func TestCIDRTree_MostSpecificContains(t *testing.T) {
	tree := NewTree4()
	tree := NewTree4[string]()
	tree.AddCIDR(Parse("1.0.0.0/8"), "1")
	tree.AddCIDR(Parse("2.1.0.0/16"), "2")
	tree.AddCIDR(Parse("3.1.1.0/24"), "3")

@@ -71,59 +79,76 @@ func TestCIDRTree_MostSpecificContains(t *testing.T) {
	tree.AddCIDR(Parse("254.0.0.0/4"), "5")

	tests := []struct {
		Found  bool
		Result interface{}
		IP     string
	}{
		{"1", "1.0.0.0"},
		{"1", "1.255.255.255"},
		{"2", "2.1.0.0"},
		{"2", "2.1.255.255"},
		{"3", "3.1.1.0"},
		{"3", "3.1.1.255"},
		{"4a", "4.1.1.255"},
		{"4b", "4.1.1.2"},
		{"4c", "4.1.1.1"},
		{"5", "240.0.0.0"},
		{"5", "255.255.255.255"},
		{nil, "239.0.0.0"},
		{nil, "4.1.2.2"},
		{true, "1", "1.0.0.0"},
		{true, "1", "1.255.255.255"},
		{true, "2", "2.1.0.0"},
		{true, "2", "2.1.255.255"},
		{true, "3", "3.1.1.0"},
		{true, "3", "3.1.1.255"},
		{true, "4a", "4.1.1.255"},
		{true, "4b", "4.1.1.2"},
		{true, "4c", "4.1.1.1"},
		{true, "5", "240.0.0.0"},
		{true, "5", "255.255.255.255"},
		{false, "", "239.0.0.0"},
		{false, "", "4.1.2.2"},
	}

	for _, tt := range tests {
		assert.Equal(t, tt.Result, tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
		ok, r := tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP)))
		assert.Equal(t, tt.Found, ok)
		assert.Equal(t, tt.Result, r)
	}

	tree = NewTree4()
	tree = NewTree4[string]()
	tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
	assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
	assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
	ok, r := tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0")))
	assert.True(t, ok)
	assert.Equal(t, "cool", r)

	ok, r = tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255")))
	assert.True(t, ok)
	assert.Equal(t, "cool", r)
}

func TestCIDRTree_Match(t *testing.T) {
	tree := NewTree4()
	tree.AddCIDR(Parse("4.1.1.0/32"), "1a")
	tree.AddCIDR(Parse("4.1.1.1/32"), "1b")
func TestTree4_GetCIDR(t *testing.T) {
	tree := NewTree4[string]()
	tree.AddCIDR(Parse("1.0.0.0/8"), "1")
	tree.AddCIDR(Parse("2.1.0.0/16"), "2")
	tree.AddCIDR(Parse("3.1.1.0/24"), "3")
	tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
	tree.AddCIDR(Parse("4.1.1.1/32"), "4b")
	tree.AddCIDR(Parse("4.1.2.1/32"), "4c")
	tree.AddCIDR(Parse("254.0.0.0/4"), "5")

	tests := []struct {
		Found  bool
		Result interface{}
		IP     string
		IPNet  *net.IPNet
	}{
		{"1a", "4.1.1.0"},
		{"1b", "4.1.1.1"},
		{true, "1", Parse("1.0.0.0/8")},
		{true, "2", Parse("2.1.0.0/16")},
		{true, "3", Parse("3.1.1.0/24")},
		{true, "4a", Parse("4.1.1.0/24")},
		{true, "4b", Parse("4.1.1.1/32")},
		{true, "4c", Parse("4.1.2.1/32")},
		{true, "5", Parse("254.0.0.0/4")},
		{false, "", Parse("2.0.0.0/8")},
	}

	for _, tt := range tests {
		assert.Equal(t, tt.Result, tree.Match(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
		ok, r := tree.GetCIDR(tt.IPNet)
		assert.Equal(t, tt.Found, ok)
		assert.Equal(t, tt.Result, r)
	}

	tree = NewTree4()
	tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
	assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
	assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}

func BenchmarkCIDRTree_Contains(b *testing.B) {
	tree := NewTree4()
	tree := NewTree4[string]()
	tree.AddCIDR(Parse("1.1.0.0/16"), "1")
	tree.AddCIDR(Parse("1.2.1.1/32"), "1")
	tree.AddCIDR(Parse("192.2.1.1/32"), "1")

@@ -143,25 +168,3 @@ func BenchmarkCIDRTree_Contains(b *testing.B) {
		}
	})
}

func BenchmarkCIDRTree_Match(b *testing.B) {
	tree := NewTree4()
	tree.AddCIDR(Parse("1.1.0.0/16"), "1")
	tree.AddCIDR(Parse("1.2.1.1/32"), "1")
	tree.AddCIDR(Parse("192.2.1.1/32"), "1")
	tree.AddCIDR(Parse("172.2.1.1/32"), "1")

	ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
	b.Run("found", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			tree.Match(ip)
		}
	})

	ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
	b.Run("not found", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			tree.Match(ip)
		}
	})
}
@@ -8,20 +8,20 @@ import (

const startbit6 = uint64(1 << 63)

type Tree6 struct {
	root4 *Node
	root6 *Node
type Tree6[T any] struct {
	root4 *Node[T]
	root6 *Node[T]
}

func NewTree6() *Tree6 {
	tree := new(Tree6)
	tree.root4 = &Node{}
	tree.root6 = &Node{}
func NewTree6[T any]() *Tree6[T] {
	tree := new(Tree6[T])
	tree.root4 = &Node[T]{}
	tree.root6 = &Node[T]{}
	return tree
}

func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {
	var node, next *Node
func (tree *Tree6[T]) AddCIDR(cidr *net.IPNet, val T) {
	var node, next *Node[T]

	cidrIP, ipv4 := isIPV4(cidr.IP)
	if ipv4 {

@@ -56,7 +56,7 @@ func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {

	// Build up the rest of the tree we don't already have
	for bit&mask != 0 {
		next = &Node{}
		next = &Node[T]{}
		next.parent = node

		if ip&bit != 0 {

@@ -72,11 +72,12 @@ func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {

	// Final node marks our cidr, set the value
	node.value = val
	node.hasValue = true
}

// Finds the most specific match
func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
	var node *Node
func (tree *Tree6[T]) MostSpecificContains(ip net.IP) (ok bool, value T) {
	var node *Node[T]

	wholeIP, ipv4 := isIPV4(ip)
	if ipv4 {

@@ -90,8 +91,9 @@ func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
	bit := startbit

	for node != nil {
		if node.value != nil {
		if node.hasValue {
			value = node.value
			ok = true
		}

		if bit == 0 {

@@ -108,16 +110,17 @@ func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
		}
	}

	return value
	return ok, value
}

func (tree *Tree6) MostSpecificContainsIpV4(ip iputil.VpnIp) (value interface{}) {
func (tree *Tree6[T]) MostSpecificContainsIpV4(ip iputil.VpnIp) (ok bool, value T) {
	bit := startbit
	node := tree.root4

	for node != nil {
		if node.value != nil {
		if node.hasValue {
			value = node.value
			ok = true
		}

		if ip&bit != 0 {

@@ -129,10 +132,10 @@ func (tree *Tree6) MostSpecificContainsIpV4(ip iputil.VpnIp) (value interface{})
		bit >>= 1
	}

	return value
	return ok, value
}

func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
func (tree *Tree6[T]) MostSpecificContainsIpV6(hi, lo uint64) (ok bool, value T) {
	ip := hi
	node := tree.root6

@@ -140,8 +143,9 @@ func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
	bit := startbit6

	for node != nil {
		if node.value != nil {
		if node.hasValue {
			value = node.value
			ok = true
		}

		if bit == 0 {

@@ -160,7 +164,7 @@ func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
		ip = lo
	}

	return value
	return ok, value
}

func isIPV4(ip net.IP) (net.IP, bool) {
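Note why the lookup test changed from `node.value != nil` to `node.hasValue`: with a type parameter there is no universal nil to compare against, and a stored zero value (for example `false` in a `Tree6[bool]` allow list) must still count as a match. A self-contained sketch of the distinction, with illustrative names:

```go
package main

import "fmt"

// With generics, the zero value of T (false, "", 0) can be a
// legitimately stored entry, so presence is tracked explicitly.
type node[T any] struct {
	hasValue bool
	value    T
}

func (n *node[T]) get() (bool, T) {
	// Returning (false, zero) on a miss stays unambiguous even when a
	// stored value equals the zero value, e.g. a "deny" bool.
	return n.hasValue, n.value
}

func main() {
	var empty node[bool]
	stored := node[bool]{hasValue: true, value: false} // an explicit deny

	ok, _ := empty.get()
	fmt.Println(ok) // false: nothing stored

	ok, v := stored.get()
	fmt.Println(ok, v) // true false: a real entry whose value is false
}
```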
@@ -9,7 +9,7 @@ import (
)

func TestCIDR6Tree_MostSpecificContains(t *testing.T) {
	tree := NewTree6()
	tree := NewTree6[string]()
	tree.AddCIDR(Parse("1.0.0.0/8"), "1")
	tree.AddCIDR(Parse("2.1.0.0/16"), "2")
	tree.AddCIDR(Parse("3.1.1.0/24"), "3")

@@ -22,53 +22,68 @@ func TestCIDR6Tree_MostSpecificContains(t *testing.T) {
	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")

	tests := []struct {
		Found  bool
		Result interface{}
		IP     string
	}{
		{"1", "1.0.0.0"},
		{"1", "1.255.255.255"},
		{"2", "2.1.0.0"},
		{"2", "2.1.255.255"},
		{"3", "3.1.1.0"},
		{"3", "3.1.1.255"},
		{"4a", "4.1.1.255"},
		{"4b", "4.1.1.2"},
		{"4c", "4.1.1.1"},
		{"5", "240.0.0.0"},
		{"5", "255.255.255.255"},
		{"6a", "1:2:0:4:1:1:1:1"},
		{"6b", "1:2:0:4:5:1:1:1"},
		{"6c", "1:2:0:4:5:0:0:0"},
		{nil, "239.0.0.0"},
		{nil, "4.1.2.2"},
		{true, "1", "1.0.0.0"},
		{true, "1", "1.255.255.255"},
		{true, "2", "2.1.0.0"},
		{true, "2", "2.1.255.255"},
		{true, "3", "3.1.1.0"},
		{true, "3", "3.1.1.255"},
		{true, "4a", "4.1.1.255"},
		{true, "4b", "4.1.1.2"},
		{true, "4c", "4.1.1.1"},
		{true, "5", "240.0.0.0"},
		{true, "5", "255.255.255.255"},
		{true, "6a", "1:2:0:4:1:1:1:1"},
		{true, "6b", "1:2:0:4:5:1:1:1"},
		{true, "6c", "1:2:0:4:5:0:0:0"},
		{false, "", "239.0.0.0"},
		{false, "", "4.1.2.2"},
	}

	for _, tt := range tests {
		assert.Equal(t, tt.Result, tree.MostSpecificContains(net.ParseIP(tt.IP)))
		ok, r := tree.MostSpecificContains(net.ParseIP(tt.IP))
		assert.Equal(t, tt.Found, ok)
		assert.Equal(t, tt.Result, r)
	}

	tree = NewTree6()
	tree = NewTree6[string]()
	tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
	tree.AddCIDR(Parse("::/0"), "cool6")
	assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("0.0.0.0")))
	assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("255.255.255.255")))
	assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("::")))
	assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8")))
	ok, r := tree.MostSpecificContains(net.ParseIP("0.0.0.0"))
	assert.True(t, ok)
	assert.Equal(t, "cool", r)

	ok, r = tree.MostSpecificContains(net.ParseIP("255.255.255.255"))
	assert.True(t, ok)
	assert.Equal(t, "cool", r)

	ok, r = tree.MostSpecificContains(net.ParseIP("::"))
	assert.True(t, ok)
	assert.Equal(t, "cool6", r)

	ok, r = tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8"))
	assert.True(t, ok)
	assert.Equal(t, "cool6", r)
}

func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) {
	tree := NewTree6()
	tree := NewTree6[string]()
	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
	tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")

	tests := []struct {
		Found  bool
		Result interface{}
		IP     string
	}{
		{"6a", "1:2:0:4:1:1:1:1"},
		{"6b", "1:2:0:4:5:1:1:1"},
		{"6c", "1:2:0:4:5:0:0:0"},
		{true, "6a", "1:2:0:4:1:1:1:1"},
		{true, "6b", "1:2:0:4:5:1:1:1"},
		{true, "6c", "1:2:0:4:5:0:0:0"},
	}

	for _, tt := range tests {

@@ -76,6 +91,8 @@ func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) {
		hi := binary.BigEndian.Uint64(ip[:8])
		lo := binary.BigEndian.Uint64(ip[8:])

		assert.Equal(t, tt.Result, tree.MostSpecificContainsIpV6(hi, lo))
		ok, r := tree.MostSpecificContainsIpV6(hi, lo)
		assert.Equal(t, tt.Found, ok)
		assert.Equal(t, tt.Result, r)
	}
}
@@ -7,7 +7,6 @@ import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"math"
	"net"
	"os"

@@ -181,9 +180,15 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
		if err != nil {
			return fmt.Errorf("error while generating ecdsa keys: %s", err)
		}
		// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L60
		rawPriv = key.D.FillBytes(make([]byte, 32))
		pub = elliptic.Marshal(elliptic.P256(), key.X, key.Y)

		// ecdh.PrivateKey lets us get at the encoded bytes, even though
		// we aren't using ECDH here.
		eKey, err := key.ECDH()
		if err != nil {
			return fmt.Errorf("error while converting ecdsa key: %s", err)
		}
		rawPriv = eKey.Bytes()
		pub = eKey.PublicKey().Bytes()
	}

	nc := cert.NebulaCertificate{

@@ -213,27 +218,27 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
		return fmt.Errorf("error while signing: %s", err)
	}

	var b []byte
	if *cf.encryption {
		b, err := cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
		b, err = cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
		if err != nil {
			return fmt.Errorf("error while encrypting out-key: %s", err)
		}

		err = ioutil.WriteFile(*cf.outKeyPath, b, 0600)
	} else {
		err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalSigningPrivateKey(curve, rawPriv), 0600)
		b = cert.MarshalSigningPrivateKey(curve, rawPriv)
	}

	err = os.WriteFile(*cf.outKeyPath, b, 0600)
	if err != nil {
		return fmt.Errorf("error while writing out-key: %s", err)
	}

	b, err := nc.MarshalToPEM()
	b, err = nc.MarshalToPEM()
	if err != nil {
		return fmt.Errorf("error while marshalling certificate: %s", err)
	}

	err = ioutil.WriteFile(*cf.outCertPath, b, 0600)
	err = os.WriteFile(*cf.outCertPath, b, 0600)
	if err != nil {
		return fmt.Errorf("error while writing out-crt: %s", err)
	}

@@ -244,7 +249,7 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
		return fmt.Errorf("error while generating qr code: %s", err)
	}

	err = ioutil.WriteFile(*cf.outQRPath, b, 0600)
	err = os.WriteFile(*cf.outQRPath, b, 0600)
	if err != nil {
		return fmt.Errorf("error while writing out-qr: %s", err)
	}
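The ECDSA hunk swaps the hand-rolled `key.D.FillBytes`/`elliptic.Marshal` encoding for Go 1.20's `(*ecdsa.PrivateKey).ECDH()` conversion, which yields the same fixed-length encodings without the now-deprecated `elliptic.Marshal`. A minimal sketch using only the standard library:

```go
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}

	// Go 1.20+: convert to the crypto/ecdh representation purely to get
	// at the canonical byte encodings; no key agreement is performed.
	eKey, err := key.ECDH()
	if err != nil {
		panic(err)
	}

	priv := eKey.Bytes()             // 32-byte P-256 scalar
	pub := eKey.PublicKey().Bytes()  // 65-byte uncompressed point
	fmt.Println(len(priv), len(pub)) // 32 65
}
```

The same section also hoists `var b []byte` above the `if`, so one `os.WriteFile` handles both the encrypted and plaintext key branches, and the later `b, err = nc.MarshalToPEM()` reuses the variable instead of redeclaring it with `:=`.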
@@ -7,7 +7,6 @@ import (
	"bytes"
	"encoding/pem"
	"errors"
	"io/ioutil"
	"os"
	"strings"
	"testing"

@@ -107,7 +106,7 @@ func Test_ca(t *testing.T) {
	assert.Equal(t, "", eb.String())

	// create temp key file
	keyF, err := ioutil.TempFile("", "test.key")
	keyF, err := os.CreateTemp("", "test.key")
	assert.Nil(t, err)
	os.Remove(keyF.Name())

@@ -120,7 +119,7 @@ func Test_ca(t *testing.T) {
	assert.Equal(t, "", eb.String())

	// create temp cert file
	crtF, err := ioutil.TempFile("", "test.crt")
	crtF, err := os.CreateTemp("", "test.crt")
	assert.Nil(t, err)
	os.Remove(crtF.Name())
	os.Remove(keyF.Name())

@@ -134,13 +133,13 @@ func Test_ca(t *testing.T) {
	assert.Equal(t, "", eb.String())

	// read cert and key files
	rb, _ := ioutil.ReadFile(keyF.Name())
	rb, _ := os.ReadFile(keyF.Name())
	lKey, b, err := cert.UnmarshalEd25519PrivateKey(rb)
	assert.Len(t, b, 0)
	assert.Nil(t, err)
	assert.Len(t, lKey, 64)

	rb, _ = ioutil.ReadFile(crtF.Name())
	rb, _ = os.ReadFile(crtF.Name())
	lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
	assert.Len(t, b, 0)
	assert.Nil(t, err)

@@ -166,7 +165,7 @@ func Test_ca(t *testing.T) {
	assert.Equal(t, "", eb.String())

	// read encrypted key file and verify default params
	rb, _ = ioutil.ReadFile(keyF.Name())
	rb, _ = os.ReadFile(keyF.Name())
	k, _ := pem.Decode(rb)
	ned, err := cert.UnmarshalNebulaEncryptedData(k.Bytes)
	assert.Nil(t, err)
@@ -4,7 +4,6 @@ import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/slackhq/nebula/cert"

@@ -54,12 +53,12 @@ func keygen(args []string, out io.Writer, errOut io.Writer) error {
		return fmt.Errorf("invalid curve: %s", *cf.curve)
	}

	err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
	err = os.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
	if err != nil {
		return fmt.Errorf("error while writing out-key: %s", err)
	}

	err = ioutil.WriteFile(*cf.outPubPath, cert.MarshalPublicKey(curve, pub), 0600)
	err = os.WriteFile(*cf.outPubPath, cert.MarshalPublicKey(curve, pub), 0600)
	if err != nil {
		return fmt.Errorf("error while writing out-pub: %s", err)
	}
@@ -2,7 +2,6 @@ package main

import (
	"bytes"
	"io/ioutil"
	"os"
	"testing"

@@ -54,7 +53,7 @@ func Test_keygen(t *testing.T) {
	assert.Equal(t, "", eb.String())

	// create temp key file
	keyF, err := ioutil.TempFile("", "test.key")
	keyF, err := os.CreateTemp("", "test.key")
	assert.Nil(t, err)
	defer os.Remove(keyF.Name())

@@ -67,7 +66,7 @@ func Test_keygen(t *testing.T) {
	assert.Equal(t, "", eb.String())

	// create temp pub file
	pubF, err := ioutil.TempFile("", "test.pub")
	pubF, err := os.CreateTemp("", "test.pub")
	assert.Nil(t, err)
	defer os.Remove(pubF.Name())

@@ -80,13 +79,13 @@ func Test_keygen(t *testing.T) {
	assert.Equal(t, "", eb.String())

	// read cert and key files
	rb, _ := ioutil.ReadFile(keyF.Name())
	rb, _ := os.ReadFile(keyF.Name())
	lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
	assert.Len(t, b, 0)
	assert.Nil(t, err)
	assert.Len(t, lKey, 32)

	rb, _ = ioutil.ReadFile(pubF.Name())
	rb, _ = os.ReadFile(pubF.Name())
	lPub, b, err := cert.UnmarshalX25519PublicKey(rb)
	assert.Len(t, b, 0)
	assert.Nil(t, err)
@@ -5,7 +5,6 @@ import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"

@@ -41,7 +40,7 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
		return err
	}

	rawCert, err := ioutil.ReadFile(*pf.path)
	rawCert, err := os.ReadFile(*pf.path)
	if err != nil {
		return fmt.Errorf("unable to read cert; %s", err)
	}

@@ -87,7 +86,7 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
		return fmt.Errorf("error while generating qr code: %s", err)
	}

	err = ioutil.WriteFile(*pf.outQRPath, b, 0600)
	err = os.WriteFile(*pf.outQRPath, b, 0600)
	if err != nil {
		return fmt.Errorf("error while writing out-qr: %s", err)
	}
@@ -2,7 +2,6 @@ package main

import (
	"bytes"
	"io/ioutil"
	"os"
	"testing"
	"time"

@@ -54,7 +53,7 @@ func Test_printCert(t *testing.T) {
	// invalid cert at path
	ob.Reset()
	eb.Reset()
	tf, err := ioutil.TempFile("", "print-cert")
	tf, err := os.CreateTemp("", "print-cert")
	assert.Nil(t, err)
	defer os.Remove(tf.Name())
@@ -6,7 +6,6 @@ import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"net"
	"os"
	"strings"

@@ -73,7 +72,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
		return newHelpErrorf("cannot set both -in-pub and -out-key")
	}

	rawCAKey, err := ioutil.ReadFile(*sf.caKeyPath)
	rawCAKey, err := os.ReadFile(*sf.caKeyPath)
	if err != nil {
		return fmt.Errorf("error while reading ca-key: %s", err)
	}

@@ -112,7 +111,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
		return fmt.Errorf("error while parsing ca-key: %s", err)
	}

	rawCACert, err := ioutil.ReadFile(*sf.caCertPath)
	rawCACert, err := os.ReadFile(*sf.caCertPath)
	if err != nil {
		return fmt.Errorf("error while reading ca-crt: %s", err)
	}

@@ -178,7 +177,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)

	var pub, rawPriv []byte
	if *sf.inPubPath != "" {
		rawPub, err := ioutil.ReadFile(*sf.inPubPath)
		rawPub, err := os.ReadFile(*sf.inPubPath)
		if err != nil {
			return fmt.Errorf("error while reading in-pub: %s", err)
		}

@@ -235,7 +234,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
		return fmt.Errorf("refusing to overwrite existing key: %s", *sf.outKeyPath)
	}

	err = ioutil.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
	err = os.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
	if err != nil {
		return fmt.Errorf("error while writing out-key: %s", err)
	}

@@ -246,7 +245,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
		return fmt.Errorf("error while marshalling certificate: %s", err)
	}

	err = ioutil.WriteFile(*sf.outCertPath, b, 0600)
	err = os.WriteFile(*sf.outCertPath, b, 0600)
	if err != nil {
		return fmt.Errorf("error while writing out-crt: %s", err)
	}

@@ -257,7 +256,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
		return fmt.Errorf("error while generating qr code: %s", err)
	}

	err = ioutil.WriteFile(*sf.outQRPath, b, 0600)
	err = os.WriteFile(*sf.outQRPath, b, 0600)
	if err != nil {
		return fmt.Errorf("error while writing out-qr: %s", err)
	}
@@ -7,7 +7,6 @@ import (
	"bytes"
	"crypto/rand"
	"errors"
	"io/ioutil"
	"os"
	"testing"
	"time"

@@ -104,7 +103,7 @@ func Test_signCert(t *testing.T) {
	// failed to unmarshal key
	ob.Reset()
	eb.Reset()
	caKeyF, err := ioutil.TempFile("", "sign-cert.key")
	caKeyF, err := os.CreateTemp("", "sign-cert.key")
	assert.Nil(t, err)
	defer os.Remove(caKeyF.Name())

@@ -128,7 +127,7 @@ func Test_signCert(t *testing.T) {
	// failed to unmarshal cert
	ob.Reset()
	eb.Reset()
	caCrtF, err := ioutil.TempFile("", "sign-cert.crt")
	caCrtF, err := os.CreateTemp("", "sign-cert.crt")
	assert.Nil(t, err)
	defer os.Remove(caCrtF.Name())

@@ -159,7 +158,7 @@ func Test_signCert(t *testing.T) {
	// failed to unmarshal pub
	ob.Reset()
	eb.Reset()
	inPubF, err := ioutil.TempFile("", "in.pub")
	inPubF, err := os.CreateTemp("", "in.pub")
	assert.Nil(t, err)
	defer os.Remove(inPubF.Name())

@@ -206,7 +205,7 @@ func Test_signCert(t *testing.T) {

	// mismatched ca key
	_, caPriv2, _ := ed25519.GenerateKey(rand.Reader)
	caKeyF2, err := ioutil.TempFile("", "sign-cert-2.key")
	caKeyF2, err := os.CreateTemp("", "sign-cert-2.key")
	assert.Nil(t, err)
	defer os.Remove(caKeyF2.Name())
	caKeyF2.Write(cert.MarshalEd25519PrivateKey(caPriv2))

@@ -227,7 +226,7 @@ func Test_signCert(t *testing.T) {
	assert.Empty(t, eb.String())

	// create temp key file
	keyF, err := ioutil.TempFile("", "test.key")
	keyF, err := os.CreateTemp("", "test.key")
	assert.Nil(t, err)
	os.Remove(keyF.Name())

@@ -241,7 +240,7 @@ func Test_signCert(t *testing.T) {
	os.Remove(keyF.Name())

	// create temp cert file
	crtF, err := ioutil.TempFile("", "test.crt")
	crtF, err := os.CreateTemp("", "test.crt")
	assert.Nil(t, err)
	os.Remove(crtF.Name())

@@ -254,13 +253,13 @@ func Test_signCert(t *testing.T) {
	assert.Empty(t, eb.String())

	// read cert and key files
	rb, _ := ioutil.ReadFile(keyF.Name())
	rb, _ := os.ReadFile(keyF.Name())
	lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
	assert.Len(t, b, 0)
	assert.Nil(t, err)
	assert.Len(t, lKey, 32)

	rb, _ = ioutil.ReadFile(crtF.Name())
	rb, _ = os.ReadFile(crtF.Name())
	lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
	assert.Len(t, b, 0)
	assert.Nil(t, err)

@@ -296,7 +295,7 @@ func Test_signCert(t *testing.T) {
	assert.Empty(t, eb.String())

	// read cert file and check pub key matches in-pub
	rb, _ = ioutil.ReadFile(crtF.Name())
	rb, _ = os.ReadFile(crtF.Name())
	lCrt, b, err = cert.UnmarshalNebulaCertificateFromPEM(rb)
	assert.Len(t, b, 0)
	assert.Nil(t, err)

@@ -348,11 +347,11 @@ func Test_signCert(t *testing.T) {
	ob.Reset()
	eb.Reset()

	caKeyF, err = ioutil.TempFile("", "sign-cert.key")
	caKeyF, err = os.CreateTemp("", "sign-cert.key")
	assert.Nil(t, err)
	defer os.Remove(caKeyF.Name())

	caCrtF, err = ioutil.TempFile("", "sign-cert.crt")
	caCrtF, err = os.CreateTemp("", "sign-cert.crt")
	assert.Nil(t, err)
	defer os.Remove(caCrtF.Name())
@@ -4,7 +4,6 @@ import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"strings"
	"time"

@@ -40,7 +39,7 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {
		return err
	}

	rawCACert, err := ioutil.ReadFile(*vf.caPath)
	rawCACert, err := os.ReadFile(*vf.caPath)
	if err != nil {
		return fmt.Errorf("error while reading ca: %s", err)
	}

@@ -57,7 +56,7 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {
		}
	}

	rawCert, err := ioutil.ReadFile(*vf.certPath)
	rawCert, err := os.ReadFile(*vf.certPath)
	if err != nil {
		return fmt.Errorf("unable to read crt; %s", err)
	}
@@ -3,7 +3,6 @@ package main

import (
	"bytes"
	"crypto/rand"
	"io/ioutil"
	"os"
	"testing"
	"time"

@@ -56,7 +55,7 @@ func Test_verify(t *testing.T) {
	// invalid ca at path
	ob.Reset()
	eb.Reset()
	caFile, err := ioutil.TempFile("", "verify-ca")
	caFile, err := os.CreateTemp("", "verify-ca")
	assert.Nil(t, err)
	defer os.Remove(caFile.Name())

@@ -92,7 +91,7 @@ func Test_verify(t *testing.T) {
	// invalid crt at path
	ob.Reset()
	eb.Reset()
	certFile, err := ioutil.TempFile("", "verify-cert")
	certFile, err := os.CreateTemp("", "verify-cert")
	assert.Nil(t, err)
	defer os.Remove(certFile.Name())
@@ -59,13 +59,8 @@ func main() {
	}

	ctrl, err := nebula.Main(c, *configTest, Build, l, nil)

	switch v := err.(type) {
	case util.ContextualError:
		v.Log(l)
		os.Exit(1)
	case error:
		l.WithError(err).Error("Failed to start")
	if err != nil {
		util.LogWithContextIfNeeded("Failed to start", err, l)
		os.Exit(1)
	}
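Both entry points replace the error type switch with a single call to `util.LogWithContextIfNeeded`. Its body is not shown in this diff; judging from the switch it replaces, it presumably logs a `ContextualError` with its attached fields and falls back to a plain `WithError` log otherwise. A hedged sketch of that assumed behavior, not nebula's actual implementation:

```go
package util

import "github.com/sirupsen/logrus"

// LogWithContextIfNeeded is sketched from its call sites in this change;
// the real helper in nebula's util package may differ.
func LogWithContextIfNeeded(msg string, err error, l *logrus.Logger) {
	// A ContextualError carries its own fields and log method,
	// as the old `case util.ContextualError: v.Log(l)` branch shows.
	if ce, ok := err.(ContextualError); ok {
		ce.Log(l)
		return
	}
	// Plain errors get the generic message, as the old switch did.
	l.WithError(err).Error(msg)
}
```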
@@ -53,18 +53,14 @@ func main() {
	}

	ctrl, err := nebula.Main(c, *configTest, Build, l, nil)

	switch v := err.(type) {
	case util.ContextualError:
		v.Log(l)
		os.Exit(1)
	case error:
		l.WithError(err).Error("Failed to start")
	if err != nil {
		util.LogWithContextIfNeeded("Failed to start", err, l)
		os.Exit(1)
	}

	if !*configTest {
		ctrl.Start()
		notifyReady(l)
		ctrl.ShutdownBlock()
	}
cmd/nebula/notify_linux.go (new file)
@@ -0,0 +1,42 @@
package main

import (
	"net"
	"os"
	"time"

	"github.com/sirupsen/logrus"
)

// SdNotifyReady tells systemd the service is ready and dependent services can now be started
// https://www.freedesktop.org/software/systemd/man/sd_notify.html
// https://www.freedesktop.org/software/systemd/man/systemd.service.html
const SdNotifyReady = "READY=1"

func notifyReady(l *logrus.Logger) {
	sockName := os.Getenv("NOTIFY_SOCKET")
	if sockName == "" {
		l.Debugln("NOTIFY_SOCKET systemd env var not set, not sending ready signal")
		return
	}

	conn, err := net.DialTimeout("unixgram", sockName, time.Second)
	if err != nil {
		l.WithError(err).Error("failed to connect to systemd notification socket")
		return
	}
	defer conn.Close()

	err = conn.SetWriteDeadline(time.Now().Add(time.Second))
	if err != nil {
		l.WithError(err).Error("failed to set the write deadline for the systemd notification socket")
		return
	}

	if _, err = conn.Write([]byte(SdNotifyReady)); err != nil {
		l.WithError(err).Error("failed to signal the systemd notification socket")
		return
	}

	l.Debugln("notified systemd the service is ready")
}
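With this in place, a `Type=notify` systemd unit can hold dependent services until nebula has actually started, since `notifyReady` runs right after `ctrl.Start()`. A minimal sketch of the sd_notify exchange for local experimentation (Linux only, like the file above; the socket path is arbitrary):

```go
package main

import (
	"fmt"
	"net"
	"os"
	"path/filepath"
	"time"
)

// Stand in for systemd: listen on a unixgram socket, then send READY=1
// the same way notifyReady does.
func main() {
	sock := filepath.Join(os.TempDir(), "notify-test.sock")
	defer os.Remove(sock)

	srv, err := net.ListenUnixgram("unixgram", &net.UnixAddr{Name: sock, Net: "unixgram"})
	if err != nil {
		panic(err)
	}
	defer srv.Close()

	// The sending side, as systemd would configure it for the service.
	os.Setenv("NOTIFY_SOCKET", sock)
	conn, err := net.DialTimeout("unixgram", os.Getenv("NOTIFY_SOCKET"), time.Second)
	if err != nil {
		panic(err)
	}
	conn.Write([]byte("READY=1"))
	conn.Close()

	buf := make([]byte, 64)
	n, _ := srv.Read(buf)
	fmt.Printf("systemd would receive: %q\n", buf[:n]) // "READY=1"
}
```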
cmd/nebula/notify_notlinux.go (new file)
@@ -0,0 +1,10 @@
//go:build !linux
// +build !linux

package main

import "github.com/sirupsen/logrus"

func notifyReady(_ *logrus.Logger) {
	// No init service to notify
}
@@ -4,7 +4,7 @@ import (
	"context"
	"errors"
	"fmt"
	"io/ioutil"
	"math"
	"os"
	"os/signal"
	"path/filepath"

@@ -15,7 +15,7 @@ import (
	"syscall"
	"time"

	"github.com/imdario/mergo"
	"dario.cat/mergo"
	"github.com/sirupsen/logrus"
	"gopkg.in/yaml.v2"
)

@@ -121,6 +121,10 @@ func (c *C) HasChanged(k string) bool {
// CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the
// original path provided to Load. The old settings are shallow copied for change detection after the reload.
func (c *C) CatchHUP(ctx context.Context) {
	if c.path == "" {
		return
	}

	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGHUP)

@@ -236,6 +240,15 @@ func (c *C) GetInt(k string, d int) int {
	return v
}

// GetUint32 will get the uint32 for k or return the default d if not found or invalid
func (c *C) GetUint32(k string, d uint32) uint32 {
	r := c.GetInt(k, int(d))
	if uint64(r) > uint64(math.MaxUint32) {
		return d
	}
	return uint32(r)
}

// GetBool will get the bool for k or return the default d if not found or invalid
func (c *C) GetBool(k string, d bool) bool {
	r := strings.ToLower(c.GetString(k, fmt.Sprintf("%v", d)))

@@ -348,7 +361,7 @@ func (c *C) parse() error {
	var m map[interface{}]interface{}

	for _, path := range c.files {
		b, err := ioutil.ReadFile(path)
		b, err := os.ReadFile(path)
		if err != nil {
			return err
		}
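The new `GetUint32` accessor piggybacks on `GetInt` and returns the default for anything outside the uint32 range, which also covers negative values: converted to uint64 they wrap far above `math.MaxUint32`. A standalone sketch of that clamping logic, with a stub standing in for `(*config.C).GetInt`:

```go
package main

import (
	"fmt"
	"math"
)

// getUint32 mirrors the method added above; getInt stands in for the
// config lookup so the sketch is self-contained.
func getUint32(getInt func(string, int) int, k string, d uint32) uint32 {
	r := getInt(k, int(d))
	if uint64(r) > uint64(math.MaxUint32) {
		// Out of range, including negatives (which wrap to a huge
		// uint64): fall back to the default.
		return d
	}
	return uint32(r)
}

func main() {
	fake := func(k string, d int) int {
		return map[string]int{"ok": 42, "neg": -1}[k]
	}
	fmt.Println(getUint32(fake, "ok", 7))  // 42
	fmt.Println(getUint32(fake, "neg", 7)) // 7: -1 is rejected
}
```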
@@ -1,13 +1,12 @@
package config

import (
-    "io/ioutil"
    "os"
    "path/filepath"
    "testing"
    "time"

-    "github.com/imdario/mergo"
+    "dario.cat/mergo"
    "github.com/slackhq/nebula/test"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

@@ -16,10 +15,10 @@ import (

func TestConfig_Load(t *testing.T) {
    l := test.NewLogger()
-    dir, err := ioutil.TempDir("", "config-test")
+    dir, err := os.MkdirTemp("", "config-test")
    // invalid yaml
    c := NewC(l)
-    ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
+    os.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
    assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}")

    // simple multi config merge

@@ -29,8 +28,8 @@ func TestConfig_Load(t *testing.T) {

    assert.Nil(t, err)

-    ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
-    ioutil.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644)
+    os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
+    os.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644)
    assert.Nil(t, c.Load(dir))
    expected := map[interface{}]interface{}{
        "outer": map[interface{}]interface{}{

@@ -120,9 +119,9 @@ func TestConfig_HasChanged(t *testing.T) {
func TestConfig_ReloadConfig(t *testing.T) {
    l := test.NewLogger()
    done := make(chan bool, 1)
-    dir, err := ioutil.TempDir("", "config-test")
+    dir, err := os.MkdirTemp("", "config-test")
    assert.Nil(t, err)
-    ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
+    os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)

    c := NewC(l)
    assert.Nil(t, c.Load(dir))

@@ -131,7 +130,7 @@ func TestConfig_ReloadConfig(t *testing.T) {
    assert.False(t, c.HasChanged("outer"))
    assert.False(t, c.HasChanged(""))

-    ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
+    os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)

    c.RegisterReloadCallback(func(c *C) {
        done <- true
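
A condensed sketch of the reload flow these tests exercise (the config directory is hypothetical; `CatchHUP` blocks, re-reading the files and firing registered callbacks when the process receives SIGHUP):

```
package main

import (
	"context"

	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/config"
)

func main() {
	l := logrus.New()
	c := config.NewC(l)
	if err := c.Load("/etc/nebula"); err != nil {
		l.WithError(err).Fatal("failed to load config")
	}

	c.RegisterReloadCallback(func(c *config.C) {
		if c.HasChanged("outer") {
			l.Info("outer section changed, re-applying")
		}
	})

	// `kill -HUP <pid>` now re-reads the files and runs the callbacks.
	c.CatchHUP(context.Background())
}
```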
@@ -23,6 +23,7 @@ const (
    swapPrimary trafficDecision = 3
    migrateRelays trafficDecision = 4
    tryRehandshake trafficDecision = 5
+   sendTestPacket trafficDecision = 6
)

type connectionManager struct {

@@ -176,7 +177,7 @@ func (n *connectionManager) Run(ctx context.Context) {
}

func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
-   decision, hostinfo, primary := n.makeTrafficDecision(localIndex, p, nb, out, now)
+   decision, hostinfo, primary := n.makeTrafficDecision(localIndex, now)

    switch decision {
    case deleteTunnel:

@@ -197,6 +198,9 @@ func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte,

    case tryRehandshake:
        n.tryRehandshake(hostinfo)
+
+   case sendTestPacket:
+       n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
    }

    n.resetRelayTrafficCheck(hostinfo)

@@ -231,7 +235,7 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
    index = existing.LocalIndex
    switch r.Type {
    case TerminalType:
-       relayFrom = newhostinfo.vpnIp
+       relayFrom = n.intf.myVpnIp
        relayTo = existing.PeerIp
    case ForwardingType:
        relayFrom = existing.PeerIp

@@ -256,7 +260,7 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
    }
    switch r.Type {
    case TerminalType:
-       relayFrom = newhostinfo.vpnIp
+       relayFrom = n.intf.myVpnIp
        relayTo = r.PeerIp
    case ForwardingType:
        relayFrom = r.PeerIp

@@ -289,7 +293,7 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
    }
}

-func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []byte, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
+func (n *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
    n.hostMap.RLock()
    defer n.hostMap.RUnlock()

@@ -356,6 +360,7 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []
        return deleteTunnel, hostinfo, nil
    }

+   decision := doNothing
    if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
        if !outTraffic {
            // If we aren't sending or receiving traffic then it's an unused tunnel and we don't need to test the tunnel.

@@ -380,7 +385,7 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []
        }

        // Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
-       n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
+       decision = sendTestPacket

    } else {
        if n.l.Level >= logrus.DebugLevel {

@@ -390,7 +395,7 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []

    n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
    n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
-   return doNothing, nil, nil
+   return decision, hostinfo, nil
}

func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {

@@ -405,8 +410,8 @@ func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
        return false
    }

-   certState := n.intf.certState.Load()
-   return bytes.Equal(current.ConnectionState.certState.certificate.Signature, certState.certificate.Signature)
+   certState := n.intf.pki.GetCertState()
+   return bytes.Equal(current.ConnectionState.myCert.Signature, certState.Certificate.Signature)
}

func (n *connectionManager) swapPrimary(current, primary *HostInfo) {

@@ -427,12 +432,12 @@ func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostIn
        return false
    }

-   valid, err := remoteCert.VerifyWithCache(now, n.intf.caPool)
+   valid, err := remoteCert.VerifyWithCache(now, n.intf.pki.GetCAPool())
    if valid {
        return false
    }

-   if !n.intf.disconnectInvalid && err != cert.ErrBlockListed {
+   if !n.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
        // Block listed certificates should always be disconnected
        return false
    }

@@ -452,7 +457,7 @@ func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
    }

    if n.punchy.GetTargetEverything() {
-       hostinfo.remotes.ForEach(n.hostMap.preferredRanges, func(addr *udp.Addr, preferred bool) {
+       hostinfo.remotes.ForEach(n.hostMap.GetPreferredRanges(), func(addr *udp.Addr, preferred bool) {
            n.metricsTxPunchy.Inc(1)
            n.intf.outside.WriteTo([]byte{1}, addr)
        })

@@ -464,8 +469,8 @@ func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
}

func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
-   certState := n.intf.certState.Load()
-   if bytes.Equal(hostinfo.ConnectionState.certState.certificate.Signature, certState.certificate.Signature) {
+   certState := n.intf.pki.GetCertState()
+   if bytes.Equal(hostinfo.ConnectionState.myCert.Signature, certState.Certificate.Signature) {
        return
    }

@@ -473,18 +478,5 @@ func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
        WithField("reason", "local certificate is not current").
        Info("Re-handshaking with remote")

-   //TODO: this is copied from getOrHandshake to keep the extra checks out of the hot path, figure it out
-   newHostinfo := n.intf.handshakeManager.AddVpnIp(hostinfo.vpnIp, n.intf.initHostInfo)
-   if !newHostinfo.HandshakeReady {
-       ixHandshakeStage0(n.intf, newHostinfo.vpnIp, newHostinfo)
-   }
-
-   //If this is a static host, we don't need to wait for the HostQueryReply
-   //We can trigger the handshake right now
-   if _, ok := n.intf.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
-       select {
-       case n.intf.handshakeManager.trigger <- hostinfo.vpnIp:
-       default:
-       }
-   }
+   n.intf.handshakeManager.StartHandshake(hostinfo.vpnIp, nil)
}
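
The shape of this refactor is worth calling out: `makeTrafficDecision` no longer performs I/O while holding the hostmap read lock; it only returns a `trafficDecision` that `doTrafficCheck` acts on afterwards. A rough sketch of that decide/act split (simplified; the predicates and helper functions here are hypothetical):

```
// decide is pure: it inspects state under the lock and names an action.
func decide(dead, idle bool) trafficDecision {
	switch {
	case dead:
		return deleteTunnel
	case idle:
		return sendTestPacket
	default:
		return doNothing
	}
}

// act performs the side effects after the lock has been released.
func act(d trafficDecision, sendTest func()) {
	switch d {
	case sendTestPacket:
		sendTest()
	case deleteTunnel:
		// tear the tunnel down
	}
}
```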
@@ -21,8 +21,9 @@ var vpnIp iputil.VpnIp

func newTestLighthouse() *LightHouse {
    lh := &LightHouse{
-       l: test.NewLogger(),
-       addrMap: map[iputil.VpnIp]*RemoteList{},
+       l: test.NewLogger(),
+       addrMap: map[iputil.VpnIp]*RemoteList{},
+       queryChan: make(chan iputil.VpnIp, 10),
    }
    lighthouses := map[iputil.VpnIp]struct{}{}
    staticList := map[iputil.VpnIp]struct{}{}

@@ -42,25 +43,28 @@ func Test_NewConnectionManagerTest(t *testing.T) {
    preferredRanges := []*net.IPNet{localrange}

    // Very incomplete mock objects
-   hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
+   hostMap := newHostMap(l, vpncidr)
+   hostMap.preferredRanges.Store(&preferredRanges)

    cs := &CertState{
-       rawCertificate: []byte{},
-       privateKey: []byte{},
-       certificate: &cert.NebulaCertificate{},
-       rawCertificateNoKey: []byte{},
+       RawCertificate: []byte{},
+       PrivateKey: []byte{},
+       Certificate: &cert.NebulaCertificate{},
+       RawCertificateNoKey: []byte{},
    }

    lh := newTestLighthouse()
    ifce := &Interface{
        hostMap: hostMap,
        inside: &test.NoopTun{},
-       outside: &udp.Conn{},
+       outside: &udp.NoopConn{},
        firewall: &Firewall{},
        lightHouse: lh,
-       handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
+       pki: &PKI{},
+       handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
        l: l,
    }
-   ifce.certState.Store(cs)
+   ifce.pki.cs.Store(cs)

    // Create manager
    ctx, cancel := context.WithCancel(context.Background())

@@ -78,8 +82,8 @@ func Test_NewConnectionManagerTest(t *testing.T) {
        remoteIndexId: 9901,
    }
    hostinfo.ConnectionState = &ConnectionState{
-       certState: cs,
-       H: &noise.HandshakeState{},
+       myCert: &cert.NebulaCertificate{},
+       H: &noise.HandshakeState{},
    }
    nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

@@ -121,25 +125,28 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
    preferredRanges := []*net.IPNet{localrange}

    // Very incomplete mock objects
-   hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
+   hostMap := newHostMap(l, vpncidr)
+   hostMap.preferredRanges.Store(&preferredRanges)

    cs := &CertState{
-       rawCertificate: []byte{},
-       privateKey: []byte{},
-       certificate: &cert.NebulaCertificate{},
-       rawCertificateNoKey: []byte{},
+       RawCertificate: []byte{},
+       PrivateKey: []byte{},
+       Certificate: &cert.NebulaCertificate{},
+       RawCertificateNoKey: []byte{},
    }

    lh := newTestLighthouse()
    ifce := &Interface{
        hostMap: hostMap,
        inside: &test.NoopTun{},
-       outside: &udp.Conn{},
+       outside: &udp.NoopConn{},
        firewall: &Firewall{},
        lightHouse: lh,
-       handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
+       pki: &PKI{},
+       handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
        l: l,
    }
-   ifce.certState.Store(cs)
+   ifce.pki.cs.Store(cs)

    // Create manager
    ctx, cancel := context.WithCancel(context.Background())

@@ -157,8 +164,8 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
        remoteIndexId: 9901,
    }
    hostinfo.ConnectionState = &ConnectionState{
-       certState: cs,
-       H: &noise.HandshakeState{},
+       myCert: &cert.NebulaCertificate{},
+       H: &noise.HandshakeState{},
    }
    nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

@@ -207,7 +214,8 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
    _, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
    _, localrange, _ := net.ParseCIDR("10.1.1.1/24")
    preferredRanges := []*net.IPNet{localrange}
-   hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
+   hostMap := newHostMap(l, vpncidr)
+   hostMap.preferredRanges.Store(&preferredRanges)

    // Generate keys for CA and peer's cert.
    pubCA, privCA, _ := ed25519.GenerateKey(rand.Reader)

@@ -220,7 +228,8 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
            PublicKey: pubCA,
        },
    }
-   caCert.Sign(cert.Curve_CURVE25519, privCA)
+
+   assert.NoError(t, caCert.Sign(cert.Curve_CURVE25519, privCA))
    ncp := &cert.NebulaCAPool{
        CAs: cert.NewCAPool().CAs,
    }

@@ -239,28 +248,29 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
            Issuer: "ca",
        },
    }
-   peerCert.Sign(cert.Curve_CURVE25519, privCA)
+   assert.NoError(t, peerCert.Sign(cert.Curve_CURVE25519, privCA))

    cs := &CertState{
-       rawCertificate: []byte{},
-       privateKey: []byte{},
-       certificate: &cert.NebulaCertificate{},
-       rawCertificateNoKey: []byte{},
+       RawCertificate: []byte{},
+       PrivateKey: []byte{},
+       Certificate: &cert.NebulaCertificate{},
+       RawCertificateNoKey: []byte{},
    }

    lh := newTestLighthouse()
    ifce := &Interface{
-       hostMap: hostMap,
-       inside: &test.NoopTun{},
-       outside: &udp.Conn{},
-       firewall: &Firewall{},
-       lightHouse: lh,
-       handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
-       l: l,
-       disconnectInvalid: true,
-       caPool: ncp,
+       hostMap: hostMap,
+       inside: &test.NoopTun{},
+       outside: &udp.NoopConn{},
+       firewall: &Firewall{},
+       lightHouse: lh,
+       handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
+       l: l,
+       pki: &PKI{},
    }
-   ifce.certState.Store(cs)
+   ifce.pki.cs.Store(cs)
+   ifce.pki.caPool.Store(ncp)
+   ifce.disconnectInvalid.Store(true)

    // Create manager
    ctx, cancel := context.WithCancel(context.Background())

@@ -268,12 +278,16 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
    punchy := NewPunchyFromConfig(l, config.NewC(l))
    nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
    ifce.connectionManager = nc
-   hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
-   hostinfo.ConnectionState = &ConnectionState{
-       certState: cs,
-       peerCert: &peerCert,
-       H: &noise.HandshakeState{},
+
+   hostinfo := &HostInfo{
+       vpnIp: vpnIp,
+       ConnectionState: &ConnectionState{
+           myCert: &cert.NebulaCertificate{},
+           peerCert: &peerCert,
+           H: &noise.HandshakeState{},
+       },
    }
+   nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

    // Move ahead 45s.
    // Check whether to disconnect on an invalid certificate.
@@ -18,35 +18,34 @@ type ConnectionState struct {
    eKey *NebulaCipherState
    dKey *NebulaCipherState
    H *noise.HandshakeState
-   certState *CertState
+   myCert *cert.NebulaCertificate
    peerCert *cert.NebulaCertificate
    initiator bool
    messageCounter atomic.Uint64
    window *Bits
    queueLock sync.Mutex
    writeLock sync.Mutex
    ready bool
}

-func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
+func NewConnectionState(l *logrus.Logger, cipher string, certState *CertState, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
    var dhFunc noise.DHFunc
-   curCertState := f.certState.Load()
-
-   switch curCertState.certificate.Details.Curve {
+   switch certState.Certificate.Details.Curve {
    case cert.Curve_CURVE25519:
        dhFunc = noise.DH25519
    case cert.Curve_P256:
        dhFunc = noiseutil.DHP256
    default:
-       l.Errorf("invalid curve: %s", curCertState.certificate.Details.Curve)
+       l.Errorf("invalid curve: %s", certState.Certificate.Details.Curve)
        return nil
    }
-   cs := noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
-   if f.cipher == "chachapoly" {
+
+   var cs noise.CipherSuite
+   if cipher == "chachapoly" {
        cs = noise.NewCipherSuite(dhFunc, noise.CipherChaChaPoly, noise.HashSHA256)
+   } else {
+       cs = noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
    }

-   static := noise.DHKey{Private: curCertState.privateKey, Public: curCertState.publicKey}
+   static := noise.DHKey{Private: certState.PrivateKey, Public: certState.PublicKey}

    b := NewBits(ReplayWindow)
    // Clear out bit 0, we never transmit it and we don't want it showing as packet loss

@@ -71,8 +70,7 @@ func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern
        H: hs,
        initiator: initiator,
        window: b,
        ready: false,
-       certState: curCertState,
+       myCert: certState.Certificate,
    }

    return ci

@@ -83,6 +81,5 @@ func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
        "certificate": cs.peerCert,
        "initiator": cs.initiator,
        "message_counter": cs.messageCounter.Load(),
        "ready": cs.ready,
    })
}
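
With this change the constructor is callable without an `Interface`; a rough sketch of the new call shape (the `pki` handle, handshake pattern, and psk values here are placeholders):

```
cs := pki.GetCertState()
ci := NewConnectionState(l, "chachapoly", cs, true, noise.HandshakeIX, []byte{}, 0)
if ci == nil {
	// the certificate used an unsupported curve
}
```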
99 control.go
@@ -11,19 +11,31 @@ import (
    "github.com/slackhq/nebula/cert"
    "github.com/slackhq/nebula/header"
    "github.com/slackhq/nebula/iputil"
+   "github.com/slackhq/nebula/overlay"
    "github.com/slackhq/nebula/udp"
)

// Every interaction here needs to take extra care to copy memory and not return or use arguments "as is" when touching
// core. This means copying IP objects, slices, de-referencing pointers and taking the actual value, etc

+type controlEach func(h *HostInfo)
+
+type controlHostLister interface {
+   QueryVpnIp(vpnIp iputil.VpnIp) *HostInfo
+   ForEachIndex(each controlEach)
+   ForEachVpnIp(each controlEach)
+   GetPreferredRanges() []*net.IPNet
+}
+
type Control struct {
-   f *Interface
-   l *logrus.Logger
-   cancel context.CancelFunc
-   sshStart func()
-   statsStart func()
-   dnsStart func()
+   f *Interface
+   l *logrus.Logger
+   ctx context.Context
+   cancel context.CancelFunc
+   sshStart func()
+   statsStart func()
+   dnsStart func()
+   lighthouseStart func()
}

type ControlHostInfo struct {

@@ -31,7 +43,6 @@ type ControlHostInfo struct {
    LocalIndex uint32 `json:"localIndex"`
    RemoteIndex uint32 `json:"remoteIndex"`
    RemoteAddrs []*udp.Addr `json:"remoteAddrs"`
-   CachedPackets int `json:"cachedPackets"`
    Cert *cert.NebulaCertificate `json:"cert"`
    MessageCounter uint64 `json:"messageCounter"`
    CurrentRemote *udp.Addr `json:"currentRemote"`

@@ -54,12 +65,19 @@ func (c *Control) Start() {
    if c.dnsStart != nil {
        go c.dnsStart()
    }
+   if c.lighthouseStart != nil {
+       c.lighthouseStart()
+   }

    // Start reading packets.
    c.f.run()
}

-// Stop signals nebula to shutdown, returns after the shutdown is complete
+func (c *Control) Context() context.Context {
+   return c.ctx
+}
+
+// Stop signals nebula to shutdown and close all tunnels, returns after the shutdown is complete
func (c *Control) Stop() {
    // Stop the handshakeManager (and other services), to prevent new tunnels from
    // being created while we're shutting them all down.

@@ -89,7 +107,7 @@ func (c *Control) RebindUDPServer() {
    _ = c.f.outside.Rebind()

    // Trigger a lighthouse update, useful for mobile clients that should have an update interval of 0
-   c.f.lightHouse.SendUpdate(c.f)
+   c.f.lightHouse.SendUpdate()

    // Let the main interface know that we rebound so that underlying tunnels know to trigger punches from their remotes
    c.f.rebindCount++

@@ -98,7 +116,7 @@ func (c *Control) RebindUDPServer() {
// ListHostmapHosts returns details about the actual or pending (handshaking) hostmap by vpn ip
func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
    if pendingMap {
-       return listHostMapHosts(c.f.handshakeManager.pendingHostMap)
+       return listHostMapHosts(c.f.handshakeManager)
    } else {
        return listHostMapHosts(c.f.hostMap)
    }

@@ -107,7 +125,7 @@ func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
// ListHostmapIndexes returns details about the actual or pending (handshaking) hostmap by local index id
func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
    if pendingMap {
-       return listHostMapIndexes(c.f.handshakeManager.pendingHostMap)
+       return listHostMapIndexes(c.f.handshakeManager)
    } else {
        return listHostMapIndexes(c.f.hostMap)
    }

@@ -115,38 +133,38 @@ func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {

// GetHostInfoByVpnIp returns a single tunnel's hostInfo, or nil if not found
func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlHostInfo {
-   var hm *HostMap
+   var hl controlHostLister
    if pending {
-       hm = c.f.handshakeManager.pendingHostMap
+       hl = c.f.handshakeManager
    } else {
-       hm = c.f.hostMap
+       hl = c.f.hostMap
    }

-   h, err := hm.QueryVpnIp(vpnIp)
-   if err != nil {
+   h := hl.QueryVpnIp(vpnIp)
+   if h == nil {
        return nil
    }

-   ch := copyHostInfo(h, c.f.hostMap.preferredRanges)
+   ch := copyHostInfo(h, c.f.hostMap.GetPreferredRanges())
    return &ch
}

// SetRemoteForTunnel forces a tunnel to use a specific remote
func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr udp.Addr) *ControlHostInfo {
-   hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
-   if err != nil {
+   hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
+   if hostInfo == nil {
        return nil
    }

    hostInfo.SetRemote(addr.Copy())
-   ch := copyHostInfo(hostInfo, c.f.hostMap.preferredRanges)
+   ch := copyHostInfo(hostInfo, c.f.hostMap.GetPreferredRanges())
    return &ch
}

// CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well.
func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool {
-   hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
-   if err != nil {
+   hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
+   if hostInfo == nil {
        return false
    }

@@ -214,6 +232,10 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
    return
}

+func (c *Control) Device() overlay.Device {
+   return c.f.inside
+}
+
func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {

    chi := ControlHostInfo{

@@ -221,7 +243,6 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
        LocalIndex: h.localIndexId,
        RemoteIndex: h.remoteIndexId,
        RemoteAddrs: h.remotes.CopyAddrs(preferredRanges),
-       CachedPackets: len(h.packetStore),
        CurrentRelaysToMe: h.relayState.CopyRelayIps(),
        CurrentRelaysThroughMe: h.relayState.CopyRelayForIps(),
    }

@@ -241,28 +262,20 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
    return chi
}

-func listHostMapHosts(hm *HostMap) []ControlHostInfo {
-   hm.RLock()
-   hosts := make([]ControlHostInfo, len(hm.Hosts))
-   i := 0
-   for _, v := range hm.Hosts {
-       hosts[i] = copyHostInfo(v, hm.preferredRanges)
-       i++
-   }
-   hm.RUnlock()
-
+func listHostMapHosts(hl controlHostLister) []ControlHostInfo {
+   hosts := make([]ControlHostInfo, 0)
+   pr := hl.GetPreferredRanges()
+   hl.ForEachVpnIp(func(hostinfo *HostInfo) {
+       hosts = append(hosts, copyHostInfo(hostinfo, pr))
+   })
    return hosts
}

-func listHostMapIndexes(hm *HostMap) []ControlHostInfo {
-   hm.RLock()
-   hosts := make([]ControlHostInfo, len(hm.Indexes))
-   i := 0
-   for _, v := range hm.Indexes {
-       hosts[i] = copyHostInfo(v, hm.preferredRanges)
-       i++
-   }
-   hm.RUnlock()
-
+func listHostMapIndexes(hl controlHostLister) []ControlHostInfo {
+   hosts := make([]ControlHostInfo, 0)
+   pr := hl.GetPreferredRanges()
+   hl.ForEachIndex(func(hostinfo *HostInfo) {
+       hosts = append(hosts, copyHostInfo(hostinfo, pr))
+   })
    return hosts
}
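
The memory-copying rule in the comment above is why `copyHostInfo` exists; a minimal sketch of the same defensive pattern (a hypothetical helper, not part of this diff):

```
// copyAddrs hands the caller fresh Addr values instead of aliasing
// the live remotes held by core.
func copyAddrs(in []*udp.Addr) []*udp.Addr {
	out := make([]*udp.Addr, 0, len(in))
	for _, a := range in {
		out = append(out, a.Copy())
	}
	return out
}
```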
@@ -18,7 +18,9 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
    l := test.NewLogger()
    // Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object
    // To properly ensure we are not exposing core memory to the caller
-   hm := NewHostMap(l, "test", &net.IPNet{}, make([]*net.IPNet, 0))
+   hm := newHostMap(l, &net.IPNet{})
+   hm.preferredRanges.Store(&[]*net.IPNet{})
+
    remote1 := udp.NewAddr(net.ParseIP("0.0.0.100"), 4444)
    remote2 := udp.NewAddr(net.ParseIP("1:2:3:4:5:6:7:8"), 4444)
    ipNet := net.IPNet{

@@ -50,7 +52,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
    remotes := NewRemoteList(nil)
    remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port)))
    remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port)))
-   hm.Add(iputil.Ip2VpnIp(ipNet.IP), &HostInfo{
+   hm.unlockedAddHostInfo(&HostInfo{
        remote: remote1,
        remotes: remotes,
        ConnectionState: &ConnectionState{

@@ -64,9 +66,9 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
            relayForByIp: map[iputil.VpnIp]*Relay{},
            relayForByIdx: map[uint32]*Relay{},
        },
-   })
+   }, &Interface{})

-   hm.Add(iputil.Ip2VpnIp(ipNet2.IP), &HostInfo{
+   hm.unlockedAddHostInfo(&HostInfo{
        remote: remote1,
        remotes: remotes,
        ConnectionState: &ConnectionState{

@@ -80,7 +82,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
            relayForByIp: map[iputil.VpnIp]*Relay{},
            relayForByIdx: map[uint32]*Relay{},
        },
-   })
+   }, &Interface{})

    c := Control{
        f: &Interface{

@@ -96,7 +98,6 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
        LocalIndex: 201,
        RemoteIndex: 200,
        RemoteAddrs: []*udp.Addr{remote2, remote1},
-       CachedPackets: 0,
        Cert: crt.Copy(),
        MessageCounter: 0,
        CurrentRemote: udp.NewAddr(net.ParseIP("0.0.0.100"), 4444),

@@ -105,7 +106,7 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
    }

    // Make sure we don't have any unexpected fields
-   assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "CachedPackets", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
+   assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
    test.AssertDeepCopyEqual(t, &expectedInfo, thi)

    // Make sure we don't panic if the host info doesn't have a cert yet
@@ -21,7 +21,7 @@ import (
func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
    h := &header.H{}
    for {
-       p := c.f.outside.Get(true)
+       p := c.f.outside.(*udp.TesterConn).Get(true)
        if err := h.Parse(p.Data); err != nil {
            panic(err)
        }

@@ -37,7 +37,7 @@ func (c *Control) WaitForType(msgType header.MessageType, subType header.Message
func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
    h := &header.H{}
    for {
-       p := c.f.outside.Get(true)
+       p := c.f.outside.(*udp.TesterConn).Get(true)
        if err := h.Parse(p.Data); err != nil {
            panic(err)
        }

@@ -90,11 +90,11 @@ func (c *Control) GetFromTun(block bool) []byte {

// GetFromUDP will pull a udp packet off the udp side of nebula
func (c *Control) GetFromUDP(block bool) *udp.Packet {
-   return c.f.outside.Get(block)
+   return c.f.outside.(*udp.TesterConn).Get(block)
}

func (c *Control) GetUDPTxChan() <-chan *udp.Packet {
-   return c.f.outside.TxPackets
+   return c.f.outside.(*udp.TesterConn).TxPackets
}

func (c *Control) GetTunTxChan() <-chan []byte {

@@ -103,7 +103,7 @@ func (c *Control) GetTunTxChan() <-chan []byte {

// InjectUDPPacket will inject a packet into the udp side of nebula
func (c *Control) InjectUDPPacket(p *udp.Packet) {
-   c.f.outside.Send(p)
+   c.f.outside.(*udp.TesterConn).Send(p)
}

// InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol

@@ -143,16 +143,16 @@ func (c *Control) GetVpnIp() iputil.VpnIp {
}

func (c *Control) GetUDPAddr() string {
-   return c.f.outside.Addr.String()
+   return c.f.outside.(*udp.TesterConn).Addr.String()
}

func (c *Control) KillPendingTunnel(vpnIp net.IP) bool {
-   hostinfo, ok := c.f.handshakeManager.pendingHostMap.Hosts[iputil.Ip2VpnIp(vpnIp)]
-   if !ok {
+   hostinfo := c.f.handshakeManager.QueryVpnIp(iputil.Ip2VpnIp(vpnIp))
+   if hostinfo == nil {
        return false
    }

-   c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
+   c.f.handshakeManager.DeleteHostInfo(hostinfo)
    return true
}

@@ -161,19 +161,9 @@ func (c *Control) GetHostmap() *HostMap {
}

func (c *Control) GetCert() *cert.NebulaCertificate {
-   return c.f.certState.Load().certificate
+   return c.f.pki.GetCertState().Certificate
}

func (c *Control) ReHandshake(vpnIp iputil.VpnIp) {
-   hostinfo := c.f.handshakeManager.AddVpnIp(vpnIp, c.f.initHostInfo)
-   ixHandshakeStage0(c.f, vpnIp, hostinfo)
-
-   // If this is a static host, we don't need to wait for the HostQueryReply
-   // We can trigger the handshake right now
-   if _, ok := c.f.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
-       select {
-       case c.f.handshakeManager.trigger <- hostinfo.vpnIp:
-       default:
-       }
-   }
+   c.f.handshakeManager.StartHandshake(vpnIp, nil)
}
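
These casts follow from `outside` now being an interface value rather than a concrete `*udp.Conn`; a failed assertion panics, which is acceptable in this test-only helper. The tolerant form, if it were ever needed, would look like:

```
if tc, ok := c.f.outside.(*udp.TesterConn); ok {
	p := tc.Get(true)
	_ = p // handle the packet
}
```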
13 dist/arch/nebula.service (vendored)
@@ -1,13 +0,0 @@
-[Unit]
-Description=Nebula overlay networking tool
-Wants=basic.target network-online.target nss-lookup.target time-sync.target
-After=basic.target network.target network-online.target
-
-[Service]
-SyslogIdentifier=nebula
-ExecReload=/bin/kill -HUP $MAINPID
-ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
-Restart=always
-
-[Install]
-WantedBy=multi-user.target

14 dist/fedora/nebula.service (vendored)
@@ -1,14 +0,0 @@
-[Unit]
-Description=Nebula overlay networking tool
-Wants=basic.target network-online.target nss-lookup.target time-sync.target
-After=basic.target network.target network-online.target
-Before=sshd.service
-
-[Service]
-SyslogIdentifier=nebula
-ExecReload=/bin/kill -HUP $MAINPID
-ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
-Restart=always
-
-[Install]
-WantedBy=multi-user.target
@@ -47,8 +47,8 @@ func (d *dnsRecords) QueryCert(data string) string {
        return ""
    }
    iip := iputil.Ip2VpnIp(ip)
-   hostinfo, err := d.hostMap.QueryVpnIp(iip)
-   if err != nil {
+   hostinfo := d.hostMap.QueryVpnIp(iip)
+   if hostinfo == nil {
        return ""
    }
    q := hostinfo.GetCert()

@@ -56,7 +56,7 @@ func (d *dnsRecords) QueryCert(data string) string {
        return ""
    }
    cert := q.Details
-   c := fmt.Sprintf("\"Name: %s\" \"Ips: %s\" \"Subnets %s\" \"Groups %s\" \"NotBefore %s\" \"NotAFter %s\" \"PublicKey %x\" \"IsCA %t\" \"Issuer %s\"", cert.Name, cert.Ips, cert.Subnets, cert.Groups, cert.NotBefore, cert.NotAfter, cert.PublicKey, cert.IsCA, cert.Issuer)
+   c := fmt.Sprintf("\"Name: %s\" \"Ips: %s\" \"Subnets %s\" \"Groups %s\" \"NotBefore %s\" \"NotAfter %s\" \"PublicKey %x\" \"IsCA %t\" \"Issuer %s\"", cert.Name, cert.Ips, cert.Subnets, cert.Groups, cert.NotBefore, cert.NotAfter, cert.PublicKey, cert.IsCA, cert.Issuer)
    return c
}

@@ -96,6 +96,10 @@ func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
            }
        }
    }
+
+   if len(m.Answer) == 0 {
+       m.Rcode = dns.RcodeNameError
+   }
}

func handleDnsRequest(l *logrus.Logger, w dns.ResponseWriter, r *dns.Msg) {

@@ -129,7 +133,12 @@ func dnsMain(l *logrus.Logger, hostMap *HostMap, c *config.C) func() {
}

func getDnsServerAddr(c *config.C) string {
-   return c.GetString("lighthouse.dns.host", "") + ":" + strconv.Itoa(c.GetInt("lighthouse.dns.port", 53))
+   dnsHost := strings.TrimSpace(c.GetString("lighthouse.dns.host", ""))
+   // Old guidance was to provide the literal `[::]` in `lighthouse.dns.host` but that won't resolve.
+   if dnsHost == "[::]" {
+       dnsHost = "::"
+   }
+   return net.JoinHostPort(dnsHost, strconv.Itoa(c.GetInt("lighthouse.dns.port", 53)))
}

func startDns(l *logrus.Logger, c *config.C) {
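
The move to `net.JoinHostPort` is what makes the IPv6 cases work: naive concatenation of `::` and a port yields the unparseable `:::53`, while `JoinHostPort` brackets any host containing a colon. A quick illustration:

```
package main

import (
	"fmt"
	"net"
)

func main() {
	fmt.Println("::" + ":" + "53")                 // ":::53" — ambiguous and not dialable
	fmt.Println(net.JoinHostPort("::", "53"))      // "[::]:53"
	fmt.Println(net.JoinHostPort("0.0.0.0", "53")) // "0.0.0.0:53"
}
```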
@@ -4,6 +4,8 @@ import (
    "testing"

    "github.com/miekg/dns"
+   "github.com/slackhq/nebula/config"
+   "github.com/stretchr/testify/assert"
)

func TestParsequery(t *testing.T) {

@@ -17,3 +19,40 @@ func TestParsequery(t *testing.T) {

    //parseQuery(m)
}
+
+func Test_getDnsServerAddr(t *testing.T) {
+   c := config.NewC(nil)
+
+   c.Settings["lighthouse"] = map[interface{}]interface{}{
+       "dns": map[interface{}]interface{}{
+           "host": "0.0.0.0",
+           "port": "1",
+       },
+   }
+   assert.Equal(t, "0.0.0.0:1", getDnsServerAddr(c))
+
+   c.Settings["lighthouse"] = map[interface{}]interface{}{
+       "dns": map[interface{}]interface{}{
+           "host": "::",
+           "port": "1",
+       },
+   }
+   assert.Equal(t, "[::]:1", getDnsServerAddr(c))
+
+   c.Settings["lighthouse"] = map[interface{}]interface{}{
+       "dns": map[interface{}]interface{}{
+           "host": "[::]",
+           "port": "1",
+       },
+   }
+   assert.Equal(t, "[::]:1", getDnsServerAddr(c))
+
+   // Make sure whitespace doesn't mess us up
+   c.Settings["lighthouse"] = map[interface{}]interface{}{
+       "dns": map[interface{}]interface{}{
+           "host": "[::] ",
+           "port": "1",
+       },
+   }
+   assert.Equal(t, "[::]:1", getDnsServerAddr(c))
+}
11 docker/Dockerfile (Normal file)
@@ -0,0 +1,11 @@
+FROM gcr.io/distroless/static:latest
+
+ARG TARGETOS TARGETARCH
+COPY build/$TARGETOS-$TARGETARCH/nebula /nebula
+COPY build/$TARGETOS-$TARGETARCH/nebula-cert /nebula-cert
+
+VOLUME ["/config"]
+
+ENTRYPOINT ["/nebula"]
+# Allow users to override the args passed to nebula
+CMD ["-config", "/config/config.yml"]
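
The `COPY` lines expect binaries to already exist under `build/<os>-<arch>/`; `make docker` takes care of that. A manual equivalent might look roughly like this (the exact Makefile targets are an assumption, not taken from this diff):

```
make build/linux-amd64/nebula build/linux-amd64/nebula-cert
docker buildx build --platform linux/amd64 -f docker/Dockerfile -t nebulaoss/nebula .
```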
24 docker/README.md (Normal file)
@@ -0,0 +1,24 @@
+# NebulaOSS/nebula Docker Image
+
+## Building
+
+From the root of the repository, run `make docker`.
+
+## Running
+
+To run the built image, use the following command:
+
+```
+docker run \
+  --name nebula \
+  --network host \
+  --cap-add NET_ADMIN \
+  --volume ./config:/config \
+  --rm \
+  nebulaoss/nebula
+```
+
+A few notes:
+
+- The `NET_ADMIN` capability is necessary to create the tun adapter on the host (this is unnecessary if the tun device is disabled.)
+- `--volume ./config:/config` should point to a directory that contains your `config.yml` and any other necessary files.
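
Because the Dockerfile's `CMD` only supplies default arguments, other invocations can be run against the same image; for example, validating a config before starting for real (the `-test` flag is assumed to behave as in the stock nebula binary):

```
docker run --rm --volume ./config:/config nebulaoss/nebula -test -config /config/config.yml
```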
@@ -20,7 +20,7 @@ import (
)

func BenchmarkHotPath(b *testing.B) {
-   ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, _, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)

@@ -44,7 +44,7 @@ func BenchmarkHotPath(b *testing.B) {
}

func TestGoodHandshake(t *testing.T) {
-   ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)

@@ -95,7 +95,7 @@ func TestGoodHandshake(t *testing.T) {
}

func TestWrongResponderHandshake(t *testing.T) {
-   ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})

    // The IPs here are chosen on purpose:
    // The current remote handling will sort by preference, public, and then lexically.

@@ -164,7 +164,7 @@ func TestStage1Race(t *testing.T) {
    // This test ensures that two hosts handshaking with each other at the same time will allow traffic to flow
    // But will eventually collapse down to a single tunnel

-   ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)

@@ -241,7 +241,7 @@ func TestStage1Race(t *testing.T) {
}

func TestUncleanShutdownRaceLoser(t *testing.T) {
-   ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)

@@ -290,7 +290,7 @@ func TestUncleanShutdownRaceLoser(t *testing.T) {
}

func TestUncleanShutdownRaceWinner(t *testing.T) {
-   ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)

@@ -341,7 +341,7 @@ func TestUncleanShutdownRaceWinner(t *testing.T) {
}

func TestRelays(t *testing.T) {
-   ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
    relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})

@@ -372,7 +372,7 @@ func TestRelays(t *testing.T) {

func TestStage1RaceRelays(t *testing.T) {
    //NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
-   ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
    relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})

@@ -410,6 +410,8 @@ func TestStage1RaceRelays(t *testing.T) {
    p := r.RouteForAllUntilTxTun(myControl)
    _ = p
+
+   r.FlushAll()

    myControl.Stop()
    theirControl.Stop()
    relayControl.Stop()

@@ -419,7 +421,7 @@ func TestStage1RaceRelays(t *testing.T) {

func TestStage1RaceRelays2(t *testing.T) {
    //NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
-   ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
    relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})

@@ -506,7 +508,7 @@ func TestStage1RaceRelays2(t *testing.T) {
    ////TODO: assert hostmaps
}
func TestRehandshakingRelays(t *testing.T) {
-   ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
    relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})

@@ -536,7 +538,111 @@ func TestRehandshakingRelays(t *testing.T) {
    // When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
    // and the main host infos will not have any relay state to handle the me<->relay<->them tunnel.
    r.Log("Renew relay certificate and spin until me and them sees it")
-   _, _, myNextPrivKey, myNextPEM := newTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})
+   _, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})
+
+   caB, err := ca.MarshalToPEM()
+   if err != nil {
+       panic(err)
+   }
+
+   relayConfig.Settings["pki"] = m{
+       "ca": string(caB),
+       "cert": string(myNextPEM),
+       "key": string(myNextPrivKey),
+   }
+   rc, err := yaml.Marshal(relayConfig.Settings)
+   assert.NoError(t, err)
+   relayConfig.ReloadConfigString(string(rc))
+
+   for {
+       r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet")
+       assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
+       c := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
+       if len(c.Cert.Details.Groups) != 0 {
+           // We have a new certificate now
+           r.Log("Certificate between my and relay is updated!")
+           break
+       }
+
+       time.Sleep(time.Second)
+   }
+
+   for {
+       r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet")
+       assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
+       c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
+       if len(c.Cert.Details.Groups) != 0 {
+           // We have a new certificate now
+           r.Log("Certificate between their and relay is updated!")
+           break
+       }
+
+       time.Sleep(time.Second)
+   }
+
+   r.Log("Assert the relay tunnel still works")
+   assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
+   r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
+   // We should have two hostinfos on all sides
+   for len(myControl.GetHostmap().Indexes) != 2 {
+       t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes))
+       r.Log("Assert the relay tunnel still works")
+       assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
+       r.Log("yupitdoes")
+       time.Sleep(time.Second)
+   }
+   t.Logf("myControl hostinfos got cleaned up!")
+   for len(theirControl.GetHostmap().Indexes) != 2 {
+       t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes))
+       r.Log("Assert the relay tunnel still works")
+       assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
+       r.Log("yupitdoes")
+       time.Sleep(time.Second)
+   }
+   t.Logf("theirControl hostinfos got cleaned up!")
+   for len(relayControl.GetHostmap().Indexes) != 2 {
+       t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes))
+       r.Log("Assert the relay tunnel still works")
+       assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
+       r.Log("yupitdoes")
+       time.Sleep(time.Second)
+   }
+   t.Logf("relayControl hostinfos got cleaned up!")
+}
+
+func TestRehandshakingRelaysPrimary(t *testing.T) {
+   // This test is the same as TestRehandshakingRelays but one of the terminal types is a primary swap winner
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 128}, m{"relay": m{"use_relays": true}})
+   relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 1}, m{"relay": m{"am_relay": true}})
+   theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
+
+   // Teach my how to get to the relay and that their can be reached via the relay
+   myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
+   myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
+   relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
+
+   // Build a router so we don't have to reason who gets which packet
+   r := router.NewR(t, myControl, relayControl, theirControl)
+   defer r.RenderFlow()
+
+   // Start the servers
+   myControl.Start()
+   relayControl.Start()
+   theirControl.Start()
+
+   t.Log("Trigger a handshake from me to them via the relay")
+   myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
+
+   p := r.RouteForAllUntilTxTun(theirControl)
+   r.Log("Assert the tunnel works")
+   assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
+   r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
+
+   // When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
+   // and the main host infos will not have any relay state to handle the me<->relay<->them tunnel.
+   r.Log("Renew relay certificate and spin until me and them sees it")
+   _, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})

    caB, err := ca.MarshalToPEM()
    if err != nil {

@@ -609,7 +715,7 @@ func TestRehandshakingRelays(t *testing.T) {
}

func TestRehandshaking(t *testing.T) {
-   ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)

@@ -631,7 +737,7 @@ func TestRehandshaking(t *testing.T) {
    r.RenderHostmaps("Starting hostmaps", myControl, theirControl)

    r.Log("Renew my certificate and spin until their sees it")
-   _, _, myNextPrivKey, myNextPEM := newTestCert(ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), myVpnIpNet, nil, []string{"new group"})
+   _, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), myVpnIpNet, nil, []string{"new group"})

    caB, err := ca.MarshalToPEM()
    if err != nil {

@@ -705,7 +811,7 @@ func TestRehandshaking(t *testing.T) {
func TestRehandshakingLoser(t *testing.T) {
    // The purpose of this test is that the race loser renews their certificate and rehandshakes. The final tunnel
    // Should be the one with the new certificate
-   ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)

@@ -731,7 +837,7 @@ func TestRehandshakingLoser(t *testing.T) {
    r.RenderHostmaps("Starting hostmaps", myControl, theirControl)

    r.Log("Renew their certificate and spin until mine sees it")
-   _, _, theirNextPrivKey, theirNextPEM := newTestCert(ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), theirVpnIpNet, nil, []string{"their new group"})
+   _, _, theirNextPrivKey, theirNextPEM := NewTestCert(ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), theirVpnIpNet, nil, []string{"their new group"})

    caB, err := ca.MarshalToPEM()
    if err != nil {

@@ -806,7 +912,7 @@ func TestRaceRegression(t *testing.T) {
    // This test forces stage 1, stage 2, stage 1 to be received by me from them
    // We had a bug where we were not finding the duplicate handshake and responding to the final stage 1 which
    // caused a cross-linked hostinfo
-   ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
+   ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)

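
Most tests in this file share one skeleton, which the renamed `NewTestCaCert`/`NewTestCert` helpers feed; a condensed sketch of that skeleton (every name comes from the harness shown above):

```
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)

myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
r := router.NewR(t, myControl, theirControl)

myControl.Start()
theirControl.Start()

myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
p := r.RouteForAllUntilTxTun(theirControl)
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
```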
118 e2e/helpers.go (Normal file)
@@ -0,0 +1,118 @@
+package e2e
+
+import (
+   "crypto/rand"
+   "io"
+   "net"
+   "time"
+
+   "github.com/slackhq/nebula/cert"
+   "golang.org/x/crypto/curve25519"
+   "golang.org/x/crypto/ed25519"
+)
+
+// NewTestCaCert will generate a CA cert
+func NewTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
+   pub, priv, err := ed25519.GenerateKey(rand.Reader)
+   if before.IsZero() {
+       before = time.Now().Add(time.Second * -60).Round(time.Second)
+   }
+   if after.IsZero() {
+       after = time.Now().Add(time.Second * 60).Round(time.Second)
+   }
+
+   nc := &cert.NebulaCertificate{
+       Details: cert.NebulaCertificateDetails{
+           Name: "test ca",
+           NotBefore: time.Unix(before.Unix(), 0),
+           NotAfter: time.Unix(after.Unix(), 0),
+           PublicKey: pub,
+           IsCA: true,
+           InvertedGroups: make(map[string]struct{}),
+       },
+   }
+
+   if len(ips) > 0 {
+       nc.Details.Ips = ips
+   }
+
+   if len(subnets) > 0 {
+       nc.Details.Subnets = subnets
+   }
+
+   if len(groups) > 0 {
+       nc.Details.Groups = groups
+   }
+
+   err = nc.Sign(cert.Curve_CURVE25519, priv)
+   if err != nil {
+       panic(err)
+   }
+
+   pem, err := nc.MarshalToPEM()
+   if err != nil {
+       panic(err)
+   }
+
+   return nc, pub, priv, pem
+}
+
+// NewTestCert will generate a signed certificate with the provided details.
+// Expiry times are defaulted if you do not pass them in
+func NewTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
+   issuer, err := ca.Sha256Sum()
+   if err != nil {
+       panic(err)
+   }
+
+   if before.IsZero() {
+       before = time.Now().Add(time.Second * -60).Round(time.Second)
+   }
+
+   if after.IsZero() {
+       after = time.Now().Add(time.Second * 60).Round(time.Second)
+   }
+
+   pub, rawPriv := x25519Keypair()
+
+   nc := &cert.NebulaCertificate{
+       Details: cert.NebulaCertificateDetails{
+           Name: name,
+           Ips: []*net.IPNet{ip},
+           Subnets: subnets,
+           Groups: groups,
+           NotBefore: time.Unix(before.Unix(), 0),
+           NotAfter: time.Unix(after.Unix(), 0),
+           PublicKey: pub,
+           IsCA: false,
+           Issuer: issuer,
+           InvertedGroups: make(map[string]struct{}),
+       },
+   }
+
+   err = nc.Sign(ca.Details.Curve, key)
+   if err != nil {
+       panic(err)
+   }
+
+   pem, err := nc.MarshalToPEM()
+   if err != nil {
+       panic(err)
+   }
+
+   return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
+}
+
+func x25519Keypair() ([]byte, []byte) {
+   privkey := make([]byte, 32)
+   if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
+       panic(err)
+   }
+
+   pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
+   if err != nil {
+       panic(err)
+   }
+
+   return pubkey, privkey
+}
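
A minimal usage sketch for these now-exported helpers (zero times pick the defaulted expiry windows described above; the CIDR and names are arbitrary):

```
ca, _, caKey, _ := NewTestCaCert(time.Time{}, time.Time{}, nil, nil, nil)

_, ipNet, _ := net.ParseCIDR("10.0.0.1/24")
crt, pub, keyPEM, crtPEM := NewTestCert(ca, caKey, "host1", time.Time{}, time.Time{}, ipNet, nil, []string{"servers"})
_, _, _, _ = crt, pub, keyPEM, crtPEM
```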
@@ -4,7 +4,6 @@
package e2e

import (
	"crypto/rand"
	"fmt"
	"io"
	"net"
@@ -12,9 +11,9 @@ import (
	"testing"
	"time"

	"dario.cat/mergo"
	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
	"github.com/imdario/mergo"
	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula"
	"github.com/slackhq/nebula/cert"
@@ -22,8 +21,6 @@ import (
	"github.com/slackhq/nebula/e2e/router"
	"github.com/slackhq/nebula/iputil"
	"github.com/stretchr/testify/assert"
	"golang.org/x/crypto/curve25519"
	"golang.org/x/crypto/ed25519"
	"gopkg.in/yaml.v2"
)

@@ -40,7 +37,7 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
		IP:   udpIp,
		Port: 4242,
	}
	_, _, myPrivKey, myPEM := newTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})
	_, _, myPrivKey, myPEM := NewTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})

	caB, err := caCrt.MarshalToPEM()
	if err != nil {
@@ -108,112 +105,6 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
	return control, vpnIpNet, &udpAddr, c
}

// newTestCaCert will generate a CA cert
func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if before.IsZero() {
		before = time.Now().Add(time.Second * -60).Round(time.Second)
	}
	if after.IsZero() {
		after = time.Now().Add(time.Second * 60).Round(time.Second)
	}

	nc := &cert.NebulaCertificate{
		Details: cert.NebulaCertificateDetails{
			Name:           "test ca",
			NotBefore:      time.Unix(before.Unix(), 0),
			NotAfter:       time.Unix(after.Unix(), 0),
			PublicKey:      pub,
			IsCA:           true,
			InvertedGroups: make(map[string]struct{}),
		},
	}

	if len(ips) > 0 {
		nc.Details.Ips = ips
	}

	if len(subnets) > 0 {
		nc.Details.Subnets = subnets
	}

	if len(groups) > 0 {
		nc.Details.Groups = groups
	}

	err = nc.Sign(cert.Curve_CURVE25519, priv)
	if err != nil {
		panic(err)
	}

	pem, err := nc.MarshalToPEM()
	if err != nil {
		panic(err)
	}

	return nc, pub, priv, pem
}

// newTestCert will generate a signed certificate with the provided details.
// Expiry times are defaulted if you do not pass them in
func newTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
	issuer, err := ca.Sha256Sum()
	if err != nil {
		panic(err)
	}

	if before.IsZero() {
		before = time.Now().Add(time.Second * -60).Round(time.Second)
	}

	if after.IsZero() {
		after = time.Now().Add(time.Second * 60).Round(time.Second)
	}

	pub, rawPriv := x25519Keypair()

	nc := &cert.NebulaCertificate{
		Details: cert.NebulaCertificateDetails{
			Name:           name,
			Ips:            []*net.IPNet{ip},
			Subnets:        subnets,
			Groups:         groups,
			NotBefore:      time.Unix(before.Unix(), 0),
			NotAfter:       time.Unix(after.Unix(), 0),
			PublicKey:      pub,
			IsCA:           false,
			Issuer:         issuer,
			InvertedGroups: make(map[string]struct{}),
		},
	}

	err = nc.Sign(ca.Details.Curve, key)
	if err != nil {
		panic(err)
	}

	pem, err := nc.MarshalToPEM()
	if err != nil {
		panic(err)
	}

	return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
}

func x25519Keypair() ([]byte, []byte) {
	privkey := make([]byte, 32)
	if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
		panic(err)
	}

	pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
	if err != nil {
		panic(err)
	}

	return pubkey, privkey
}

type doneCb func()

func deadline(t *testing.T, seconds time.Duration) doneCb {
@@ -11,7 +11,7 @@ pki:
  #blocklist:
  #  - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
  # disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid.
  #disconnect_invalid: false
  #disconnect_invalid: true

# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
@@ -21,6 +21,19 @@ pki:
static_host_map:
  "192.168.100.1": ["100.64.22.11:4242"]

# The static_map config stanza can be used to configure how the static_host_map behaves.
#static_map:
  # cadence determines how frequently DNS is re-queried for updated IP addresses when a static_host_map entry contains
  # a DNS name.
  #cadence: 30s

  # network determines the type of IP addresses to ask the DNS server for. The default is "ip4" because nodes typically
  # do not know their public IPv4 address. Connecting to the Lighthouse via IPv4 allows the Lighthouse to detect the
  # public address. Other valid options are "ip6" and "ip" (returns both.)
  #network: ip4

  # lookup_timeout is the DNS query timeout.
  #lookup_timeout: 250ms

lighthouse:
  # am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
@@ -154,11 +167,11 @@ punchy:

# Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest
# path to a network adjacent nebula node.
# NOTE: the previous option "local_range" only allowed definition of a single range
# and has been deprecated for "preferred_ranges"
# This setting is reloadable.
#preferred_ranges: ["172.16.0.0/24"]

# sshd can expose informational and administrative functions via ssh this is a
# sshd can expose informational and administrative functions via ssh. This can expose informational and administrative
# functions, and allows manual tweaking of various network settings when debugging or testing.
#sshd:
  # Toggles the feature
  #enabled: true
@@ -167,12 +180,15 @@ punchy:
  # A file containing the ssh host private key to use
  # A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
  #host_key: ./ssh_host_ed25519_key
  # A file containing a list of authorized public keys
  # Authorized users and their public keys
  #authorized_users:
    #- user: steeeeve
      # keys can be an array of strings or single string
      #keys:
        #- "ssh public key string"
  # Trusted SSH CA public keys. These are the public keys of the CAs that are allowed to sign SSH keys for access.
  #trusted_cas:
    #- "ssh public key string"

# EXPERIMENTAL: relay support for networks that can't establish direct connections.
relay:
@@ -194,7 +210,7 @@ tun:
  disabled: false
  # Name of the device. If not set, a default will be chosen by the OS.
  # For macOS: if set, must be in the form `utun[0-9]+`.
  # For FreeBSD: Required to be set, must be in the form `tun[0-9]+`.
  # For NetBSD: Required to be set, must be in the form `tun[0-9]+`
  dev: nebula1
  # Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
  drop_local_broadcast: false
@@ -216,6 +232,7 @@ tun:
  # `mtu`: will default to tun mtu if this option is not specified
  # `metric`: will default to 0 if this option is not specified
  # `install`: will default to true, controls whether this route is installed in the systems routing table.
  # This setting is reloadable.
  unsafe_routes:
    #- route: 172.16.1.0/24
    #  via: 192.168.100.99
@@ -230,7 +247,10 @@ tun:
# TODO
# Configure logging level
logging:
  # panic, fatal, error, warning, info, or debug. Default is info
  # panic, fatal, error, warning, info, or debug. Default is info and is reloadable.
  #NOTE: Debug mode can log remotely controlled/untrusted data which can quickly fill a disk in some
  # scenarios. Debug logging is also CPU intensive and will decrease performance overall.
  # Only enable debug logging while actively investigating an issue.
  level: info
  # json or text formats currently available. Default is text
  format: text
@@ -275,6 +295,10 @@ logging:
  # A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
  #try_interval: 100ms
  #retries: 20

  # query_buffer is the size of the buffer channel for querying lighthouses
  #query_buffer: 64

  # trigger_buffer is the size of the buffer channel for quickly sending handshakes
  # after receiving the response for lighthouse queries
  #trigger_buffer: 64
@@ -291,6 +315,13 @@ firewall:
  outbound_action: drop
  inbound_action: drop

  # Controls the default value for local_cidr. Default is true, will be deprecated after v1.9 and defaulted to false.
  # This setting only affects nebula hosts with subnets encoded in their certificate. A nebula host acting as an
  # unsafe router with `default_local_cidr_any: true` will expose their unsafe routes to every inbound rule regardless
  # of the actual destination for the packet. Setting this to false requires each inbound rule to contain a `local_cidr`
  # if the intention is to allow traffic to flow to an unsafe route.
  #default_local_cidr_any: false

  conntrack:
    tcp_timeout: 12m
    udp_timeout: 3m
@@ -298,7 +329,7 @@ firewall:

  # The firewall is default deny. There is no way to write a deny rule.
  # Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
  # Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr)
  # Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr) AND (local cidr)
  # - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available).
  #   code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any`
  #   proto: `any`, `tcp`, `udp`, or `icmp`
@@ -307,6 +338,8 @@ firewall:
  #   groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
  #   cidr: a remote CIDR, `0.0.0.0/0` is any.
  #   local_cidr: a local CIDR, `0.0.0.0/0` is any. This could be used to filter destinations when using unsafe_routes.
  #     Default is `any` unless the certificate contains subnets and then the default is the ip issued in the certificate
  #     if `default_local_cidr_any` is false, otherwise it's `any`.
  #   ca_name: An issuing CA name
  #   ca_sha: An issuing CA shasum
@@ -328,3 +361,10 @@ firewall:
      groups:
        - laptop
        - home

    # Expose a subnet (unsafe route) to hosts with the group remote_client
    # This example assumes you have a subnet of 192.168.100.1/24 or larger encoded in the certificate
    - port: 8080
      proto: tcp
      group: remote_client
      local_cidr: 192.168.100.1/24
100 examples/go_service/main.go Normal file
@@ -0,0 +1,100 @@
package main

import (
	"bufio"
	"fmt"
	"log"

	"github.com/slackhq/nebula/config"
	"github.com/slackhq/nebula/service"
)

func main() {
	if err := run(); err != nil {
		log.Fatalf("%+v", err)
	}
}

func run() error {
	configStr := `
tun:
  user: true

static_host_map:
  '192.168.100.1': ['localhost:4242']

listen:
  host: 0.0.0.0
  port: 4241

lighthouse:
  am_lighthouse: false
  interval: 60
  hosts:
    - '192.168.100.1'

firewall:
  outbound:
    # Allow all outbound traffic from this node
    - port: any
      proto: any
      host: any

  inbound:
    # Allow icmp between any nebula hosts
    - port: any
      proto: icmp
      host: any
    - port: any
      proto: any
      host: any

pki:
  ca: /home/rice/Developer/nebula-config/ca.crt
  cert: /home/rice/Developer/nebula-config/app.crt
  key: /home/rice/Developer/nebula-config/app.key
`
	var config config.C
	if err := config.LoadString(configStr); err != nil {
		return err
	}
	service, err := service.New(&config)
	if err != nil {
		return err
	}

	ln, err := service.Listen("tcp", ":1234")
	if err != nil {
		return err
	}
	for {
		conn, err := ln.Accept()
		if err != nil {
			log.Printf("accept error: %s", err)
			break
		}
		defer conn.Close()

		log.Printf("got connection")

		conn.Write([]byte("hello world\n"))

		scanner := bufio.NewScanner(conn)
		for scanner.Scan() {
			message := scanner.Text()
			fmt.Fprintf(conn, "echo: %q\n", message)
			log.Printf("got message %q", message)
		}

		if err := scanner.Err(); err != nil {
			log.Printf("scanner error: %s", err)
			break
		}
	}

	service.Close()
	if err := service.Wait(); err != nil {
		return err
	}
	return nil
}
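As a counterpart to this echo server, a client can reach it over the same overlay. The sketch below is illustrative only: it assumes the `service` package exposes a `DialContext` method alongside `Listen` (not shown in this diff), the target nebula IP `192.168.100.2` is hypothetical, and the cert paths are placeholders:

```go
package main

import (
	"bufio"
	"context"
	"fmt"
	"log"

	"github.com/slackhq/nebula/config"
	"github.com/slackhq/nebula/service"
)

// clientConfig mirrors the server YAML above, with this node's own
// (placeholder) cert paths; trimmed for brevity.
const clientConfig = `
tun:
  user: true
static_host_map:
  '192.168.100.1': ['localhost:4242']
lighthouse:
  am_lighthouse: false
  hosts:
    - '192.168.100.1'
firewall:
  outbound:
    - port: any
      proto: any
      host: any
  inbound:
    - port: any
      proto: any
      host: any
pki:
  ca: /path/to/ca.crt
  cert: /path/to/client.crt
  key: /path/to/client.key
`

func main() {
	var c config.C
	if err := c.LoadString(clientConfig); err != nil {
		log.Fatal(err)
	}
	svc, err := service.New(&c)
	if err != nil {
		log.Fatal(err)
	}
	defer svc.Close()

	// DialContext is assumed to open a userspace connection across the overlay.
	conn, err := svc.DialContext(context.Background(), "tcp", "192.168.100.2:1234")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	fmt.Fprintln(conn, "ping")
	reply, _ := bufio.NewReader(conn).ReadString('\n')
	log.Printf("reply: %s", reply)
}
```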
@@ -1,138 +0,0 @@
# Quickstart Guide

This guide is intended to bring up a vagrant environment with 1 lighthouse and 2 generic hosts running nebula.

## Creating the virtualenv for ansible

Within the `quickstart/` directory, do the following

```
# make a virtual environment
virtualenv venv

# get into the virtualenv
source venv/bin/activate

# install ansible
pip install -r requirements.yml
```

## Bringing up the vagrant environment

A plugin that is used for the Vagrant environment is `vagrant-hostmanager`

To install, run

```
vagrant plugin install vagrant-hostmanager
```

All hosts within the Vagrantfile are brought up with

`vagrant up`

Once the boxes are up, go into the `ansible/` directory and deploy the playbook by running

`ansible-playbook playbook.yml -i inventory -u vagrant`

## Testing within the vagrant env

Once the ansible run is done, hop onto a vagrant box

`vagrant ssh generic1.vagrant`

or specifically

`ssh vagrant@<ip-address-in-vagrant-file>` (password for the vagrant user on the boxes is `vagrant`)

Some quick tests once the vagrant boxes are up are to ping from `generic1.vagrant` to `generic2.vagrant` using
their respective nebula ip address.

```
vagrant@generic1:~$ ping 10.168.91.220
PING 10.168.91.220 (10.168.91.220) 56(84) bytes of data.
64 bytes from 10.168.91.220: icmp_seq=1 ttl=64 time=241 ms
64 bytes from 10.168.91.220: icmp_seq=2 ttl=64 time=0.704 ms
```

You can further verify that the allowed nebula firewall rules work by ssh'ing from 1 generic box to the other.

`ssh vagrant@<nebula-ip-address>` (password for the vagrant user on the boxes is `vagrant`)

See `/etc/nebula/config.yml` on a box for firewall rules.

To see full handshakes and hostmaps, change the logging config of `/etc/nebula/config.yml` on the vagrant boxes from
info to debug.

You can watch nebula logs by running

```
sudo journalctl -fu nebula
```

Refer to the nebula src code directory's README for further instructions on configuring nebula.

## Troubleshooting

### Is nebula up and running?

Run and verify that

```
ifconfig
```

shows you an interface with the name `nebula1` being up.

```
vagrant@generic1:~$ ifconfig nebula1
nebula1: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST>  mtu 1300
        inet 10.168.91.210  netmask 255.128.0.0  destination 10.168.91.210
        inet6 fe80::aeaf:b105:e6dc:936c  prefixlen 64  scopeid 0x20<link>
        unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00  txqueuelen 500  (UNSPEC)
        RX packets 2  bytes 168 (168.0 B)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 11  bytes 600 (600.0 B)
        TX errors 0  dropped 0 overruns 0  carrier 0  collisions 0
```

### Connectivity

Are you able to ping other boxes on the private nebula network?

The following are the private nebula ip addresses of the vagrant env

```
generic1.vagrant [nebula_ip] 10.168.91.210
generic2.vagrant [nebula_ip] 10.168.91.220
lighthouse1.vagrant [nebula_ip] 10.168.91.230
```

Try pinging generic1.vagrant to and from any other box using its nebula ip above.

Double check the nebula firewall rules under /etc/nebula/config.yml to make sure that connectivity is allowed for your use-case if on a specific port.

```
vagrant@lighthouse1:~$ grep -A21 firewall /etc/nebula/config.yml
firewall:
  conntrack:
    tcp_timeout: 12m
    udp_timeout: 3m
    default_timeout: 10m

  inbound:
    - proto: icmp
      port: any
      host: any
    - proto: any
      port: 22
      host: any
    - proto: any
      port: 53
      host: any

  outbound:
    - proto: any
      port: any
      host: any
```
40 examples/quickstart-vagrant/Vagrantfile vendored
@@ -1,40 +0,0 @@
Vagrant.require_version ">= 2.2.6"

nodes = [
  { :hostname => 'generic1.vagrant', :ip => '172.11.91.210', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
  { :hostname => 'generic2.vagrant', :ip => '172.11.91.220', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
  { :hostname => 'lighthouse1.vagrant', :ip => '172.11.91.230', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
]

Vagrant.configure("2") do |config|

  config.ssh.insert_key = false

  if Vagrant.has_plugin?('vagrant-cachier')
    config.cache.enable :apt
  else
    printf("** Install vagrant-cachier plugin to speedup deploy: `vagrant plugin install vagrant-cachier`.**\n")
  end

  if Vagrant.has_plugin?('vagrant-hostmanager')
    config.hostmanager.enabled = true
    config.hostmanager.manage_host = true
    config.hostmanager.include_offline = true
  else
    config.vagrant.plugins = "vagrant-hostmanager"
  end

  nodes.each do |node|
    config.vm.define node[:hostname] do |node_config|
      node_config.vm.box = node[:box]
      node_config.vm.hostname = node[:hostname]
      node_config.vm.network :private_network, ip: node[:ip]
      node_config.vm.provider :virtualbox do |vb|
        vb.memory = node[:ram]
        vb.cpus = node[:cpus]
        vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
        vb.customize ['guestproperty', 'set', :id, '/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold', 10000]
      end
    end
  end
end
@@ -1,4 +0,0 @@
[defaults]
host_key_checking = False
private_key_file = ~/.vagrant.d/insecure_private_key
become = yes
@@ -1,21 +0,0 @@
#!/usr/bin/python


class FilterModule(object):
    def filters(self):
        return {
            'to_nebula_ip': self.to_nebula_ip,
            'map_to_nebula_ips': self.map_to_nebula_ips,
        }

    def to_nebula_ip(self, ip_str):
        ip_list = list(map(int, ip_str.split(".")))
        ip_list[0] = 10
        ip_list[1] = 168
        ip = '.'.join(map(str, ip_list))
        return ip

    def map_to_nebula_ips(self, ip_strs):
        ip_list = [ self.to_nebula_ip(ip_str) for ip_str in ip_strs ]
        ips = ', '.join(ip_list)
        return ips
@@ -1,11 +0,0 @@
[all]
generic1.vagrant
generic2.vagrant
lighthouse1.vagrant

[generic]
generic1.vagrant
generic2.vagrant

[lighthouse]
lighthouse1.vagrant
@@ -1,23 +0,0 @@
---
- name: test connection to vagrant boxes
  hosts: all
  tasks:
    - debug: msg=ok

- name: build nebula binaries locally
  connection: local
  hosts: localhost
  tasks:
    - command: chdir=../../../ make build/linux-amd64/"{{ item }}"
      with_items:
        - nebula
        - nebula-cert
      tags:
        - build-nebula

- name: install nebula on all vagrant hosts
  hosts: all
  become: yes
  gather_facts: yes
  roles:
    - nebula
@@ -1,3 +0,0 @@
---
# defaults file for nebula
nebula_config_directory: "/etc/nebula/"
@@ -1,14 +0,0 @@
[Unit]
Description=Nebula overlay networking tool
Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target
Before=sshd.service

[Service]
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
Restart=always

[Install]
WantedBy=multi-user.target
@@ -1,5 +0,0 @@
-----BEGIN NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSB0ZXN0IENBKNXC1NYFMNXIhO0GOiCmVYeZ9tkB4WEnawmkrca+
hsAg9otUFhpAowZeJ33KVEABEkAORybHQUUyVFbKYzw0JHfVzAQOHA4kwB1yP9IV
KpiTw9+ADz+wA+R5tn9B+L8+7+Apc+9dem4BQULjA5mRaoYN
-----END NEBULA CERTIFICATE-----
@@ -1,4 +0,0 @@
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
FEXZKMSmg8CgIODR0ymUeNT3nbnVpMi7nD79UgkCRHWmVYeZ9tkB4WEnawmkrca+
hsAg9otUFhpAowZeJ33KVA==
-----END NEBULA ED25519 PRIVATE KEY-----
@@ -1,5 +0,0 @@
---
# handlers file for nebula

- name: restart nebula
  service: name=nebula state=restarted
@@ -1,62 +0,0 @@
---
# tasks file for nebula

- name: get the vagrant network interface and set fact
  set_fact:
    vagrant_ifce: "ansible_{{ ansible_interfaces | difference(['lo',ansible_default_ipv4.alias]) | sort | first }}"
  tags:
    - nebula-conf

- name: install built nebula binary
  copy: src="../../../../../build/linux-amd64/{{ item }}" dest="/usr/local/bin" mode=0755
  with_items:
    - nebula
    - nebula-cert

- name: create nebula config directory
  file: path="{{ nebula_config_directory }}" state=directory mode=0755

- name: temporarily copy over root.crt and root.key to sign
  copy: src={{ item }} dest=/opt/{{ item }}
  with_items:
    - vagrant-test-ca.key
    - vagrant-test-ca.crt

- name: remove previously signed host certificate
  file: dest=/etc/nebula/{{ item }} state=absent
  with_items:
    - host.crt
    - host.key

- name: sign using the root key
  command: nebula-cert sign -ca-crt /opt/vagrant-test-ca.crt -ca-key /opt/vagrant-test-ca.key -duration 4320h -groups vagrant -ip {{ hostvars[inventory_hostname][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}/9 -name {{ ansible_hostname }}.nebula -out-crt /etc/nebula/host.crt -out-key /etc/nebula/host.key

- name: remove root.key used to sign
  file: dest=/opt/{{ item }} state=absent
  with_items:
    - vagrant-test-ca.key

- name: write the content of the trusted ca certificate
  copy: src="vagrant-test-ca.crt" dest="/etc/nebula/vagrant-test-ca.crt"
  notify: restart nebula

- name: Create config directory
  file: path="{{ nebula_config_directory }}" owner=root group=root mode=0755 state=directory

- name: nebula config
  template: src=config.yml.j2 dest="/etc/nebula/config.yml" mode=0644 owner=root group=root
  notify: restart nebula
  tags:
    - nebula-conf

- name: nebula systemd
  copy: src=systemd.nebula.service dest="/etc/systemd/system/nebula.service" mode=0644 owner=root group=root
  register: addconf
  notify: restart nebula

- name: maybe reload systemd
  shell: systemctl daemon-reload
  when: addconf.changed

- name: nebula running
  service: name="nebula" state=started enabled=yes
@@ -1,85 +0,0 @@
pki:
  ca: /etc/nebula/vagrant-test-ca.crt
  cert: /etc/nebula/host.crt
  key: /etc/nebula/host.key

# Port Nebula will be listening on
listen:
  host: 0.0.0.0
  port: 4242

# sshd can expose informational and administrative functions via ssh
sshd:
  # Toggles the feature
  enabled: true
  # Host and port to listen on
  listen: 127.0.0.1:2222
  # A file containing the ssh host private key to use
  host_key: /etc/ssh/ssh_host_ed25519_key
  # A file containing a list of authorized public keys
  authorized_users:
{% for user in nebula_users %}
    - user: {{ user.name }}
      keys:
{% for key in user.ssh_auth_keys %}
        - "{{ key }}"
{% endfor %}
{% endfor %}

local_range: 10.168.0.0/16

static_host_map:
  # lighthouse
  {{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}: ["{{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address']}}:4242"]

default_route: "0.0.0.0"

lighthouse:
{% if 'lighthouse' in group_names %}
  am_lighthouse: true
  serve_dns: true
{% else %}
  am_lighthouse: false
{% endif %}
  interval: 60
{% if 'generic' in group_names %}
  hosts:
    - {{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}
{% endif %}

# Configure the private interface
tun:
  dev: nebula1
  # Sets MTU of the tun dev.
  # MTU of the tun must be smaller than the MTU of the eth0 interface
  mtu: 1300

# TODO
# Configure logging level
logging:
  level: info
  format: json

firewall:
  conntrack:
    tcp_timeout: 12m
    udp_timeout: 3m
    default_timeout: 10m

  inbound:
    - proto: icmp
      port: any
      host: any
    - proto: any
      port: 22
      host: any
{% if "lighthouse" in groups %}
    - proto: any
      port: 53
      host: any
{% endif %}

  outbound:
    - proto: any
      port: any
      host: any
@@ -1,7 +0,0 @@
---
# vars file for nebula

nebula_users:
  - name: user1
    ssh_auth_keys:
      - "ed25519 place-your-ssh-public-key-here"
@@ -1 +0,0 @@
ansible
35 examples/service_scripts/nebula.open-rc Normal file
@@ -0,0 +1,35 @@
#!/sbin/openrc-run
#
# nebula service for open-rc systems

extra_commands="checkconfig"

: ${NEBULA_CONFDIR:=${RC_PREFIX%/}/etc/nebula}
: ${NEBULA_CONFIG:=${NEBULA_CONFDIR}/config.yml}
: ${NEBULA_BINARY:=${NEBULA_BINARY}${RC_PREFIX%/}/usr/local/sbin/nebula}

command="${NEBULA_BINARY}"
command_args="${NEBULA_OPTS} -config ${NEBULA_CONFIG}"

supervisor="supervise-daemon"

description="A scalable overlay networking tool with a focus on performance, simplicity and security"

required_dirs="${NEBULA_CONFDIR}"
required_files="${NEBULA_CONFIG}"

checkconfig() {
	"${command}" -test ${command_args} || return 1
}

start_pre() {
	if [ "${RC_CMD}" != "restart" ] ; then
		checkconfig || return $?
	fi
}

stop_pre() {
	if [ "${RC_CMD}" = "restart" ] ; then
		checkconfig || return $?
	fi
}
@@ -5,6 +5,8 @@ After=basic.target network.target network-online.target
Before=sshd.service

[Service]
Type=notify
NotifyAccess=main
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
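The added `Type=notify` lines make systemd hold the unit in a starting state until the daemon reports readiness over the sd_notify protocol. A generic Go illustration of that handshake follows; this is not nebula's implementation, only a sketch that relies on the standard `NOTIFY_SOCKET` contract:

```go
package main

import (
	"net"
	"os"
)

// notifyReady sends the sd_notify READY=1 message that a Type=notify unit
// waits for. It is a no-op when not running under systemd.
func notifyReady() error {
	sock := os.Getenv("NOTIFY_SOCKET")
	if sock == "" {
		return nil // not supervised by systemd with Type=notify
	}
	conn, err := net.DialUnix("unixgram", nil, &net.UnixAddr{Name: sock, Net: "unixgram"})
	if err != nil {
		return err
	}
	defer conn.Close()
	_, err = conn.Write([]byte("READY=1"))
	return err
}

func main() {
	// ... finish startup work first, then signal readiness.
	_ = notifyReady()
}
```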
273 firewall.go
@@ -2,10 +2,10 @@ package nebula

import (
	"crypto/sha256"
	"encoding/binary"
	"encoding/hex"
	"errors"
	"fmt"
	"hash/fnv"
	"net"
	"reflect"
	"strconv"
@@ -21,17 +21,12 @@ import (
	"github.com/slackhq/nebula/firewall"
)

const tcpACK = 0x10
const tcpFIN = 0x01

type FirewallInterface interface {
	AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error
}

type conn struct {
	Expires time.Time // Time when this conntrack entry will expire
	Sent    time.Time // If tcp rtt tracking is enabled this will be when Seq was last set
	Seq     uint32    // If tcp rtt tracking is enabled this will be the seq we are looking for an ack

	// record why the original connection passed the firewall, so we can re-validate
	// after ruleset changes. Note, rulesVersion is a uint16 so that these two
@@ -57,15 +52,16 @@ type Firewall struct {
	DefaultTimeout time.Duration //linux: 600s

	// Used to ensure we don't emit local packets for ips we don't own
	localIps *cidr.Tree4
	localIps     *cidr.Tree4[struct{}]
	assignedCIDR *net.IPNet
	hasSubnets   bool

	rules        string
	rulesVersion uint16

	trackTCPRTT     bool
	metricTCPRTT    metrics.Histogram
	incomingMetrics firewallMetrics
	outgoingMetrics firewallMetrics
	defaultLocalCIDRAny bool
	incomingMetrics     firewallMetrics
	outgoingMetrics     firewallMetrics

	l *logrus.Logger
}
@@ -83,6 +79,8 @@ type FirewallConntrack struct {
	TimerWheel *TimerWheel[firewall.Packet]
}

// FirewallTable is the entry point for a rule, the evaluation order is:
// Proto AND port AND (CA SHA or CA name) AND local CIDR AND (group OR groups OR name OR remote CIDR)
type FirewallTable struct {
	TCP firewallPort
	UDP firewallPort
@@ -106,18 +104,27 @@ type FirewallCA struct {
}

type FirewallRule struct {
	// Any makes Hosts, Groups, CIDR and LocalCIDR irrelevant
	Any       bool
	Hosts     map[string]struct{}
	Groups    [][]string
	CIDR      *cidr.Tree4
	LocalCIDR *cidr.Tree4
	// Any makes Hosts, Groups, and CIDR irrelevant
	Any    *firewallLocalCIDR
	Hosts  map[string]*firewallLocalCIDR
	Groups []*firewallGroups
	CIDR   *cidr.Tree4[*firewallLocalCIDR]
}

type firewallGroups struct {
	Groups    []string
	LocalCIDR *firewallLocalCIDR
}

// Even though ports are uint16, int32 maps are faster for lookup
// Plus we can use `-1` for fragment rules
type firewallPort map[int32]*FirewallCA

type firewallLocalCIDR struct {
	Any       bool
	LocalCIDR *cidr.Tree4[struct{}]
}

// NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts.
func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.NebulaCertificate) *Firewall {
	//TODO: error on 0 duration
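To make the restructured shape concrete: each leaf of the rule tree now carries its own local CIDR check, instead of one flat `LocalCIDR` tree per rule. The following is a simplified, self-contained model of that nesting, not the real implementation (maps stand in for the radix trees):

```go
package main

import "fmt"

// localCIDRLeaf stands in for firewallLocalCIDR: an "any" flag plus a set of
// local addresses (the real code uses *cidr.Tree4[struct{}]).
type localCIDRLeaf struct {
	any   bool
	cidrs map[string]bool
}

func (l *localCIDRLeaf) match(localIP string) bool {
	if l == nil {
		return false
	}
	return l.any || l.cidrs[localIP]
}

// rule mirrors FirewallRule in shape: host names map to local CIDR leaves,
// so a packet must match a host AND that host's local CIDR leaf.
type rule struct {
	hosts map[string]*localCIDRLeaf
}

func main() {
	r := rule{hosts: map[string]*localCIDRLeaf{
		"host1": {cidrs: map[string]bool{"192.168.100.1": true}},
	}}
	fmt.Println(r.hosts["host1"].match("192.168.100.1")) // true
	fmt.Println(r.hosts["host1"].match("10.0.0.1"))      // false
}
```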
@@ -137,9 +144,16 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
		max = defaultTimeout
	}

	localIps := cidr.NewTree4()
	localIps := cidr.NewTree4[struct{}]()
	var assignedCIDR *net.IPNet
	for _, ip := range c.Details.Ips {
		localIps.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
		ipNet := &net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}
		localIps.AddCIDR(ipNet, struct{}{})

		if assignedCIDR == nil {
			// Only grabbing the first one in the cert since any more than that currently has undefined behavior
			assignedCIDR = ipNet
		}
	}

	for _, n := range c.Details.Subnets {
@@ -157,9 +171,10 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
		UDPTimeout:     UDPTimeout,
		DefaultTimeout: defaultTimeout,
		localIps:       localIps,
		assignedCIDR:   assignedCIDR,
		hasSubnets:     len(c.Details.Subnets) > 0,
		l:              l,

		metricTCPRTT: metrics.GetOrRegisterHistogram("network.tcp.rtt", nil, metrics.NewExpDecaySample(1028, 0.015)),
		incomingMetrics: firewallMetrics{
			droppedLocalIP:  metrics.GetOrRegisterCounter("firewall.incoming.dropped.local_ip", nil),
			droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.remote_ip", nil),
@@ -183,6 +198,9 @@ func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *conf
		//TODO: max_connections
	)

	//TODO: Flip to false after v1.9 release
	fw.defaultLocalCIDRAny = c.GetBool("firewall.default_local_cidr_any", true)

	inboundAction := c.GetString("firewall.inbound_action", "drop")
	switch inboundAction {
	case "reject":
@@ -269,7 +287,7 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
		return fmt.Errorf("unknown protocol %v", proto)
	}

	return fp.addRule(startPort, endPort, groups, host, ip, localIp, caName, caSha)
	return fp.addRule(f, startPort, endPort, groups, host, ip, localIp, caName, caSha)
}

// GetRuleHash returns a hash representation of all inbound and outbound rules
@@ -278,6 +296,18 @@ func (f *Firewall) GetRuleHash() string {
	return hex.EncodeToString(sum[:])
}

// GetRuleHashFNV returns a uint32 FNV-1 hash representation of the rules, for use as a metric value
func (f *Firewall) GetRuleHashFNV() uint32 {
	h := fnv.New32a()
	h.Write([]byte(f.rules))
	return h.Sum32()
}

// GetRuleHashes returns both the sha256 and FNV-1 hashes, suitable for logging
func (f *Firewall) GetRuleHashes() string {
	return "SHA:" + f.GetRuleHash() + ",FNV:" + strconv.FormatUint(uint64(f.GetRuleHashFNV()), 10)
}

func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw FirewallInterface) error {
	var table string
	if inbound {
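The `default_local_cidr_any` flag read above changes what an inbound rule without an explicit `local_cidr` matches. A compact, self-contained Go sketch of the decision table (illustrative only; the real branching lives in `firewallLocalCIDR.addRule` further down):

```go
package main

import "fmt"

// localCIDRDefault mirrors the defaulting behavior for a rule with no
// local_cidr set. Hypothetical helper for illustration, not nebula code.
func localCIDRDefault(hasSubnets, defaultLocalCIDRAny bool) string {
	if !hasSubnets || defaultLocalCIDRAny {
		return "any local IP"
	}
	return "only the certificate's assigned address"
}

func main() {
	fmt.Println(localCIDRDefault(false, true)) // any local IP
	fmt.Println(localCIDRDefault(true, true))  // any local IP (legacy default until v1.9)
	fmt.Println(localCIDRDefault(true, false)) // only the certificate's assigned address
}
```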
@@ -383,15 +413,16 @@ var ErrNoMatchingRule = errors.New("no matching rule in firewall table")

// Drop returns an error if the packet should be dropped, explaining why. It
// returns nil if the packet should not be dropped.
func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) error {
func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) error {
	// Check if we spoke to this tuple, if we did then allow this packet
	if f.inConns(packet, fp, incoming, h, caPool, localCache) {
	if f.inConns(fp, h, caPool, localCache) {
		return nil
	}

	// Make sure remote address matches nebula certificate
	if remoteCidr := h.remoteCidr; remoteCidr != nil {
		if remoteCidr.Contains(fp.RemoteIP) == nil {
		ok, _ := remoteCidr.Contains(fp.RemoteIP)
		if !ok {
			f.metrics(incoming).droppedRemoteIP.Inc(1)
			return ErrInvalidRemoteIP
		}
@@ -404,7 +435,8 @@ func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *Hos
	}

	// Make sure we are supposed to be handling this local ip address
	if f.localIps.Contains(fp.LocalIP) == nil {
	ok, _ := f.localIps.Contains(fp.LocalIP)
	if !ok {
		f.metrics(incoming).droppedLocalIP.Inc(1)
		return ErrInvalidLocalIP
	}
@@ -421,7 +453,7 @@ func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *Hos
	}

	// We always want to conntrack since it is a faster operation
	f.addConn(packet, fp, incoming)
	f.addConn(fp, incoming)

	return nil
}
@@ -447,9 +479,10 @@ func (f *Firewall) EmitStats() {
	conntrack.Unlock()
	metrics.GetOrRegisterGauge("firewall.conntrack.count", nil).Update(int64(conntrackCount))
	metrics.GetOrRegisterGauge("firewall.rules.version", nil).Update(int64(f.rulesVersion))
	metrics.GetOrRegisterGauge("firewall.rules.hash", nil).Update(int64(f.GetRuleHashFNV()))
}

func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool {
func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool {
	if localCache != nil {
		if _, ok := localCache[fp]; ok {
			return true
@@ -509,11 +542,6 @@ func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *
	switch fp.Protocol {
	case firewall.ProtoTCP:
		c.Expires = time.Now().Add(f.TCPTimeout)
		if incoming {
			f.checkTCPRTT(c, packet)
		} else {
			setTCPRTTTracking(c, packet)
		}
	case firewall.ProtoUDP:
		c.Expires = time.Now().Add(f.UDPTimeout)
	default:
@@ -529,16 +557,13 @@ func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *
	return true
}

func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) {
func (f *Firewall) addConn(fp firewall.Packet, incoming bool) {
	var timeout time.Duration
	c := &conn{}

	switch fp.Protocol {
	case firewall.ProtoTCP:
		timeout = f.TCPTimeout
		if !incoming {
			setTCPRTTTracking(c, packet)
		}
	case firewall.ProtoUDP:
		timeout = f.UDPTimeout
	default:
@@ -608,7 +633,7 @@ func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.NebulaC
	return false
}

func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
func (fp firewallPort) addRule(f *Firewall, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
	if startPort > endPort {
		return fmt.Errorf("start port was lower than end port")
	}
@@ -621,7 +646,7 @@ func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string,
		}
	}

	if err := fp[i].addRule(groups, host, ip, localIp, caName, caSha); err != nil {
	if err := fp[i].addRule(f, groups, host, ip, localIp, caName, caSha); err != nil {
		return err
	}
}
@@ -652,13 +677,12 @@ func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.NebulaCer
	return fp[firewall.PortAny].match(p, c, caPool)
}

func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPNet, caName, caSha string) error {
func (fc *FirewallCA) addRule(f *Firewall, groups []string, host string, ip, localIp *net.IPNet, caName, caSha string) error {
	fr := func() *FirewallRule {
		return &FirewallRule{
			Hosts:     make(map[string]struct{}),
			Groups:    make([][]string, 0),
			CIDR:      cidr.NewTree4(),
			LocalCIDR: cidr.NewTree4(),
			Hosts:  make(map[string]*firewallLocalCIDR),
			Groups: make([]*firewallGroups, 0),
			CIDR:   cidr.NewTree4[*firewallLocalCIDR](),
		}
	}

@@ -667,14 +691,14 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPN
			fc.Any = fr()
		}

		return fc.Any.addRule(groups, host, ip, localIp)
		return fc.Any.addRule(f, groups, host, ip, localIp)
	}

	if caSha != "" {
		if _, ok := fc.CAShas[caSha]; !ok {
			fc.CAShas[caSha] = fr()
		}
		err := fc.CAShas[caSha].addRule(groups, host, ip, localIp)
		err := fc.CAShas[caSha].addRule(f, groups, host, ip, localIp)
		if err != nil {
			return err
		}
@@ -684,7 +708,7 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPN
		if _, ok := fc.CANames[caName]; !ok {
			fc.CANames[caName] = fr()
		}
		err := fc.CANames[caName].addRule(groups, host, ip, localIp)
		err := fc.CANames[caName].addRule(f, groups, host, ip, localIp)
		if err != nil {
			return err
		}
@@ -716,41 +740,63 @@ func (fc *FirewallCA) match(p firewall.Packet, c *cert.NebulaCertificate, caPool
	return fc.CANames[s.Details.Name].match(p, c)
}

func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet, localIp *net.IPNet) error {
	if fr.Any {
		return nil
func (fr *FirewallRule) addRule(f *Firewall, groups []string, host string, ip *net.IPNet, localCIDR *net.IPNet) error {
	flc := func() *firewallLocalCIDR {
		return &firewallLocalCIDR{
			LocalCIDR: cidr.NewTree4[struct{}](),
		}
	}

	if fr.isAny(groups, host, ip, localIp) {
		fr.Any = true
		// If it's any we need to wipe out any pre-existing rules to save on memory
		fr.Groups = make([][]string, 0)
		fr.Hosts = make(map[string]struct{})
		fr.CIDR = cidr.NewTree4()
		fr.LocalCIDR = cidr.NewTree4()
	} else {
		if len(groups) > 0 {
			fr.Groups = append(fr.Groups, groups)
	if fr.isAny(groups, host, ip) {
		if fr.Any == nil {
			fr.Any = flc()
		}

		if host != "" {
			fr.Hosts[host] = struct{}{}
		return fr.Any.addRule(f, localCIDR)
	}

	if len(groups) > 0 {
		nlc := flc()
		err := nlc.addRule(f, localCIDR)
		if err != nil {
			return err
		}

		if ip != nil {
			fr.CIDR.AddCIDR(ip, struct{}{})
		}
		fr.Groups = append(fr.Groups, &firewallGroups{
			Groups:    groups,
			LocalCIDR: nlc,
		})
	}

		if localIp != nil {
			fr.LocalCIDR.AddCIDR(localIp, struct{}{})
	if host != "" {
		nlc := fr.Hosts[host]
		if nlc == nil {
			nlc = flc()
		}
		err := nlc.addRule(f, localCIDR)
		if err != nil {
			return err
		}
		fr.Hosts[host] = nlc
	}

	if ip != nil {
		_, nlc := fr.CIDR.GetCIDR(ip)
		if nlc == nil {
			nlc = flc()
		}
		err := nlc.addRule(f, localCIDR)
		if err != nil {
			return err
		}
		fr.CIDR.AddCIDR(ip, nlc)
	}

	return nil
}

func (fr *FirewallRule) isAny(groups []string, host string, ip, localIp *net.IPNet) bool {
	if len(groups) == 0 && host == "" && ip == nil && localIp == nil {
func (fr *FirewallRule) isAny(groups []string, host string, ip *net.IPNet) bool {
	if len(groups) == 0 && host == "" && ip == nil {
		return true
	}

@@ -768,10 +814,6 @@ func (fr *FirewallRule) isAny(groups []string, host string, ip, localIp *net.IPN
		return true
	}

	if localIp != nil && localIp.Contains(net.IPv4(0, 0, 0, 0)) {
		return true
	}

	return false
}

@@ -781,7 +823,7 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
	}

	// Shortcut path for if groups, hosts, or cidr contained an `any`
	if fr.Any {
	if fr.Any.match(p, c) {
		return true
	}

@@ -789,7 +831,7 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
	for _, sg := range fr.Groups {
		found := false

		for _, g := range sg {
		for _, g := range sg.Groups {
			if _, ok := c.Details.InvertedGroups[g]; !ok {
				found = false
				break
@@ -798,27 +840,51 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
			found = true
		}

		if found {
		if found && sg.LocalCIDR.match(p, c) {
			return true
		}
	}

	if fr.Hosts != nil {
		if _, ok := fr.Hosts[c.Details.Name]; ok {
			return true
		if flc, ok := fr.Hosts[c.Details.Name]; ok {
			if flc.match(p, c) {
				return true
			}
		}
	}

	if fr.CIDR != nil && fr.CIDR.Contains(p.RemoteIP) != nil {
	return fr.CIDR.EachContains(p.RemoteIP, func(flc *firewallLocalCIDR) bool {
		return flc.match(p, c)
	})
}

func (flc *firewallLocalCIDR) addRule(f *Firewall, localIp *net.IPNet) error {
	if localIp == nil {
		if !f.hasSubnets || f.defaultLocalCIDRAny {
			flc.Any = true
			return nil
		}

		localIp = f.assignedCIDR
	} else if localIp.Contains(net.IPv4(0, 0, 0, 0)) {
		flc.Any = true
	}

	flc.LocalCIDR.AddCIDR(localIp, struct{}{})
	return nil
}

func (flc *firewallLocalCIDR) match(p firewall.Packet, c *cert.NebulaCertificate) bool {
	if flc == nil {
		return false
	}

	if flc.Any {
		return true
	}

	if fr.LocalCIDR != nil && fr.LocalCIDR.Contains(p.LocalIP) != nil {
		return true
	}

	// No host, group, or cidr matched, bye bye
	return false
	ok, _ := flc.LocalCIDR.Contains(p.LocalIP)
	return ok
}

type rule struct {
@@ -934,42 +1000,3 @@ func parsePort(s string) (startPort, endPort int32, err error) {

	return
}

// TODO: write tests for these
func setTCPRTTTracking(c *conn, p []byte) {
	if c.Seq != 0 {
		return
	}

	ihl := int(p[0]&0x0f) << 2

	// Don't track FIN packets
	if p[ihl+13]&tcpFIN != 0 {
		return
	}

	c.Seq = binary.BigEndian.Uint32(p[ihl+4 : ihl+8])
	c.Sent = time.Now()
}

func (f *Firewall) checkTCPRTT(c *conn, p []byte) bool {
	if c.Seq == 0 {
		return false
	}

	ihl := int(p[0]&0x0f) << 2
	if p[ihl+13]&tcpACK == 0 {
		return false
	}

	// Deal with wrap around, signed int cuts the ack window in half
	// 0 is a bad ack, no data acknowledged
	// positive number is a bad ack, ack is over half the window away
	if int32(c.Seq-binary.BigEndian.Uint32(p[ihl+8:ihl+12])) >= 0 {
		return false
	}

	f.metricTCPRTT.Update(time.Since(c.Sent).Nanoseconds())
	c.Seq = 0
	return true
}
339 firewall_test.go
@@ -2,14 +2,12 @@ package nebula
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"math"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/rcrowley/go-metrics"
|
||||
"github.com/slackhq/nebula/cert"
|
||||
"github.com/slackhq/nebula/config"
|
||||
"github.com/slackhq/nebula/firewall"
|
||||
@@ -71,35 +69,33 @@ func TestFirewall_AddRule(t *testing.T) {
|
||||
|
||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", nil, nil, "", ""))
|
||||
// An empty rule is any
|
||||
assert.True(t, fw.InRules.TCP[1].Any.Any)
|
||||
assert.True(t, fw.InRules.TCP[1].Any.Any.Any)
|
||||
assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
|
||||
assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)
|
||||
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", ""))
|
||||
assert.False(t, fw.InRules.UDP[1].Any.Any)
|
||||
assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1")
|
||||
assert.Nil(t, fw.InRules.UDP[1].Any.Any)
|
||||
assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0].Groups, "g1")
|
||||
assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)
|
||||
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", nil, nil, "", ""))
|
||||
assert.False(t, fw.InRules.ICMP[1].Any.Any)
|
||||
assert.Nil(t, fw.InRules.ICMP[1].Any.Any)
|
||||
assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
|
||||
assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")
|
||||
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, nil, "", ""))
|
||||
assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
|
||||
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
|
||||
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
|
||||
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
|
||||
assert.Nil(t, fw.OutRules.AnyProto[1].Any.Any)
|
||||
ok, _ := fw.OutRules.AnyProto[1].Any.CIDR.GetCIDR(ti)
|
||||
assert.True(t, ok)
|
||||
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", nil, ti, "", ""))
|
||||
assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
|
||||
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
|
||||
assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
|
||||
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))
|
||||
assert.NotNil(t, fw.OutRules.AnyProto[1].Any.Any)
|
||||
ok, _ = fw.OutRules.AnyProto[1].Any.Any.LocalCIDR.GetCIDR(ti)
|
||||
assert.True(t, ok)
|
||||
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "ca-name", ""))
|
||||
@@ -109,30 +105,14 @@ func TestFirewall_AddRule(t *testing.T) {
|
||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", "ca-sha"))
|
||||
assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")
|
||||
|
||||
// Set any and clear fields
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, ti, "", ""))
|
||||
assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0])
|
||||
assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1")
|
||||
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
|
||||
assert.NotNil(t, fw.OutRules.AnyProto[0].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))
|
||||
|
||||
// run twice just to make sure
|
||||
//TODO: these ANY rules should clear the CA firewall portion
|
||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
|
||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", ""))
|
||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
||||
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups)
|
||||
assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts)
|
||||
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", ""))
|
||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
|
||||
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
_, anyIp, _ := net.ParseCIDR("0.0.0.0/0")
|
||||
assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, nil, "", ""))
|
||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
|
||||
assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
|
||||
|
||||
// Test error conditions
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
|
||||
@@ -181,74 +161,84 @@ func TestFirewall_Drop(t *testing.T) {
|
||||
cp := cert.NewCAPool()
|
||||
|
||||
// Drop outbound
|
||||
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
|
||||
assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrNoMatchingRule)
|
||||
// Allow inbound
|
||||
resetConntrack(fw)
|
||||
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
|
||||
assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
||||
// Allow outbound because conntrack
|
||||
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
|
||||
assert.NoError(t, fw.Drop(p, false, &h, cp, nil))
|
||||
|
||||
// test remote mismatch
|
||||
oldRemote := p.RemoteIP
|
||||
p.RemoteIP = iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 10))
|
||||
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrInvalidRemoteIP)
|
||||
assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrInvalidRemoteIP)
|
||||
p.RemoteIP = oldRemote
|
||||
|
||||
// ensure signer doesn't get in the way of group checks
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum"))
|
||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum-bad"))
|
||||
assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
|
||||
assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
|
||||
|
||||
// test caSha doesn't drop on match
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum-bad"))
|
||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum"))
|
||||
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
|
||||
assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
||||
|
||||
// ensure ca name doesn't get in the way of group checks
|
||||
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good", ""))
|
||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good-bad", ""))
|
||||
assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
|
||||
assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
|
||||
|
||||
// test caName doesn't drop on match
|
||||
cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
|
||||
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
|
||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good-bad", ""))
|
||||
assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good", ""))
|
||||
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
|
||||
assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
|
||||
}

func BenchmarkFirewallTable_match(b *testing.B) {
f := &Firewall{}
ft := FirewallTable{
TCP: firewallPort{},
}

_, n, _ := net.ParseCIDR("172.1.1.1/32")
_ = ft.TCP.addRule(10, 10, []string{"good-group"}, "good-host", n, n, "", "")
_ = ft.TCP.addRule(10, 10, []string{"good-group2"}, "good-host", n, n, "", "")
_ = ft.TCP.addRule(10, 10, []string{"good-group3"}, "good-host", n, n, "", "")
_ = ft.TCP.addRule(10, 10, []string{"good-group4"}, "good-host", n, n, "", "")
_ = ft.TCP.addRule(10, 10, []string{"good-group, good-group1"}, "good-host", n, n, "", "")
goodLocalCIDRIP := iputil.Ip2VpnIp(n.IP)
_ = ft.TCP.addRule(f, 10, 10, []string{"good-group"}, "good-host", n, nil, "", "")
_ = ft.TCP.addRule(f, 100, 100, []string{"good-group"}, "good-host", nil, n, "", "")
cp := cert.NewCAPool()

b.Run("fail on proto", func(b *testing.B) {
// This benchmark is showing us the cost of failing to match the protocol
c := &cert.NebulaCertificate{}
for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoUDP}, true, c, cp)
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoUDP}, true, c, cp))
}
})

b.Run("fail on port", func(b *testing.B) {
b.Run("pass proto, fail on port", func(b *testing.B) {
// This benchmark is showing us the cost of matching a specific protocol but failing to match the port
c := &cert.NebulaCertificate{}
for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 1}, true, c, cp)
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 1}, true, c, cp))
}
})

b.Run("fail all group, name, and cidr", func(b *testing.B) {
b.Run("pass proto, port, fail on local CIDR", func(b *testing.B) {
c := &cert.NebulaCertificate{}
ip, _, _ := net.ParseCIDR("9.254.254.254/32")
lip := iputil.Ip2VpnIp(ip)
for n := 0; n < b.N; n++ {
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: lip}, true, c, cp))
}
})

b.Run("pass proto, port, any local CIDR, fail all group, name, and cidr", func(b *testing.B) {
_, ip, _ := net.ParseCIDR("9.254.254.254/32")
c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
@@ -258,11 +248,25 @@ func BenchmarkFirewallTable_match(b *testing.B) {
},
}
for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
}
})

b.Run("pass on group", func(b *testing.B) {
b.Run("pass proto, port, specific local CIDR, fail all group, name, and cidr", func(b *testing.B) {
_, ip, _ := net.ParseCIDR("9.254.254.254/32")
c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}},
Name: "nope",
Ips: []*net.IPNet{ip},
},
}
for n := 0; n < b.N; n++ {
assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: goodLocalCIDRIP}, true, c, cp))
}
})

b.Run("pass on group on any local cidr", func(b *testing.B) {
c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"good-group": {}},
@@ -270,7 +274,19 @@ func BenchmarkFirewallTable_match(b *testing.B) {
},
}
for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
}
})

b.Run("pass on group on specific local cidr", func(b *testing.B) {
c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"good-group": {}},
Name: "nope",
},
}
for n := 0; n < b.N; n++ {
assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: goodLocalCIDRIP}, true, c, cp))
}
})

@@ -285,60 +301,60 @@ func BenchmarkFirewallTable_match(b *testing.B) {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
}
})

b.Run("pass on ip", func(b *testing.B) {
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}},
Name: "good-host",
},
}
for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp)
}
})

b.Run("pass on local ip", func(b *testing.B) {
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}},
Name: "good-host",
},
}
for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, LocalIP: ip}, true, c, cp)
}
})

_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, n, "", "")

b.Run("pass on ip with any port", func(b *testing.B) {
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}},
Name: "good-host",
},
}
for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
}
})

b.Run("pass on local ip with any port", func(b *testing.B) {
ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
c := &cert.NebulaCertificate{
Details: cert.NebulaCertificateDetails{
InvertedGroups: map[string]struct{}{"nope": {}},
Name: "good-host",
},
}
for n := 0; n < b.N; n++ {
ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip}, true, c, cp)
}
})
//
//b.Run("pass on ip", func(b *testing.B) {
// ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
// c := &cert.NebulaCertificate{
// Details: cert.NebulaCertificateDetails{
// InvertedGroups: map[string]struct{}{"nope": {}},
// Name: "good-host",
// },
// }
// for n := 0; n < b.N; n++ {
// ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp)
// }
//})
//
//b.Run("pass on local ip", func(b *testing.B) {
// ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
// c := &cert.NebulaCertificate{
// Details: cert.NebulaCertificateDetails{
// InvertedGroups: map[string]struct{}{"nope": {}},
// Name: "good-host",
// },
// }
// for n := 0; n < b.N; n++ {
// ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, LocalIP: ip}, true, c, cp)
// }
//})
//
//_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, n, "", "")
//
//b.Run("pass on ip with any port", func(b *testing.B) {
// ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
// c := &cert.NebulaCertificate{
// Details: cert.NebulaCertificateDetails{
// InvertedGroups: map[string]struct{}{"nope": {}},
// Name: "good-host",
// },
// }
// for n := 0; n < b.N; n++ {
// ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
// }
//})
//
//b.Run("pass on local ip with any port", func(b *testing.B) {
// ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
// c := &cert.NebulaCertificate{
// Details: cert.NebulaCertificateDetails{
// InvertedGroups: map[string]struct{}{"nope": {}},
// Name: "good-host",
// },
// }
// for n := 0; n < b.N; n++ {
// ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip}, true, c, cp)
// }
//})
}
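These benchmarks isolate the cost of each stage of rule matching: failing on protocol, failing on port, failing on local CIDR, and finally passing on group, host name, remote IP, or local IP. To run just this suite, a standard invocation from the package directory such as `go test -run '^$' -bench BenchmarkFirewallTable_match -benchmem` should work.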

func TestFirewall_Drop2(t *testing.T) {
@@ -394,10 +410,10 @@ func TestFirewall_Drop2(t *testing.T) {
cp := cert.NewCAPool()

// h1/c1 lacks the proper groups
assert.Error(t, fw.Drop([]byte{}, p, true, &h1, cp, nil), ErrNoMatchingRule)
assert.Error(t, fw.Drop(p, true, &h1, cp, nil), ErrNoMatchingRule)
// c has the proper groups
resetConntrack(fw)
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
}

func TestFirewall_Drop3(t *testing.T) {
@@ -477,13 +493,13 @@ func TestFirewall_Drop3(t *testing.T) {
cp := cert.NewCAPool()

// c1 should pass because host match
assert.NoError(t, fw.Drop([]byte{}, p, true, &h1, cp, nil))
assert.NoError(t, fw.Drop(p, true, &h1, cp, nil))
// c2 should pass because ca sha match
resetConntrack(fw)
assert.NoError(t, fw.Drop([]byte{}, p, true, &h2, cp, nil))
assert.NoError(t, fw.Drop(p, true, &h2, cp, nil))
// c3 should fail because no match
resetConntrack(fw)
assert.Equal(t, fw.Drop([]byte{}, p, true, &h3, cp, nil), ErrNoMatchingRule)
assert.Equal(t, fw.Drop(p, true, &h3, cp, nil), ErrNoMatchingRule)
}

func TestFirewall_DropConntrackReload(t *testing.T) {
@@ -527,12 +543,12 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
cp := cert.NewCAPool()

// Drop outbound
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrNoMatchingRule)
// Allow inbound
resetConntrack(fw)
assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
// Allow outbound because conntrack
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
assert.NoError(t, fw.Drop(p, false, &h, cp, nil))

oldFw := fw
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
@@ -541,7 +557,7 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
fw.rulesVersion = oldFw.rulesVersion + 1

// Allow outbound because conntrack and new rules allow port 10
assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
assert.NoError(t, fw.Drop(p, false, &h, cp, nil))

oldFw = fw
fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
@@ -550,7 +566,7 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
fw.rulesVersion = oldFw.rulesVersion + 1

// Drop outbound because conntrack doesn't match new ruleset
assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrNoMatchingRule)
}
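The reload test above depends on each conntrack entry remembering which rule set it was validated against: after the firewall is swapped and `rulesVersion` is bumped, a cached flow is honored only if the packet still passes the new rules. A minimal sketch of that idea, with illustrative names rather than Nebula's actual internals:

```go
package sketch

// firewallSketch and connSketch are hypothetical stand-ins for the firewall
// and a conntrack entry; only the version-gating logic is shown.
type firewallSketch struct {
	rulesVersion uint16
}

type connSketch struct {
	rulesVersion uint16 // version of the rule set this entry last passed
}

// allowByConntrack honors a cached entry only if it was validated against the
// current rules; otherwise the packet is re-checked against the new rule set
// and the entry is re-tagged on success.
func (fw *firewallSketch) allowByConntrack(c *connSketch, passesRules func() bool) bool {
	if c.rulesVersion == fw.rulesVersion {
		return true
	}
	if passesRules() {
		c.rulesVersion = fw.rulesVersion
		return true
	}
	return false
}
```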

func BenchmarkLookup(b *testing.B) {
@@ -812,97 +828,6 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
assert.EqualError(t, AddFirewallRulesFromConfig(l, true, conf, mf), "firewall.inbound rule #0; `test error`")
}

func TestTCPRTTTracking(t *testing.T) {
b := make([]byte, 200)

// Max ip IHL (60 bytes) and tcp IHL (60 bytes)
b[0] = 15
b[60+12] = 15 << 4
f := Firewall{
metricTCPRTT: metrics.GetOrRegisterHistogram("nope", nil, metrics.NewExpDecaySample(1028, 0.015)),
}

// Set SEQ to 1
binary.BigEndian.PutUint32(b[60+4:60+8], 1)

c := &conn{}
setTCPRTTTracking(c, b)
assert.Equal(t, uint32(1), c.Seq)

// Bad ack - no ack flag
binary.BigEndian.PutUint32(b[60+8:60+12], 80)
assert.False(t, f.checkTCPRTT(c, b))

// Bad ack, number is too low
binary.BigEndian.PutUint32(b[60+8:60+12], 0)
b[60+13] = uint8(0x10)
assert.False(t, f.checkTCPRTT(c, b))

// Good ack
binary.BigEndian.PutUint32(b[60+8:60+12], 80)
assert.True(t, f.checkTCPRTT(c, b))
assert.Equal(t, uint32(0), c.Seq)

// Set SEQ to 1
binary.BigEndian.PutUint32(b[60+4:60+8], 1)
c = &conn{}
setTCPRTTTracking(c, b)
assert.Equal(t, uint32(1), c.Seq)

// Good acks
binary.BigEndian.PutUint32(b[60+8:60+12], 81)
assert.True(t, f.checkTCPRTT(c, b))
assert.Equal(t, uint32(0), c.Seq)

// Set SEQ to max uint32 - 20
binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0)-20)
c = &conn{}
setTCPRTTTracking(c, b)
assert.Equal(t, ^uint32(0)-20, c.Seq)

// Good acks
binary.BigEndian.PutUint32(b[60+8:60+12], 81)
assert.True(t, f.checkTCPRTT(c, b))
assert.Equal(t, uint32(0), c.Seq)

// Set SEQ to max uint32 / 2
binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0)/2)
c = &conn{}
setTCPRTTTracking(c, b)
assert.Equal(t, ^uint32(0)/2, c.Seq)

// Below
binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2-1)
assert.False(t, f.checkTCPRTT(c, b))
assert.Equal(t, ^uint32(0)/2, c.Seq)

// Halfway below
binary.BigEndian.PutUint32(b[60+8:60+12], uint32(0))
assert.False(t, f.checkTCPRTT(c, b))
assert.Equal(t, ^uint32(0)/2, c.Seq)

// Halfway above is ok
binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0))
assert.True(t, f.checkTCPRTT(c, b))
assert.Equal(t, uint32(0), c.Seq)

// Set SEQ to max uint32
binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0))
c = &conn{}
setTCPRTTTracking(c, b)
assert.Equal(t, ^uint32(0), c.Seq)

// Halfway + 1 above
binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2+1)
assert.False(t, f.checkTCPRTT(c, b))
assert.Equal(t, ^uint32(0), c.Seq)

// Halfway above
binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2)
assert.True(t, f.checkTCPRTT(c, b))
assert.Equal(t, uint32(0), c.Seq)
}
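The wraparound cases above pin down the acceptance window: an ACK is treated as covering the tracked SEQ when it lies at most half of the 32-bit sequence space at-or-ahead of it, computed with wrapping unsigned arithmetic. A standalone sketch of just that comparison, derived from the assertions in the test (the real checkTCPRTT also requires the ACK flag and parses the IP/TCP headers first):

```go
package main

import "fmt"

// ackCovers reports whether ack acknowledges seq under 32-bit wraparound.
// Unsigned subtraction wraps, so values "behind" seq produce huge
// differences; anything within half the sequence space counts as at-or-ahead.
func ackCovers(seq, ack uint32) bool {
	return ack-seq <= 1<<31
}

func main() {
	fmt.Println(ackCovers(1, 80))             // true: ack ahead of seq
	fmt.Println(ackCovers(^uint32(0)-20, 81)) // true: ack wrapped past max uint32
	fmt.Println(ackCovers(^uint32(0)/2, 0))   // false: more than halfway behind
}
```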

func TestFirewall_convertRule(t *testing.T) {
l := test.NewLogger()
ob := &bytes.Buffer{}

48 go.mod
@@ -1,49 +1,53 @@
module github.com/slackhq/nebula

go 1.20
go 1.22.0

toolchain go1.22.2

require (
dario.cat/mergo v1.0.0
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
github.com/armon/go-radix v1.0.0
github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
github.com/flynn/noise v1.0.0
github.com/flynn/noise v1.1.0
github.com/gogo/protobuf v1.3.2
github.com/google/gopacket v1.1.19
github.com/imdario/mergo v0.3.15
github.com/kardianos/service v1.2.2
github.com/miekg/dns v1.1.54
github.com/miekg/dns v1.1.59
github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
github.com/prometheus/client_golang v1.15.1
github.com/prometheus/client_golang v1.19.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/sirupsen/logrus v1.9.0
github.com/sirupsen/logrus v1.9.3
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
github.com/stretchr/testify v1.8.2
github.com/vishvananda/netlink v1.1.0
golang.org/x/crypto v0.8.0
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53
golang.org/x/net v0.9.0
golang.org/x/sys v0.8.0
golang.org/x/term v0.8.0
github.com/stretchr/testify v1.9.0
github.com/vishvananda/netlink v1.2.1-beta.2
golang.org/x/crypto v0.23.0
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
golang.org/x/net v0.25.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.20.0
golang.org/x/term v0.20.0
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b
golang.zx2c4.com/wireguard/windows v0.5.3
google.golang.org/protobuf v1.30.0
google.golang.org/protobuf v1.34.1
gopkg.in/yaml.v2 v2.4.0
gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe
)

require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/google/btree v1.1.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.48.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/vishvananda/netns v0.0.4 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/tools v0.8.0 // indirect
golang.org/x/mod v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.19.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)
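One detail worth calling out in this go.mod change: the bump from `go 1.20` to `go 1.22.0` raises the minimum supported language version, while the new `toolchain go1.22.2` line (a directive introduced in Go 1.21) tells the go command which exact toolchain to select, or download, when the locally installed one is older.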

102 go.sum
@@ -1,4 +1,6 @@
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -20,8 +22,8 @@ github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -42,20 +44,18 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -72,14 +72,13 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -97,24 +96,24 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU=
github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -122,27 +121,23 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -152,16 +147,16 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o=
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY=
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -172,8 +167,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -181,44 +176,50 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b h1:J1CaxgLerRR5lgx3wnr6L04cJFbWoceSK9JWBdglINo=
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b/go.mod h1:tqur9LnfstdR9ep2LaJT4lFUl0EjlHtge+gAjmsHUG4=
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -229,9 +230,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -247,3 +247,5 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe h1:fre4i6mv4iBuz5lCMOzHD1rH1ljqHWSICFmZRbbgp3g=
gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU=

31 handshake.go
@@ -1,31 +0,0 @@
package nebula

import (
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/udp"
)

func HandleIncomingHandshake(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H, hostinfo *HostInfo) {
// First remote allow list check before we know the vpnIp
if addr != nil {
if !f.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
f.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return
}
}

switch h.Subtype {
case header.HandshakeIXPSK0:
switch h.MessageCounter {
case 1:
ixHandshakeStage1(f, addr, via, packet, h)
case 2:
newHostinfo, _ := f.handshakeManager.QueryIndex(h.RemoteIndex)
tearDown := ixHandshakeStage2(f, addr, via, newHostinfo, packet, h)
if tearDown && newHostinfo != nil {
f.handshakeManager.DeleteHostInfo(newHostinfo)
}
}
}

}
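The `@@ -1,31 +0,0 @@` hunk above shows handshake.go removed in full; the subtype and message-counter dispatch it performed appears to move into the handshake manager as part of the same refactor (the HandshakeManager changes later in this diff gain the vpnIps/indexes maps that QueryIndex consulted).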

171 handshake_ix.go
@@ -4,6 +4,7 @@ import (
"time"

"github.com/flynn/noise"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
@@ -13,27 +14,22 @@

// This function constructs a handshake packet, but does not actually send it
// Sending is done by the handshake manager
func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
// This queries the lighthouse if we don't know a remote for the host
// We do it here to provoke the lighthouse to preempt our timer wheel and trigger the stage 1 packet to send
// more quickly, effect is a quicker handshake.
if hostinfo.remote == nil {
f.lightHouse.QueryServer(vpnIp, f)
}

err := f.handshakeManager.AddIndexHostInfo(hostinfo)
func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
err := f.handshakeManager.allocateIndex(hh)
if err != nil {
f.l.WithError(err).WithField("vpnIp", vpnIp).
f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to generate index")
return
return false
}

ci := hostinfo.ConnectionState
certState := f.pki.GetCertState()
ci := NewConnectionState(f.l, f.cipher, certState, true, noise.HandshakeIX, []byte{}, 0)
hh.hostinfo.ConnectionState = ci

hsProto := &NebulaHandshakeDetails{
InitiatorIndex: hostinfo.localIndexId,
InitiatorIndex: hh.hostinfo.localIndexId,
Time: uint64(time.Now().UnixNano()),
Cert: ci.certState.rawCertificateNoKey,
Cert: certState.RawCertificateNoKey,
}

hsBytes := []byte{}
@@ -44,9 +40,9 @@ func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
hsBytes, err = hs.Marshal()

if err != nil {
f.l.WithError(err).WithField("vpnIp", vpnIp).
f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
return
return false
}

h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1)
@@ -54,22 +50,23 @@ func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {

msg, _, _, err := ci.H.WriteMessage(h, hsBytes)
if err != nil {
f.l.WithError(err).WithField("vpnIp", vpnIp).
f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
return
return false
}

// We are sending handshake packet 1, so we don't expect to receive
// handshake packet 1 from the responder
ci.window.Update(f.l, 1)

hostinfo.HandshakePacket[0] = msg
hostinfo.HandshakeReady = true
hostinfo.handshakeStart = time.Now()
hh.hostinfo.HandshakePacket[0] = msg
hh.ready = true
return true
}
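Note the contract change in ixHandshakeStage0 above: the old version mutated a prepared HostInfo and returned nothing, while the new one builds its ConnectionState from the current PKI cert state, reports success as a bool, and flips hh.ready instead of HandshakeReady. The per-attempt bookkeeping (start time, counter, packet store) now lives on HandshakeHostInfo, whose definition appears near the end of this diff.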

func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H) {
ci := f.newConnectionState(f.l, false, noise.HandshakeIX, []byte{}, 0)
certState := f.pki.GetCertState()
ci := NewConnectionState(f.l, f.cipher, certState, false, noise.HandshakeIX, []byte{}, 0)
// Mark packet 1 as seen so it doesn't show up as missed
ci.window.Update(f.l, 1)

@@ -91,11 +88,16 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
return
}

remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool)
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
if err != nil {
f.l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert).
Info("Invalid certificate from host")
e := f.l.WithError(err).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"})

if f.l.Level > logrus.DebugLevel {
e = e.WithField("cert", remoteCert)
}

e.Info("Invalid certificate from host")
return
}
vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP)
@@ -143,9 +145,6 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
},
}

hostinfo.Lock()
defer hostinfo.Unlock()

f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
@@ -155,7 +154,7 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
Info("Handshake message received")

hs.Details.ResponderIndex = myIndex
hs.Details.Cert = ci.certState.rawCertificateNoKey
hs.Details.Cert = certState.RawCertificateNoKey
// Update the time in case their clock is way off from ours
hs.Details.Time = uint64(time.Now().UnixNano())

@@ -211,19 +210,12 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
if err != nil {
switch err {
case ErrAlreadySeen:
// Update remote if preferred (Note we have to switch to locking
// the existing hostinfo, and then switch back so the defer Unlock
// higher in this function still works)
hostinfo.Unlock()
existing.Lock()
// Update remote if preferred
if existing.SetRemoteIfPreferred(f.hostMap, addr) {
// Send a test packet to ensure the other side has also switched to
// the preferred remote
f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
}
existing.Unlock()
hostinfo.Lock()

msg = existing.HandshakePacket[2]
f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
@@ -310,7 +302,6 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
Info("Handshake message sent")
}
} else {
@@ -326,25 +317,26 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
WithField("issuer", issuer).
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
Info("Handshake message sent")
}

f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
hostinfo.ConnectionState.messageCounter.Store(2)
hostinfo.remotes.ResetBlockedRemotes()

return
}

func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *HostInfo, packet []byte, h *header.H) bool {
if hostinfo == nil {
func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hh *HandshakeHostInfo, packet []byte, h *header.H) bool {
if hh == nil {
// Nothing here to tear down, got a bogus stage 2 packet
return true
}

hostinfo.Lock()
defer hostinfo.Unlock()
hh.Lock()
defer hh.Unlock()

hostinfo := hh.hostinfo
if addr != nil {
if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) {
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
@@ -353,22 +345,6 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
}

ci := hostinfo.ConnectionState
if ci.ready {
f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
Info("Handshake is already complete")

// Update remote if preferred
if hostinfo.SetRemoteIfPreferred(f.hostMap, addr) {
// Send a test packet to ensure the other side has also switched to
// the preferred remote
f.SendMessageToVpnIp(header.Test, header.TestRequest, hostinfo.vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
}

// We already have a complete tunnel, there is nothing that can be done by processing further stage 1 packets
return false
}

msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[header.Len:])
if err != nil {
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
@@ -399,11 +375,16 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
return true
}

remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool)
remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
if err != nil {
f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("cert", remoteCert).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
Error("Invalid certificate from host")
e := f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
WithField("handshake", m{"stage": 2, "style": "ix_psk0"})

if f.l.Level > logrus.DebugLevel {
e = e.WithField("cert", remoteCert)
}

e.Error("Invalid certificate from host")

// The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again
return true
@@ -422,34 +403,30 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
Info("Incorrect host responded to handshake")

// Release our old handshake from pending, it should not continue
f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
f.handshakeManager.DeleteHostInfo(hostinfo)

// Create a new hostinfo/handshake for the intended vpn ip
//TODO: this adds it to the timer wheel in a way that aggressively retries
newHostInfo := f.getOrHandshake(hostinfo.vpnIp)
newHostInfo.Lock()
f.handshakeManager.StartHandshake(hostinfo.vpnIp, func(newHH *HandshakeHostInfo) {
//TODO: this doesnt know if its being added or is being used for caching a packet
// Block the current used address
newHH.hostinfo.remotes = hostinfo.remotes
newHH.hostinfo.remotes.BlockRemote(addr)

// Block the current used address
newHostInfo.remotes = hostinfo.remotes
newHostInfo.remotes.BlockRemote(addr)
// Get the correct remote list for the host we did handshake with
hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)

// Get the correct remote list for the host we did handshake with
hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
f.l.WithField("blockedUdpAddrs", newHH.hostinfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
WithField("remotes", newHH.hostinfo.remotes.CopyAddrs(f.hostMap.GetPreferredRanges())).
Info("Blocked addresses for handshakes")

f.l.WithField("blockedUdpAddrs", newHostInfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
WithField("remotes", newHostInfo.remotes.CopyAddrs(f.hostMap.preferredRanges)).
Info("Blocked addresses for handshakes")
// Swap the packet store to benefit the original intended recipient
newHH.packetStore = hh.packetStore
hh.packetStore = []*cachedPacket{}

// Swap the packet store to benefit the original intended recipient
hostinfo.ConnectionState.queueLock.Lock()
newHostInfo.packetStore = hostinfo.packetStore
hostinfo.packetStore = []*cachedPacket{}
hostinfo.ConnectionState.queueLock.Unlock()

// Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
hostinfo.vpnIp = vpnIp
f.sendCloseTunnel(hostinfo)
newHostInfo.Unlock()
// Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
hostinfo.vpnIp = vpnIp
f.sendCloseTunnel(hostinfo)
})

return true
}
@@ -457,7 +434,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
// Mark packet 2 as seen so it doesn't show up as missed
ci.window.Update(f.l, 2)

duration := time.Since(hostinfo.handshakeStart).Nanoseconds()
duration := time.Since(hh.startTime).Nanoseconds()
f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
WithField("certName", certName).
WithField("fingerprint", fingerprint).
@@ -465,7 +442,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
WithField("durationNs", duration).
WithField("sentCachedPackets", len(hostinfo.packetStore)).
WithField("sentCachedPackets", len(hh.packetStore)).
Info("Handshake message received")

hostinfo.remoteIndexId = hs.Details.ResponderIndex
@@ -489,7 +466,23 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
f.handshakeManager.Complete(hostinfo, f)
f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)

hostinfo.ConnectionState.messageCounter.Store(2)

if f.l.Level >= logrus.DebugLevel {
hostinfo.logger(f.l).Debugf("Sending %d stored packets", len(hh.packetStore))
}

if len(hh.packetStore) > 0 {
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
for _, cp := range hh.packetStore {
cp.callback(cp.messageType, cp.messageSubType, hostinfo, cp.packet, nb, out)
}
f.cachedPacketMetrics.sent.Inc(int64(len(hh.packetStore)))
}

hostinfo.remotes.ResetBlockedRemotes()
f.metricHandshakes.Update(duration)

return false

@@ -7,6 +7,7 @@ import (
"encoding/binary"
"errors"
"net"
"sync"
"time"

"github.com/rcrowley/go-metrics"
@@ -42,24 +43,68 @@ type HandshakeConfig struct {
}

type HandshakeManager struct {
pendingHostMap *HostMap
// Mutex for interacting with the vpnIps and indexes maps
sync.RWMutex

vpnIps map[iputil.VpnIp]*HandshakeHostInfo
indexes map[uint32]*HandshakeHostInfo

mainHostMap *HostMap
lightHouse *LightHouse
outside *udp.Conn
outside udp.Conn
config HandshakeConfig
OutboundHandshakeTimer *LockingTimerWheel[iputil.VpnIp]
messageMetrics *MessageMetrics
metricInitiated metrics.Counter
metricTimedOut metrics.Counter
f *Interface
l *logrus.Logger

// can be used to trigger outbound handshake for the given vpnIp
trigger chan iputil.VpnIp
}

func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges []*net.IPNet, mainHostMap *HostMap, lightHouse *LightHouse, outside *udp.Conn, config HandshakeConfig) *HandshakeManager {
type HandshakeHostInfo struct {
sync.Mutex

startTime time.Time // Time that we first started trying with this handshake
ready bool // Is the handshake ready
counter int // How many attempts have we made so far
lastRemotes []*udp.Addr // Remotes that we sent to during the previous attempt
packetStore []*cachedPacket // A set of packets to be transmitted once the handshake completes

hostinfo *HostInfo
}

func (hh *HandshakeHostInfo) cachePacket(l *logrus.Logger, t header.MessageType, st header.MessageSubType, packet []byte, f packetCallback, m *cachedPacketMetrics) {
if len(hh.packetStore) < 100 {
tempPacket := make([]byte, len(packet))
copy(tempPacket, packet)

hh.packetStore = append(hh.packetStore, &cachedPacket{t, st, f, tempPacket})
if l.Level >= logrus.DebugLevel {
hh.hostinfo.logger(l).
WithField("length", len(hh.packetStore)).
WithField("stored", true).
Debugf("Packet store")
}

} else {
m.dropped.Inc(1)

if l.Level >= logrus.DebugLevel {
hh.hostinfo.logger(l).
WithField("length", len(hh.packetStore)).
WithField("stored", false).
Debugf("Packet store")
}
}
}

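The HandshakeHostInfo struct above now carries all of the pending-handshake state that previously lived on HostInfo itself. Callers no longer touch packetStore directly; they go through cachePacket, which copies the payload and, once 100 packets are queued, only increments the dropped metric. A hedged usage sketch, assuming the caller reaches the struct through the StartHandshake callback defined further down (the manager invokes the callback while holding its own lock):

// Hypothetical caller: queue one outbound payload behind a pending handshake.
f.handshakeManager.StartHandshake(vpnIp, func(hh *HandshakeHostInfo) {
	// cachePacket copies packet, so the caller may reuse its buffer immediately;
	// past 100 queued packets it only bumps the dropped counter.
	hh.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics)
})
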
func NewHandshakeManager(l *logrus.Logger, mainHostMap *HostMap, lightHouse *LightHouse, outside udp.Conn, config HandshakeConfig) *HandshakeManager {
return &HandshakeManager{
pendingHostMap: NewHostMap(l, "pending", tunCidr, preferredRanges),
vpnIps: map[iputil.VpnIp]*HandshakeHostInfo{},
indexes: map[uint32]*HandshakeHostInfo{},
mainHostMap: mainHostMap,
lightHouse: lightHouse,
outside: outside,
@@ -73,7 +118,7 @@ func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges [
}
}

func (c *HandshakeManager) Run(ctx context.Context, f EncWriter) {
func (c *HandshakeManager) Run(ctx context.Context) {
clockSource := time.NewTicker(c.config.tryInterval)
defer clockSource.Stop()

@@ -82,58 +127,80 @@ func (c *HandshakeManager) Run(ctx context.Context, f EncWriter) {
case <-ctx.Done():
return
case vpnIP := <-c.trigger:
c.handleOutbound(vpnIP, f, true)
c.handleOutbound(vpnIP, true)
case now := <-clockSource.C:
c.NextOutboundHandshakeTimerTick(now, f)
c.NextOutboundHandshakeTimerTick(now)
}
}
}

func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f EncWriter) {
func (hm *HandshakeManager) HandleIncoming(addr *udp.Addr, via *ViaSender, packet []byte, h *header.H) {
// First remote allow list check before we know the vpnIp
if addr != nil {
if !hm.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
hm.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
return
}
}

switch h.Subtype {
case header.HandshakeIXPSK0:
switch h.MessageCounter {
case 1:
ixHandshakeStage1(hm.f, addr, via, packet, h)

case 2:
newHostinfo := hm.queryIndex(h.RemoteIndex)
tearDown := ixHandshakeStage2(hm.f, addr, via, newHostinfo, packet, h)
if tearDown && newHostinfo != nil {
hm.DeleteHostInfo(newHostinfo.hostinfo)
}
}
}
}

func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time) {
c.OutboundHandshakeTimer.Advance(now)
for {
vpnIp, has := c.OutboundHandshakeTimer.Purge()
if !has {
break
}
c.handleOutbound(vpnIp, f, false)
c.handleOutbound(vpnIp, false)
}
}

func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, lighthouseTriggered bool) {
hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp)
if err != nil {
func (hm *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, lighthouseTriggered bool) {
hh := hm.queryVpnIp(vpnIp)
if hh == nil {
return
}
hostinfo.Lock()
defer hostinfo.Unlock()
hh.Lock()
defer hh.Unlock()

// We may have raced to completion but now that we have a lock we should ensure we have not yet completed.
if hostinfo.HandshakeComplete {
// Ensure we don't exist in the pending hostmap anymore since we have completed
c.pendingHostMap.DeleteHostInfo(hostinfo)
hostinfo := hh.hostinfo
// If we are out of time, clean up
if hh.counter >= hm.config.retries {
hh.hostinfo.logger(hm.l).WithField("udpAddrs", hh.hostinfo.remotes.CopyAddrs(hm.mainHostMap.GetPreferredRanges())).
WithField("initiatorIndex", hh.hostinfo.localIndexId).
WithField("remoteIndex", hh.hostinfo.remoteIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithField("durationNs", time.Since(hh.startTime).Nanoseconds()).
Info("Handshake timed out")
hm.metricTimedOut.Inc(1)
hm.DeleteHostInfo(hostinfo)
return
}

// Increment the counter to increase our delay, linear backoff
hh.counter++

// Check if we have a handshake packet to transmit yet
if !hostinfo.HandshakeReady {
// There is currently a slight race in getOrHandshake due to ConnectionState not being part of the HostInfo directly
// Our hostinfo here was added to the pending map and the wheel may have ticked to us before we created ConnectionState
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
return
}

// If we are out of time, clean up
if hostinfo.HandshakeCounter >= c.config.retries {
hostinfo.logger(c.l).WithField("udpAddrs", hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("remoteIndex", hostinfo.remoteIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithField("durationNs", time.Since(hostinfo.handshakeStart).Nanoseconds()).
Info("Handshake timed out")
c.metricTimedOut.Inc(1)
c.pendingHostMap.DeleteHostInfo(hostinfo)
return
if !hh.ready {
if !ixHandshakeStage0(hm.f, hh) {
hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval*time.Duration(hh.counter))
return
}
}

// Get a remotes object if we don't already have one.
@@ -141,11 +208,11 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
// NB ^ This comment doesn't jive. It's how the thing gets initialized.
// It's the common path. Should it update every time, in case a future LH query/queries give us more info?
if hostinfo.remotes == nil {
hostinfo.remotes = c.lightHouse.QueryCache(vpnIp)
hostinfo.remotes = hm.lightHouse.QueryCache(vpnIp)
}

remotes := hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)
remotesHaveChanged := !udp.AddrSlice(remotes).Equal(hostinfo.HandshakeLastRemotes)
remotes := hostinfo.remotes.CopyAddrs(hm.mainHostMap.GetPreferredRanges())
remotesHaveChanged := !udp.AddrSlice(remotes).Equal(hh.lastRemotes)

// We only care about a lighthouse trigger if we have new remotes to send to.
// This is a very specific optimization for a fast lighthouse reply.
@@ -154,25 +221,25 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
return
}

hostinfo.HandshakeLastRemotes = remotes
hh.lastRemotes = remotes

// TODO: this will generate a load of queries for hosts with only 1 ip
// (such as ones registered to the lighthouse with only a private IP)
// So we only do it one time after attempting 5 handshakes already.
if len(remotes) <= 1 && hostinfo.HandshakeCounter == 5 {
if len(remotes) <= 1 && hh.counter == 5 {
// If we only have 1 remote it is highly likely our query raced with the other host registered within the lighthouse
// Our vpnIp here has a tunnel with a lighthouse but has yet to send a host update packet there so we only know about
// the learned public ip for them. Query again to short circuit the promotion counter
c.lightHouse.QueryServer(vpnIp, f)
hm.lightHouse.QueryServer(vpnIp)
}

// Send the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
var sentTo []*udp.Addr
hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
c.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
err = c.outside.WriteTo(hostinfo.HandshakePacket[0], addr)
hostinfo.remotes.ForEach(hm.mainHostMap.GetPreferredRanges(), func(addr *udp.Addr, _ bool) {
hm.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
err := hm.outside.WriteTo(hostinfo.HandshakePacket[0], addr)
if err != nil {
hostinfo.logger(c.l).WithField("udpAddr", addr).
hostinfo.logger(hm.l).WithField("udpAddr", addr).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
WithError(err).Error("Failed to send handshake message")
@@ -185,63 +252,63 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
// Don't be too noisy or confusing if we fail to send a handshake - if we don't get through we'll eventually log a timeout,
// so only log when the list of remotes has changed
if remotesHaveChanged {
hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
hostinfo.logger(hm.l).WithField("udpAddrs", sentTo).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Info("Handshake message sent")
} else if c.l.IsLevelEnabled(logrus.DebugLevel) {
hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
} else if hm.l.IsLevelEnabled(logrus.DebugLevel) {
hostinfo.logger(hm.l).WithField("udpAddrs", sentTo).
WithField("initiatorIndex", hostinfo.localIndexId).
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
Debug("Handshake message sent")
}

if c.config.useRelays && len(hostinfo.remotes.relays) > 0 {
hostinfo.logger(c.l).WithField("relays", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
if hm.config.useRelays && len(hostinfo.remotes.relays) > 0 {
hostinfo.logger(hm.l).WithField("relays", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
// Send a RelayRequest to all known Relay IP's
for _, relay := range hostinfo.remotes.relays {
// Don't relay to myself, and don't relay through the host I'm trying to connect to
if *relay == vpnIp || *relay == c.lightHouse.myVpnIp {
if *relay == vpnIp || *relay == hm.lightHouse.myVpnIp {
continue
}
relayHostInfo, err := c.mainHostMap.QueryVpnIp(*relay)
if err != nil || relayHostInfo.remote == nil {
hostinfo.logger(c.l).WithError(err).WithField("relay", relay.String()).Info("Establish tunnel to relay target")
f.Handshake(*relay)
relayHostInfo := hm.mainHostMap.QueryVpnIp(*relay)
if relayHostInfo == nil || relayHostInfo.remote == nil {
hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Establish tunnel to relay target")
hm.f.Handshake(*relay)
continue
}
// Check the relay HostInfo to see if we already established a relay through it
if existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp); ok {
switch existingRelay.State {
case Established:
hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Send handshake via relay")
f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Send handshake via relay")
hm.f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
case Requested:
hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
// Re-send the CreateRelay request, in case the previous one was lost.
m := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: existingRelay.LocalIndex,
RelayFromIp: uint32(c.lightHouse.myVpnIp),
RelayFromIp: uint32(hm.lightHouse.myVpnIp),
RelayToIp: uint32(vpnIp),
}
msg, err := m.Marshal()
if err != nil {
hostinfo.logger(c.l).
hostinfo.logger(hm.l).
WithError(err).
Error("Failed to marshal Control message to create relay")
} else {
// This must send over the hostinfo, not over hm.Hosts[ip]
f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
c.l.WithFields(logrus.Fields{
"relayFrom": c.lightHouse.myVpnIp,
hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
hm.l.WithFields(logrus.Fields{
"relayFrom": hm.lightHouse.myVpnIp,
"relayTo": vpnIp,
"initiatorRelayIndex": existingRelay.LocalIndex,
"relay": *relay}).
Info("send CreateRelayRequest")
}
default:
hostinfo.logger(c.l).
hostinfo.logger(hm.l).
WithField("vpnIp", vpnIp).
WithField("state", existingRelay.State).
WithField("relay", relayHostInfo.vpnIp).
@@ -250,26 +317,26 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
} else {
// No relays exist or requested yet.
if relayHostInfo.remote != nil {
idx, err := AddRelay(c.l, relayHostInfo, c.mainHostMap, vpnIp, nil, TerminalType, Requested)
idx, err := AddRelay(hm.l, relayHostInfo, hm.mainHostMap, vpnIp, nil, TerminalType, Requested)
if err != nil {
hostinfo.logger(c.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap")
hostinfo.logger(hm.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap")
}

m := NebulaControl{
Type: NebulaControl_CreateRelayRequest,
InitiatorRelayIndex: idx,
RelayFromIp: uint32(c.lightHouse.myVpnIp),
RelayFromIp: uint32(hm.lightHouse.myVpnIp),
RelayToIp: uint32(vpnIp),
}
msg, err := m.Marshal()
if err != nil {
hostinfo.logger(c.l).
hostinfo.logger(hm.l).
WithError(err).
Error("Failed to marshal Control message to create relay")
} else {
f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
c.l.WithFields(logrus.Fields{
"relayFrom": c.lightHouse.myVpnIp,
hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
hm.l.WithFields(logrus.Fields{
"relayFrom": hm.lightHouse.myVpnIp,
"relayTo": vpnIp,
"initiatorRelayIndex": idx,
"relay": *relay}).
@@ -280,23 +347,82 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
}
}

// Increment the counter to increase our delay, linear backoff
hostinfo.HandshakeCounter++

// If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add
if !lighthouseTriggered {
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval*time.Duration(hh.counter))
}
}
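
Note the backoff shape in handleOutbound: each pass increments hh.counter and re-queues the vpn ip at tryInterval*counter, so the delay grows arithmetically until config.retries is exhausted. A self-contained illustration of the cumulative wait, assuming the usual defaults of a 100ms try interval and 10 retries (both configurable):

package main

import (
	"fmt"
	"time"
)

func main() {
	tryInterval := 100 * time.Millisecond // assumed default try interval
	var total time.Duration
	for n := 1; n <= 10; n++ { // assumed default retry count
		total += tryInterval * time.Duration(n) // linear backoff: the n-th wait is n intervals
	}
	fmt.Println(total) // 5.5s in total before "Handshake timed out" is logged
}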

func (c *HandshakeManager) AddVpnIp(vpnIp iputil.VpnIp, init func(*HostInfo)) *HostInfo {
hostinfo, created := c.pendingHostMap.AddVpnIp(vpnIp, init)
// GetOrHandshake will try to find a hostinfo with a fully formed tunnel or start a new handshake if one is not present
// The 2nd argument will be true if the hostinfo is ready to transmit traffic
func (hm *HandshakeManager) GetOrHandshake(vpnIp iputil.VpnIp, cacheCb func(*HandshakeHostInfo)) (*HostInfo, bool) {
hm.mainHostMap.RLock()
h, ok := hm.mainHostMap.Hosts[vpnIp]
hm.mainHostMap.RUnlock()

if created {
c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval)
c.metricInitiated.Inc(1)
if ok {
// Do not attempt promotion if you are a lighthouse
if !hm.lightHouse.amLighthouse {
h.TryPromoteBest(hm.mainHostMap.GetPreferredRanges(), hm.f)
}
return h, true
}

return hm.StartHandshake(vpnIp, cacheCb), false
}

// StartHandshake will ensure a handshake is currently being attempted for the provided vpn ip
func (hm *HandshakeManager) StartHandshake(vpnIp iputil.VpnIp, cacheCb func(*HandshakeHostInfo)) *HostInfo {
hm.Lock()

if hh, ok := hm.vpnIps[vpnIp]; ok {
// We are already trying to handshake with this vpn ip
if cacheCb != nil {
cacheCb(hh)
}
hm.Unlock()
return hh.hostinfo
}

hostinfo := &HostInfo{
vpnIp: vpnIp,
HandshakePacket: make(map[uint8][]byte, 0),
relayState: RelayState{
relays: map[iputil.VpnIp]struct{}{},
relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
}

hh := &HandshakeHostInfo{
hostinfo: hostinfo,
startTime: time.Now(),
}
hm.vpnIps[vpnIp] = hh
hm.metricInitiated.Inc(1)
hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval)

if cacheCb != nil {
cacheCb(hh)
}

// If this is a static host, we don't need to wait for the HostQueryReply
// We can trigger the handshake right now
_, doTrigger := hm.lightHouse.GetStaticHostList()[vpnIp]
if !doTrigger {
// Add any calculated remotes, and trigger early handshake if one found
doTrigger = hm.lightHouse.addCalculatedRemotes(vpnIp)
}

if doTrigger {
select {
case hm.trigger <- vpnIp:
default:
}
}

hm.Unlock()
hm.lightHouse.QueryServer(vpnIp)
return hostinfo
}

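GetOrHandshake is now the send path's single entry point: it returns the hostinfo plus a ready flag, and the callback fires only while the tunnel is still pending. A hedged sketch of the expected call shape (a fragment; f, vpnIp, and packet come from the surrounding Interface context):

hostinfo, ready := f.handshakeManager.GetOrHandshake(vpnIp, func(hh *HandshakeHostInfo) {
	// Only invoked while the handshake is pending; stage 2 flushes the store.
	hh.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics)
})
if hostinfo == nil || !ready {
	return // if cached, the packet is delivered once the handshake completes
}
// ...encrypt and transmit through hostinfo as usual
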
@@ -318,10 +444,10 @@ var (
// ErrLocalIndexCollision if we already have an entry in the main or pending
// hostmap for the hostinfo.localIndexId.
func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, f *Interface) (*HostInfo, error) {
c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock()
c.mainHostMap.Lock()
defer c.mainHostMap.Unlock()
c.Lock()
defer c.Unlock()

// Check if we already have a tunnel with this vpn ip
existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
@@ -350,8 +476,8 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
return existingIndex, ErrLocalIndexCollision
}

existingIndex, found = c.pendingHostMap.Indexes[hostinfo.localIndexId]
if found && existingIndex != hostinfo {
existingPendingIndex, found := c.indexes[hostinfo.localIndexId]
if found && existingPendingIndex.hostinfo != hostinfo {
// We have a collision, but for a different hostinfo
return existingIndex, ErrLocalIndexCollision
}
@@ -372,47 +498,47 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
// Complete is a simpler version of CheckAndComplete when we already know we
// won't have a localIndexId collision because we already have an entry in the
// pendingHostMap. An existing hostinfo is returned if there was one.
func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock()
c.mainHostMap.Lock()
defer c.mainHostMap.Unlock()
func (hm *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
hm.mainHostMap.Lock()
defer hm.mainHostMap.Unlock()
hm.Lock()
defer hm.Unlock()

existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
existingRemoteIndex, found := hm.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
if found && existingRemoteIndex != nil {
// We have a collision, but this can happen since we can't control
// the remote ID. Just log about the situation as a note.
hostinfo.logger(c.l).
hostinfo.logger(hm.l).
WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
Info("New host shadows existing host remoteIndex")
}

// We need to remove from the pending hostmap first to avoid undoing work when after to the main hostmap.
c.pendingHostMap.unlockedDeleteHostInfo(hostinfo)
c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
hm.unlockedDeleteHostInfo(hostinfo)
hm.mainHostMap.unlockedAddHostInfo(hostinfo, f)
}

// AddIndexHostInfo generates a unique localIndexId for this HostInfo
// allocateIndex generates a unique localIndexId for this HostInfo
// and adds it to the pendingHostMap. Will error if we are unable to generate
// a unique localIndexId
func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error {
c.pendingHostMap.Lock()
defer c.pendingHostMap.Unlock()
c.mainHostMap.RLock()
defer c.mainHostMap.RUnlock()
func (hm *HandshakeManager) allocateIndex(hh *HandshakeHostInfo) error {
hm.mainHostMap.RLock()
defer hm.mainHostMap.RUnlock()
hm.Lock()
defer hm.Unlock()

for i := 0; i < 32; i++ {
index, err := generateIndex(c.l)
index, err := generateIndex(hm.l)
if err != nil {
return err
}

_, inPending := c.pendingHostMap.Indexes[index]
_, inMain := c.mainHostMap.Indexes[index]
_, inPending := hm.indexes[index]
_, inMain := hm.mainHostMap.Indexes[index]

if !inMain && !inPending {
h.localIndexId = index
c.pendingHostMap.Indexes[index] = h
hh.hostinfo.localIndexId = index
hm.indexes[index] = hh
return nil
}
}
@@ -420,22 +546,90 @@ func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error {
return errors.New("failed to generate unique localIndexId")
}

func (c *HandshakeManager) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
c.pendingHostMap.addRemoteIndexHostInfo(index, h)
}

func (c *HandshakeManager) DeleteHostInfo(hostinfo *HostInfo) {
//l.Debugln("Deleting pending hostinfo :", hostinfo)
c.pendingHostMap.DeleteHostInfo(hostinfo)
c.Lock()
defer c.Unlock()
c.unlockedDeleteHostInfo(hostinfo)
}

func (c *HandshakeManager) QueryIndex(index uint32) (*HostInfo, error) {
return c.pendingHostMap.QueryIndex(index)
func (c *HandshakeManager) unlockedDeleteHostInfo(hostinfo *HostInfo) {
delete(c.vpnIps, hostinfo.vpnIp)
if len(c.vpnIps) == 0 {
c.vpnIps = map[iputil.VpnIp]*HandshakeHostInfo{}
}

delete(c.indexes, hostinfo.localIndexId)
if len(c.vpnIps) == 0 {
c.indexes = map[uint32]*HandshakeHostInfo{}
}

if c.l.Level >= logrus.DebugLevel {
c.l.WithField("hostMap", m{"mapTotalSize": len(c.vpnIps),
"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
Debug("Pending hostmap hostInfo deleted")
}
}

func (hm *HandshakeManager) QueryVpnIp(vpnIp iputil.VpnIp) *HostInfo {
hh := hm.queryVpnIp(vpnIp)
if hh != nil {
return hh.hostinfo
}
return nil

}

func (hm *HandshakeManager) queryVpnIp(vpnIp iputil.VpnIp) *HandshakeHostInfo {
hm.RLock()
defer hm.RUnlock()
return hm.vpnIps[vpnIp]
}

func (hm *HandshakeManager) QueryIndex(index uint32) *HostInfo {
hh := hm.queryIndex(index)
if hh != nil {
return hh.hostinfo
}
return nil
}

func (hm *HandshakeManager) queryIndex(index uint32) *HandshakeHostInfo {
hm.RLock()
defer hm.RUnlock()
return hm.indexes[index]
}

func (c *HandshakeManager) GetPreferredRanges() []*net.IPNet {
return c.mainHostMap.GetPreferredRanges()
}

func (c *HandshakeManager) ForEachVpnIp(f controlEach) {
c.RLock()
defer c.RUnlock()

for _, v := range c.vpnIps {
f(v.hostinfo)
}
}

func (c *HandshakeManager) ForEachIndex(f controlEach) {
c.RLock()
defer c.RUnlock()

for _, v := range c.indexes {
f(v.hostinfo)
}
}

func (c *HandshakeManager) EmitStats() {
c.pendingHostMap.EmitStats("pending")
c.mainHostMap.EmitStats("main")
c.RLock()
hostLen := len(c.vpnIps)
indexLen := len(c.indexes)
c.RUnlock()

metrics.GetOrRegisterGauge("hostmap.pending.hosts", nil).Update(int64(hostLen))
metrics.GetOrRegisterGauge("hostmap.pending.indexes", nil).Update(int64(indexLen))
c.mainHostMap.EmitStats()
}

// Utility functions below

@@ -5,6 +5,7 @@ import (
"testing"
"time"

"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/test"
@@ -14,56 +15,55 @@ import (

func Test_NewHandshakeManagerVpnIp(t *testing.T) {
l := test.NewLogger()
_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
preferredRanges := []*net.IPNet{localrange}
mw := &mockEncWriter{}
mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
mainHM := newHostMap(l, vpncidr)
mainHM.preferredRanges.Store(&preferredRanges)

lh := newTestLighthouse()

blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)

now := time.Now()
blah.NextOutboundHandshakeTimerTick(now, mw)

var initCalled bool
initFunc := func(*HostInfo) {
initCalled = true
cs := &CertState{
RawCertificate: []byte{},
PrivateKey: []byte{},
Certificate: &cert.NebulaCertificate{},
RawCertificateNoKey: []byte{},
}

i := blah.AddVpnIp(ip, initFunc)
assert.True(t, initCalled)
blah := NewHandshakeManager(l, mainHM, lh, &udp.NoopConn{}, defaultHandshakeConfig)
blah.f = &Interface{handshakeManager: blah, pki: &PKI{}, l: l}
blah.f.pki.cs.Store(cs)

initCalled = false
i2 := blah.AddVpnIp(ip, initFunc)
assert.False(t, initCalled)
now := time.Now()
blah.NextOutboundHandshakeTimerTick(now)

i := blah.StartHandshake(ip, nil)
i2 := blah.StartHandshake(ip, nil)
assert.Same(t, i, i2)

i.remotes = NewRemoteList(nil)
i.HandshakeReady = true

// Adding something to pending should not affect the main hostmap
assert.Len(t, mainHM.Hosts, 0)

// Confirm they are in the pending index list
assert.Contains(t, blah.pendingHostMap.Hosts, ip)
assert.Contains(t, blah.vpnIps, ip)

// Jump ahead `HandshakeRetries` ticks, offset by one to get the sleep logic right
for i := 1; i <= DefaultHandshakeRetries+1; i++ {
now = now.Add(time.Duration(i) * DefaultHandshakeTryInterval)
blah.NextOutboundHandshakeTimerTick(now, mw)
blah.NextOutboundHandshakeTimerTick(now)
}

// Confirm they are still in the pending index list
assert.Contains(t, blah.pendingHostMap.Hosts, ip)
assert.Contains(t, blah.vpnIps, ip)

// Tick 1 more time, a minute will certainly flush it out
blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute), mw)
blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute))

// Confirm they have been removed
assert.NotContains(t, blah.pendingHostMap.Hosts, ip)
assert.NotContains(t, blah.vpnIps, ip)
}

func testCountTimerWheelEntries(tw *LockingTimerWheel[iputil.VpnIp]) (c int) {

hostmap.go (360 lines changed)
@@ -2,7 +2,6 @@ package nebula

import (
"errors"
"fmt"
"net"
"sync"
"sync/atomic"
@@ -12,15 +11,18 @@ import (
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/header"
"github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
)

// const ProbeLen = 100
const PromoteEvery = 1000
const ReQueryEvery = 5000
const defaultPromoteEvery = 1000 // Count of packets sent before we try moving a tunnel to a preferred underlay ip address
const defaultReQueryEvery = 5000 // Count of packets sent before re-querying a hostinfo to the lighthouse
const defaultReQueryWait = time.Minute // Minimum amount of seconds to wait before re-querying a hostinfo the lighthouse. Evaluated every ReQueryEvery
const MaxRemotes = 10
const maxRecvError = 4

// MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip
// 5 allows for an initial handshake and each host pair re-handshaking twice
@@ -52,14 +54,12 @@ type Relay struct {

type HostMap struct {
sync.RWMutex //Because we concurrently read and write to our maps
name string
Indexes map[uint32]*HostInfo
Relays map[uint32]*HostInfo // Maps a Relay IDX to a Relay HostInfo object
RemoteIndexes map[uint32]*HostInfo
Hosts map[iputil.VpnIp]*HostInfo
preferredRanges []*net.IPNet
preferredRanges atomic.Pointer[[]*net.IPNet]
vpnCIDR *net.IPNet
metricsEnabled bool
l *logrus.Logger
}

@@ -197,25 +197,24 @@ func (rs *RelayState) InsertRelay(ip iputil.VpnIp, idx uint32, r *Relay) {
}

type HostInfo struct {
sync.RWMutex
remote *udp.Addr
remotes *RemoteList
promoteCounter atomic.Uint32
ConnectionState *ConnectionState
remoteIndexId uint32
localIndexId uint32
vpnIp iputil.VpnIp
recvError atomic.Uint32
remoteCidr *cidr.Tree4[struct{}]
relayState RelayState

remote *udp.Addr
remotes *RemoteList
promoteCounter atomic.Uint32
ConnectionState *ConnectionState
handshakeStart time.Time //todo: this an entry in the handshake manager
HandshakeReady bool //todo: being in the manager means you are ready
HandshakeCounter int //todo: another handshake manager entry
HandshakeLastRemotes []*udp.Addr //todo: another handshake manager entry, which remotes we sent to last time
HandshakeComplete bool //todo: this should go away in favor of ConnectionState.ready
HandshakePacket map[uint8][]byte //todo: this is other handshake manager entry
packetStore []*cachedPacket //todo: this is other handshake manager entry
remoteIndexId uint32
localIndexId uint32
vpnIp iputil.VpnIp
recvError int
remoteCidr *cidr.Tree4
relayState RelayState
// HandshakePacket records the packets used to create this hostinfo
// We need these to avoid replayed handshake packets creating new hostinfos which causes churn
HandshakePacket map[uint8][]byte

// nextLHQuery is the earliest we can ask the lighthouse for new information.
// This is used to limit lighthouse re-queries in chatty clients
nextLHQuery atomic.Int64

// lastRebindCount is the other side of Interface.rebindCount, if these values don't match then we need to ask LH
// for a punch from the remote end of this tunnel. The goal being to prime their conntrack for our traffic just like
@@ -255,26 +254,57 @@ type cachedPacketMetrics struct {
dropped metrics.Counter
}

func NewHostMap(l *logrus.Logger, name string, vpnCIDR *net.IPNet, preferredRanges []*net.IPNet) *HostMap {
h := map[iputil.VpnIp]*HostInfo{}
i := map[uint32]*HostInfo{}
r := map[uint32]*HostInfo{}
relays := map[uint32]*HostInfo{}
m := HostMap{
name: name,
Indexes: i,
Relays: relays,
RemoteIndexes: r,
Hosts: h,
preferredRanges: preferredRanges,
vpnCIDR: vpnCIDR,
l: l,
}
return &m
func NewHostMapFromConfig(l *logrus.Logger, vpnCIDR *net.IPNet, c *config.C) *HostMap {
hm := newHostMap(l, vpnCIDR)

hm.reload(c, true)
c.RegisterReloadCallback(func(c *config.C) {
hm.reload(c, false)
})

l.WithField("network", hm.vpnCIDR.String()).
WithField("preferredRanges", hm.GetPreferredRanges()).
Info("Main HostMap created")

return hm
}

// UpdateStats takes a name and reports host and index counts to the stats collection system
func (hm *HostMap) EmitStats(name string) {
func newHostMap(l *logrus.Logger, vpnCIDR *net.IPNet) *HostMap {
return &HostMap{
Indexes: map[uint32]*HostInfo{},
Relays: map[uint32]*HostInfo{},
RemoteIndexes: map[uint32]*HostInfo{},
Hosts: map[iputil.VpnIp]*HostInfo{},
vpnCIDR: vpnCIDR,
l: l,
}
}

func (hm *HostMap) reload(c *config.C, initial bool) {
if initial || c.HasChanged("preferred_ranges") {
var preferredRanges []*net.IPNet
rawPreferredRanges := c.GetStringSlice("preferred_ranges", []string{})

for _, rawPreferredRange := range rawPreferredRanges {
_, preferredRange, err := net.ParseCIDR(rawPreferredRange)

if err != nil {
hm.l.WithError(err).WithField("range", rawPreferredRanges).Warn("Failed to parse preferred ranges, ignoring")
continue
}

preferredRanges = append(preferredRanges, preferredRange)
}

oldRanges := hm.preferredRanges.Swap(&preferredRanges)
if !initial {
hm.l.WithField("oldPreferredRanges", *oldRanges).WithField("newPreferredRanges", preferredRanges).Info("preferred_ranges changed")
}
}
}

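preferredRanges is now published through an atomic.Pointer, so reload can swap the whole slice in one step while readers on the packet path do a single lock-free load. A minimal standalone sketch of the same pattern (not the nebula code itself):

package main

import (
	"fmt"
	"net"
	"sync/atomic"
)

var preferred atomic.Pointer[[]*net.IPNet]

func publish(raw []string) {
	var ranges []*net.IPNet
	for _, s := range raw {
		if _, n, err := net.ParseCIDR(s); err == nil {
			ranges = append(ranges, n)
		}
	}
	preferred.Store(&ranges) // one atomic swap; readers never block
}

func main() {
	publish(nil) // store once before any Load, mirroring the NOTE in GetPreferredRanges
	publish([]string{"10.1.1.0/24"})
	fmt.Println(*preferred.Load())
}
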
// EmitStats reports host, index, and relay counts to the stats collection system
func (hm *HostMap) EmitStats() {
hm.RLock()
hostLen := len(hm.Hosts)
indexLen := len(hm.Indexes)
@@ -282,10 +312,10 @@ func (hm *HostMap) EmitStats(name string) {
relaysLen := len(hm.Relays)
hm.RUnlock()

metrics.GetOrRegisterGauge("hostmap."+name+".hosts", nil).Update(int64(hostLen))
metrics.GetOrRegisterGauge("hostmap."+name+".indexes", nil).Update(int64(indexLen))
metrics.GetOrRegisterGauge("hostmap."+name+".remoteIndexes", nil).Update(int64(remoteIndexLen))
metrics.GetOrRegisterGauge("hostmap."+name+".relayIndexes", nil).Update(int64(relaysLen))
metrics.GetOrRegisterGauge("hostmap.main.hosts", nil).Update(int64(hostLen))
metrics.GetOrRegisterGauge("hostmap.main.indexes", nil).Update(int64(indexLen))
metrics.GetOrRegisterGauge("hostmap.main.remoteIndexes", nil).Update(int64(remoteIndexLen))
metrics.GetOrRegisterGauge("hostmap.main.relayIndexes", nil).Update(int64(relaysLen))
}

func (hm *HostMap) RemoveRelay(localIdx uint32) {
@@ -299,88 +329,6 @@ func (hm *HostMap) RemoveRelay(localIdx uint32) {
hm.Unlock()
}

func (hm *HostMap) GetIndexByVpnIp(vpnIp iputil.VpnIp) (uint32, error) {
hm.RLock()
if i, ok := hm.Hosts[vpnIp]; ok {
index := i.localIndexId
hm.RUnlock()
return index, nil
}
hm.RUnlock()
return 0, errors.New("vpn IP not found")
}

func (hm *HostMap) Add(ip iputil.VpnIp, hostinfo *HostInfo) {
hm.Lock()
hm.Hosts[ip] = hostinfo
hm.Unlock()
}

func (hm *HostMap) AddVpnIp(vpnIp iputil.VpnIp, init func(hostinfo *HostInfo)) (hostinfo *HostInfo, created bool) {
hm.RLock()
if h, ok := hm.Hosts[vpnIp]; !ok {
hm.RUnlock()
h = &HostInfo{
vpnIp: vpnIp,
HandshakePacket: make(map[uint8][]byte, 0),
relayState: RelayState{
relays: map[iputil.VpnIp]struct{}{},
relayForByIp: map[iputil.VpnIp]*Relay{},
relayForByIdx: map[uint32]*Relay{},
},
}
if init != nil {
init(h)
}
hm.Lock()
hm.Hosts[vpnIp] = h
hm.Unlock()
return h, true
} else {
hm.RUnlock()
return h, false
}
}

// Only used by pendingHostMap when the remote index is not initially known
func (hm *HostMap) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
hm.Lock()
h.remoteIndexId = index
hm.RemoteIndexes[index] = h
hm.Unlock()

if hm.l.Level > logrus.DebugLevel {
hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes),
"hostinfo": m{"existing": true, "localIndexId": h.localIndexId, "hostId": h.vpnIp}}).
Debug("Hostmap remoteIndex added")
}
}

// DeleteReverseIndex is used to clean up on recv_error
// This function should only ever be called on the pending hostmap
func (hm *HostMap) DeleteReverseIndex(index uint32) {
hm.Lock()
hostinfo, ok := hm.RemoteIndexes[index]
if ok {
delete(hm.Indexes, hostinfo.localIndexId)
delete(hm.RemoteIndexes, index)

// Check if we have an entry under hostId that matches the same hostinfo
// instance. Clean it up as well if we do (they might not match in pendingHostmap)
var hostinfo2 *HostInfo
hostinfo2, ok = hm.Hosts[hostinfo.vpnIp]
if ok && hostinfo2 == hostinfo {
delete(hm.Hosts, hostinfo.vpnIp)
}
}
hm.Unlock()

if hm.l.Level >= logrus.DebugLevel {
hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes)}).
Debug("Hostmap remote index deleted")
}
}

// DeleteHostInfo will fully unlink the hostinfo and return true if it was the final hostinfo for this vpn ip
func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
// Delete the host itself, ensuring it's not modified anymore
@@ -393,12 +341,6 @@ func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
return final
}

func (hm *HostMap) DeleteRelayIdx(localIdx uint32) {
hm.Lock()
defer hm.Unlock()
delete(hm.RemoteIndexes, localIdx)
}

func (hm *HostMap) MakePrimary(hostinfo *HostInfo) {
hm.Lock()
defer hm.Unlock()
@@ -476,7 +418,7 @@ func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
}

if hm.l.Level >= logrus.DebugLevel {
hm.l.WithField("hostMap", m{"mapName": hm.name, "mapTotalSize": len(hm.Hosts),
hm.l.WithField("hostMap", m{"mapTotalSize": len(hm.Hosts),
"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
Debug("Hostmap hostInfo deleted")
}
@@ -486,55 +428,40 @@ func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
}
}

func (hm *HostMap) QueryIndex(index uint32) (*HostInfo, error) {
//TODO: we probably just want to return bool instead of error, or at least a static error
func (hm *HostMap) QueryIndex(index uint32) *HostInfo {
hm.RLock()
if h, ok := hm.Indexes[index]; ok {
hm.RUnlock()
return h, nil
return h
} else {
hm.RUnlock()
return nil, errors.New("unable to find index")
return nil
}
}

// Retrieves a HostInfo by Index. Returns whether the HostInfo is primary at time of query.
// This helper exists so that the hostinfo.prev pointer can be read while the hostmap lock is held.
func (hm *HostMap) QueryIndexIsPrimary(index uint32) (*HostInfo, bool, error) {
//TODO: we probably just want to return bool instead of error, or at least a static error
hm.RLock()
if h, ok := hm.Indexes[index]; ok {
hm.RUnlock()
return h, h.prev == nil, nil
} else {
hm.RUnlock()
return nil, false, errors.New("unable to find index")
}
}
func (hm *HostMap) QueryRelayIndex(index uint32) (*HostInfo, error) {
//TODO: we probably just want to return bool instead of error, or at least a static error
func (hm *HostMap) QueryRelayIndex(index uint32) *HostInfo {
hm.RLock()
if h, ok := hm.Relays[index]; ok {
hm.RUnlock()
return h, nil
return h
} else {
hm.RUnlock()
return nil, errors.New("unable to find index")
return nil
}
}

func (hm *HostMap) QueryReverseIndex(index uint32) (*HostInfo, error) {
func (hm *HostMap) QueryReverseIndex(index uint32) *HostInfo {
hm.RLock()
if h, ok := hm.RemoteIndexes[index]; ok {
hm.RUnlock()
return h, nil
return h
} else {
hm.RUnlock()
return nil, fmt.Errorf("unable to find reverse index or connectionstate nil in %s hostmap", hm.name)
return nil
}
}

func (hm *HostMap) QueryVpnIp(vpnIp iputil.VpnIp) (*HostInfo, error) {
func (hm *HostMap) QueryVpnIp(vpnIp iputil.VpnIp) *HostInfo {
return hm.queryVpnIp(vpnIp, nil)
}

@@ -556,26 +483,20 @@ func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp iputil.VpnIp) (*Host
return nil, nil, errors.New("unable to find host with relay")
}

// PromoteBestQueryVpnIp will attempt to lazily switch to the best remote every
// `PromoteEvery` calls to this function for a given host.
func (hm *HostMap) PromoteBestQueryVpnIp(vpnIp iputil.VpnIp, ifce *Interface) (*HostInfo, error) {
return hm.queryVpnIp(vpnIp, ifce)
}

func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) (*HostInfo, error) {
func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) *HostInfo {
hm.RLock()
if h, ok := hm.Hosts[vpnIp]; ok {
hm.RUnlock()
// Do not attempt promotion if you are a lighthouse
if promoteIfce != nil && !promoteIfce.lightHouse.amLighthouse {
h.TryPromoteBest(hm.preferredRanges, promoteIfce)
h.TryPromoteBest(hm.GetPreferredRanges(), promoteIfce)
}
return h, nil
return h

}

hm.RUnlock()
return nil, errors.New("unable to find host")
return nil
}

// unlockedAddHostInfo assumes you have a write-lock and will add a hostinfo object to the hostmap Indexes and RemoteIndexes maps.
@@ -598,7 +519,7 @@ func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo

if hm.l.Level >= logrus.DebugLevel {
hm.l.WithField("hostMap", m{"mapName": hm.name, "vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts),
hm.l.WithField("hostMap", m{"vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts),
"hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}).
Debug("Hostmap vpnIp added")
}
@@ -614,15 +535,35 @@ func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
}
}

func (hm *HostMap) GetPreferredRanges() []*net.IPNet {
//NOTE: if preferredRanges is ever not stored before a load this will fail to dereference a nil pointer
return *hm.preferredRanges.Load()
}

func (hm *HostMap) ForEachVpnIp(f controlEach) {
hm.RLock()
defer hm.RUnlock()

for _, v := range hm.Hosts {
f(v)
}
}

func (hm *HostMap) ForEachIndex(f controlEach) {
hm.RLock()
defer hm.RUnlock()

for _, v := range hm.Indexes {
f(v)
}
}

// TryPromoteBest handles re-querying lighthouses and probing for better paths
// NOTE: It is an error to call this if you are a lighthouse since they should not roam clients!
func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface) {
c := i.promoteCounter.Add(1)
if c%PromoteEvery == 0 {
// The lock here is currently protecting i.remote access
i.RLock()
if c%ifce.tryPromoteEvery.Load() == 0 {
remote := i.remote
i.RUnlock()

// return early if we are already on a preferred remote
if remote != nil {
@@ -646,65 +587,17 @@ func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface)
}

// Re query our lighthouses for new remotes occasionally
if c%ReQueryEvery == 0 && ifce.lightHouse != nil {
ifce.lightHouse.QueryServer(i.vpnIp, ifce)
}
}

func (i *HostInfo) cachePacket(l *logrus.Logger, t header.MessageType, st header.MessageSubType, packet []byte, f packetCallback, m *cachedPacketMetrics) {
//TODO: return the error so we can log with more context
if len(i.packetStore) < 100 {
tempPacket := make([]byte, len(packet))
copy(tempPacket, packet)
//l.WithField("trace", string(debug.Stack())).Error("Caching packet", tempPacket)
i.packetStore = append(i.packetStore, &cachedPacket{t, st, f, tempPacket})
if l.Level >= logrus.DebugLevel {
i.logger(l).
WithField("length", len(i.packetStore)).
WithField("stored", true).
Debugf("Packet store")
if c%ifce.reQueryEvery.Load() == 0 && ifce.lightHouse != nil {
now := time.Now().UnixNano()
if now < i.nextLHQuery.Load() {
return
}

} else if l.Level >= logrus.DebugLevel {
m.dropped.Inc(1)
i.logger(l).
WithField("length", len(i.packetStore)).
WithField("stored", false).
Debugf("Packet store")
i.nextLHQuery.Store(now + ifce.reQueryWait.Load())
ifce.lightHouse.QueryServer(i.vpnIp)
}
}
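
Lighthouse re-queries are now gated twice in TryPromoteBest: the packet-count modulus (reQueryEvery) decides when to consider asking at all, and the wall-clock floor stored in nextLHQuery (reQueryWait) decides whether enough time has passed. A standalone sketch of that second gate; like the original it is best-effort rather than CAS-exact, so two racing callers can both pass, which is acceptable for a rate limit:

package main

import (
	"sync/atomic"
	"time"
)

type requeryGate struct {
	next atomic.Int64  // unix nanos of the earliest allowed query
	wait time.Duration // minimum spacing between queries
}

func (g *requeryGate) allow() bool {
	now := time.Now().UnixNano()
	if now < g.next.Load() {
		return false // still inside the cooldown window
	}
	g.next.Store(now + int64(g.wait))
	return true
}

func main() {
	g := &requeryGate{wait: time.Minute}
	_ = g.allow() // true: first query goes through immediately
	_ = g.allow() // false: suppressed until a minute has passed
}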

// handshakeComplete will set the connection as ready to communicate, as well as flush any stored packets
func (i *HostInfo) handshakeComplete(l *logrus.Logger, m *cachedPacketMetrics) {
//TODO: I'm not certain the distinction between handshake complete and ConnectionState being ready matters because:
//TODO: HandshakeComplete means send stored packets and ConnectionState.ready means we are ready to send
//TODO: if the transition from HandhsakeComplete to ConnectionState.ready happens all within this function they are identical

i.ConnectionState.queueLock.Lock()
i.HandshakeComplete = true
//TODO: this should be managed by the handshake state machine to set it based on how many handshake were seen.
// Clamping it to 2 gets us out of the woods for now
i.ConnectionState.messageCounter.Store(2)

if l.Level >= logrus.DebugLevel {
i.logger(l).Debugf("Sending %d stored packets", len(i.packetStore))
}

if len(i.packetStore) > 0 {
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
for _, cp := range i.packetStore {
cp.callback(cp.messageType, cp.messageSubType, i, cp.packet, nb, out)
}
m.sent.Inc(int64(len(i.packetStore)))
}

i.remotes.ResetBlockedRemotes()
i.packetStore = make([]*cachedPacket, 0)
i.ConnectionState.ready = true
i.ConnectionState.queueLock.Unlock()
}

func (i *HostInfo) GetCert() *cert.NebulaCertificate {
if i.ConnectionState != nil {
return i.ConnectionState.peerCert
@@ -736,7 +629,7 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
// NOTE: We do this loop here instead of calling `isPreferred` in
// remote_list.go so that we only have to loop over preferredRanges once.
newIsPreferred := false
for _, l := range hm.preferredRanges {
for _, l := range hm.GetPreferredRanges() {
// return early if we are already on a preferred remote
if l.Contains(currentRemote.IP) {
return false
@@ -761,9 +654,8 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
}

func (i *HostInfo) RecvErrorExceeded() bool {
if i.recvError < 3 {
i.recvError += 1
return false
if i.recvError.Add(1) >= maxRecvError {
return true
}
return true
}
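recvError likewise moved from a plain int to an atomic.Uint32 so concurrent recv_error packets cannot lose updates. A compact standalone sketch of the atomic check-and-bump threshold idiom (the intent of the change, not a verbatim copy of the function above):

package main

import (
	"fmt"
	"sync/atomic"
)

const maxRecvError = 4

var recvError atomic.Uint32

// exceeded reports whether this recv_error pushed the counter past the limit.
func exceeded() bool {
	return recvError.Add(1) >= maxRecvError
}

func main() {
	for i := 0; i < 4; i++ {
		fmt.Println(exceeded()) // false, false, false, true
	}
}
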
@@ -774,7 +666,7 @@ func (i *HostInfo) CreateRemoteCIDR(c *cert.NebulaCertificate) {
|
||||
return
|
||||
}
|
||||
|
||||
remoteCidr := cidr.NewTree4()
|
||||
remoteCidr := cidr.NewTree4[struct{}]()
|
||||
for _, ip := range c.Details.Ips {
|
||||
remoteCidr.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
|
||||
}
|
||||
|
@@ -4,19 +4,19 @@ import (
"net"
"testing"

"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert"
)

func TestHostMap_MakePrimary(t *testing.T) {
l := test.NewLogger()
hm := NewHostMap(
l, "test",
hm := newHostMap(
l,
&net.IPNet{
IP: net.IP{10, 0, 0, 1},
Mask: net.IPMask{255, 255, 255, 0},
},
[]*net.IPNet{},
)

f := &Interface{}
@@ -32,7 +32,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
hm.unlockedAddHostInfo(h1, f)

// Make sure we go h1 -> h2 -> h3 -> h4
prim, _ := hm.QueryVpnIp(1)
prim := hm.QueryVpnIp(1)
assert.Equal(t, h1.localIndexId, prim.localIndexId)
assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -47,7 +47,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
hm.MakePrimary(h3)

// Make sure we go h3 -> h1 -> h2 -> h4
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h3.localIndexId, prim.localIndexId)
assert.Equal(t, h1.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -62,7 +62,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
hm.MakePrimary(h4)

// Make sure we go h4 -> h3 -> h1 -> h2
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h4.localIndexId, prim.localIndexId)
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -77,7 +77,7 @@ func TestHostMap_MakePrimary(t *testing.T) {
hm.MakePrimary(h4)

// Make sure we go h4 -> h3 -> h1 -> h2
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h4.localIndexId, prim.localIndexId)
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -91,13 +91,12 @@ func TestHostMap_MakePrimary(t *testing.T) {

func TestHostMap_DeleteHostInfo(t *testing.T) {
l := test.NewLogger()
hm := NewHostMap(
l, "test",
hm := newHostMap(
l,
&net.IPNet{
IP: net.IP{10, 0, 0, 1},
Mask: net.IPMask{255, 255, 255, 0},
},
[]*net.IPNet{},
)

f := &Interface{}
@@ -119,11 +118,11 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
// h6 should be deleted
assert.Nil(t, h6.next)
assert.Nil(t, h6.prev)
_, err := hm.QueryIndex(h6.localIndexId)
assert.Error(t, err)
h := hm.QueryIndex(h6.localIndexId)
assert.Nil(t, h)

// Make sure we go h1 -> h2 -> h3 -> h4 -> h5
prim, _ := hm.QueryVpnIp(1)
prim := hm.QueryVpnIp(1)
assert.Equal(t, h1.localIndexId, prim.localIndexId)
assert.Equal(t, h2.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -142,7 +141,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h1.next)

// Make sure we go h2 -> h3 -> h4 -> h5
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h2.localIndexId, prim.localIndexId)
assert.Equal(t, h3.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -160,7 +159,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h3.next)

// Make sure we go h2 -> h4 -> h5
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h2.localIndexId, prim.localIndexId)
assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -176,7 +175,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h5.next)

// Make sure we go h2 -> h4
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h2.localIndexId, prim.localIndexId)
assert.Equal(t, h4.localIndexId, prim.next.localIndexId)
assert.Nil(t, prim.prev)
@@ -190,7 +189,7 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h2.next)

// Make sure we only have h4
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Equal(t, h4.localIndexId, prim.localIndexId)
assert.Nil(t, prim.prev)
assert.Nil(t, prim.next)
@@ -202,6 +201,36 @@ func TestHostMap_DeleteHostInfo(t *testing.T) {
assert.Nil(t, h4.next)

// Make sure we have nil
prim, _ = hm.QueryVpnIp(1)
prim = hm.QueryVpnIp(1)
assert.Nil(t, prim)
}

func TestHostMap_reload(t *testing.T) {
l := test.NewLogger()
c := config.NewC(l)

hm := NewHostMapFromConfig(
l,
&net.IPNet{
IP: net.IP{10, 0, 0, 1},
Mask: net.IPMask{255, 255, 255, 0},
},
c,
)

toS := func(ipn []*net.IPNet) []string {
var s []string
for _, n := range ipn {
s = append(s, n.String())
}
return s
}

assert.Empty(t, hm.GetPreferredRanges())

c.ReloadConfigString("preferred_ranges: [1.1.1.0/24, 10.1.1.0/24]")
assert.EqualValues(t, []string{"1.1.1.0/24", "10.1.1.0/24"}, toS(hm.GetPreferredRanges()))

c.ReloadConfigString("preferred_ranges: [1.1.1.1/32]")
assert.EqualValues(t, []string{"1.1.1.1/32"}, toS(hm.GetPreferredRanges()))
}
inside.go (135 changes)
@@ -1,7 +1,6 @@
package nebula

import (
"github.com/flynn/noise"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/header"
@@ -45,7 +44,10 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
return
}

hostinfo := f.getOrHandshake(fwPacket.RemoteIP)
hostinfo, ready := f.getOrHandshake(fwPacket.RemoteIP, func(hh *HandshakeHostInfo) {
hh.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics)
})

if hostinfo == nil {
f.rejectInside(packet, out, q)
if f.l.Level >= logrus.DebugLevel {
@@ -55,23 +57,14 @@ func (f *Interface) consumeInsidePacket(packet []byte, fwPacket *firewall.Packet
}
return
}
ci := hostinfo.ConnectionState

if !ci.ready {
// Because we might be sending stored packets, lock here to stop new things going to
// the packet queue.
ci.queueLock.Lock()
if !ci.ready {
hostinfo.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics)
ci.queueLock.Unlock()
return
}
ci.queueLock.Unlock()
if !ready {
return
}

dropReason := f.firewall.Drop(packet, *fwPacket, false, hostinfo, f.caPool, localCache)
dropReason := f.firewall.Drop(*fwPacket, false, hostinfo, f.pki.GetCAPool(), localCache)
if dropReason == nil {
f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, packet, nb, out, q)
f.sendNoMetrics(header.Message, 0, hostinfo.ConnectionState, hostinfo, nil, packet, nb, out, q)

} else {
f.rejectInside(packet, out, q)
@@ -90,6 +83,10 @@ func (f *Interface) rejectInside(packet []byte, out []byte, q int) {
}

out = iputil.CreateRejectPacket(packet, out)
if len(out) == 0 {
return
}

_, err := f.readers[q].Write(out)
if err != nil {
f.l.WithError(err).Error("Failed to write to tun")
@@ -101,80 +98,39 @@ func (f *Interface) rejectOutside(packet []byte, ci *ConnectionState, hostinfo *
return
}

// Use some out buffer space to build the packet before encryption
// Need 40 bytes for the reject packet (20 byte ipv4 header, 20 byte tcp rst packet)
// Leave 100 bytes for the encrypted packet (60 byte Nebula header, 40 byte reject packet)
out = out[:140]
outPacket := iputil.CreateRejectPacket(packet, out[100:])
f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, outPacket, nb, out, q)
out = iputil.CreateRejectPacket(packet, out)
if len(out) == 0 {
return
}

if len(out) > iputil.MaxRejectPacketSize {
if f.l.GetLevel() >= logrus.InfoLevel {
f.l.
WithField("packet", packet).
WithField("outPacket", out).
Info("rejectOutside: packet too big, not sending")
}
return
}

f.sendNoMetrics(header.Message, 0, ci, hostinfo, nil, out, nb, packet, q)
}
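Note the argument order in the new sendNoMetrics call at the end of rejectOutside: the freshly built reject packet (out) is now the payload, and the original inbound packet's buffer (packet) is reused as the scratch space, the reverse of the old scheme that carved both the reject packet and the encryption buffer out of a single 140-byte slice.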
func (f *Interface) Handshake(vpnIp iputil.VpnIp) {
f.getOrHandshake(vpnIp)
f.getOrHandshake(vpnIp, nil)
}

// getOrHandshake returns nil if the vpnIp is not routable
func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp) *HostInfo {
// getOrHandshake returns nil if the vpnIp is not routable.
// If the 2nd return var is false then the hostinfo is not ready to be used in a tunnel
func (f *Interface) getOrHandshake(vpnIp iputil.VpnIp, cacheCallback func(*HandshakeHostInfo)) (*HostInfo, bool) {
if !ipMaskContains(f.lightHouse.myVpnIp, f.lightHouse.myVpnZeros, vpnIp) {
vpnIp = f.inside.RouteFor(vpnIp)
if vpnIp == 0 {
return nil
}
}
hostinfo, err := f.hostMap.PromoteBestQueryVpnIp(vpnIp, f)

//if err != nil || hostinfo.ConnectionState == nil {
if err != nil {
hostinfo, err = f.handshakeManager.pendingHostMap.QueryVpnIp(vpnIp)
if err != nil {
hostinfo = f.handshakeManager.AddVpnIp(vpnIp, f.initHostInfo)
}
}
ci := hostinfo.ConnectionState

if ci != nil && ci.eKey != nil && ci.ready {
return hostinfo
}

// Handshake is not ready, we need to grab the lock now before we start the handshake process
hostinfo.Lock()
defer hostinfo.Unlock()

// Double check, now that we have the lock
ci = hostinfo.ConnectionState
if ci != nil && ci.eKey != nil && ci.ready {
return hostinfo
}

// If we have already created the handshake packet, we don't want to call the function at all.
if !hostinfo.HandshakeReady {
ixHandshakeStage0(f, vpnIp, hostinfo)
// FIXME: Maybe make XX selectable, but probably not since psk makes it nearly pointless for us.
//xx_handshakeStage0(f, ip, hostinfo)

// If this is a static host, we don't need to wait for the HostQueryReply
// We can trigger the handshake right now
_, doTrigger := f.lightHouse.GetStaticHostList()[vpnIp]
if !doTrigger {
// Add any calculated remotes, and trigger early handshake if one found
doTrigger = f.lightHouse.addCalculatedRemotes(vpnIp)
}

if doTrigger {
select {
case f.handshakeManager.trigger <- vpnIp:
default:
}
return nil, false
}
}

return hostinfo
}

// initHostInfo is the init function to pass to (*HandshakeManager).AddVpnIP that
// will create the initial Noise ConnectionState
func (f *Interface) initHostInfo(hostinfo *HostInfo) {
hostinfo.ConnectionState = f.newConnectionState(f.l, true, noise.HandshakeIX, []byte{}, 0)
return f.handshakeManager.GetOrHandshake(vpnIp, cacheCallback)
}
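The bulk of the old inline logic above now lives in HandshakeManager.GetOrHandshake, and callers switch from lock-and-recheck on ConnectionState to a callback plus a readiness flag. A sketch of the new calling convention, taken from the call sites in this diff (the callback body is illustrative):

    // The callback runs only while the handshake is still pending, so the
    // packet is queued for replay once the tunnel comes up.
    hostinfo, ready := f.getOrHandshake(vpnIp, func(hh *HandshakeHostInfo) {
        hh.cachePacket(f.l, header.Message, 0, packet, f.sendMessageNow, f.cachedPacketMetrics)
    })
    if hostinfo == nil || !ready {
        return // unroutable, or the tunnel is not yet usable
    }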
func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubType, hostinfo *HostInfo, p, nb, out []byte) {
@@ -186,7 +142,7 @@ func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubTyp
}

// check if packet is in outbound fw rules
dropReason := f.firewall.Drop(p, *fp, false, hostinfo, f.caPool, nil)
dropReason := f.firewall.Drop(*fp, false, hostinfo, f.pki.GetCAPool(), nil)
if dropReason != nil {
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("fwPacket", fp).
@@ -201,7 +157,10 @@ func (f *Interface) sendMessageNow(t header.MessageType, st header.MessageSubTyp

// SendMessageToVpnIp handles real ip:port lookup and sends to the current best known address for vpnIp
func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) {
hostInfo := f.getOrHandshake(vpnIp)
hostInfo, ready := f.getOrHandshake(vpnIp, func(hh *HandshakeHostInfo) {
hh.cachePacket(f.l, t, st, p, f.SendMessageToHostInfo, f.cachedPacketMetrics)
})

if hostInfo == nil {
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("vpnIp", vpnIp).
@@ -210,16 +169,8 @@ func (f *Interface) SendMessageToVpnIp(t header.MessageType, st header.MessageSu
return
}

if !hostInfo.ConnectionState.ready {
// Because we might be sending stored packets, lock here to stop new things going to
// the packet queue.
hostInfo.ConnectionState.queueLock.Lock()
if !hostInfo.ConnectionState.ready {
hostInfo.cachePacket(f.l, t, st, p, f.SendMessageToHostInfo, f.cachedPacketMetrics)
hostInfo.ConnectionState.queueLock.Unlock()
return
}
hostInfo.ConnectionState.queueLock.Unlock()
if !ready {
return
}

f.SendMessageToHostInfo(t, st, hostInfo, p, nb, out)
@@ -239,7 +190,7 @@ func (f *Interface) sendTo(t header.MessageType, st header.MessageSubType, ci *C
f.sendNoMetrics(t, st, ci, hostinfo, remote, p, nb, out, 0)
}

// sendVia sends a payload through a Relay tunnel. No authentication or encryption is done
// SendVia sends a payload through a Relay tunnel. No authentication or encryption is done
// to the payload for the ultimate target host, making this a useful method for sending
// handshake messages to peers through relay tunnels.
// via is the HostInfo through which the message is relayed.
@@ -337,7 +288,7 @@ func (f *Interface) sendNoMetrics(t header.MessageType, st header.MessageSubType
if t != header.CloseTunnel && hostinfo.lastRebindCount != f.rebindCount {
//NOTE: there is an update hole if a tunnel isn't used and exactly 256 rebinds occur before the tunnel is
// finally used again. This tunnel would eventually be torn down and recreated if this action didn't help.
f.lightHouse.QueryServer(hostinfo.vpnIp, f)
f.lightHouse.QueryServer(hostinfo.vpnIp)
hostinfo.lastRebindCount = f.rebindCount
if f.l.Level >= logrus.DebugLevel {
f.l.WithField("vpnIp", hostinfo.vpnIp).Debug("Lighthouse update triggered for punch due to rebind counter")
interface.go (125 changes)
@@ -13,7 +13,6 @@ import (

"github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/firewall"
"github.com/slackhq/nebula/header"
@@ -26,9 +25,9 @@ const mtu = 9001

type InterfaceConfig struct {
HostMap *HostMap
Outside *udp.Conn
Outside udp.Conn
Inside overlay.Device
certState *CertState
pki *PKI
Cipher string
Firewall *Firewall
ServeDns bool
@@ -41,20 +40,22 @@ type InterfaceConfig struct {
routines int
MessageMetrics *MessageMetrics
version string
caPool *cert.NebulaCAPool
disconnectInvalid bool
relayManager *relayManager
punchy *Punchy

tryPromoteEvery uint32
reQueryEvery uint32
reQueryWait time.Duration

ConntrackCacheTimeout time.Duration
l *logrus.Logger
}

type Interface struct {
hostMap *HostMap
outside *udp.Conn
outside udp.Conn
inside overlay.Device
certState atomic.Pointer[CertState]
pki *PKI
cipher string
firewall *Firewall
connectionManager *connectionManager
@@ -67,11 +68,14 @@ type Interface struct {
dropLocalBroadcast bool
dropMulticast bool
routines int
caPool *cert.NebulaCAPool
disconnectInvalid bool
disconnectInvalid atomic.Bool
closed atomic.Bool
relayManager *relayManager

tryPromoteEvery atomic.Uint32
reQueryEvery atomic.Uint32
reQueryWait atomic.Int64

sendRecvErrorConfig sendRecvErrorConfig

// rebindCount is used to decide if an active tunnel should trigger a punch notification through a lighthouse
@@ -80,7 +84,7 @@ type Interface struct {

conntrackCacheTimeout time.Duration

writers []*udp.Conn
writers []udp.Conn
readers []io.ReadWriteCloser

metricHandshakes metrics.Histogram
@@ -144,15 +148,17 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
if c.Inside == nil {
return nil, errors.New("no inside interface (tun)")
}
if c.certState == nil {
if c.pki == nil {
return nil, errors.New("no certificate state")
}
if c.Firewall == nil {
return nil, errors.New("no firewall rules")
}

myVpnIp := iputil.Ip2VpnIp(c.certState.certificate.Details.Ips[0].IP)
certificate := c.pki.GetCertState().Certificate
myVpnIp := iputil.Ip2VpnIp(certificate.Details.Ips[0].IP)
ifce := &Interface{
pki: c.pki,
hostMap: c.HostMap,
outside: c.Outside,
inside: c.Inside,
@@ -162,15 +168,13 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
handshakeManager: c.HandshakeManager,
createTime: time.Now(),
lightHouse: c.lightHouse,
localBroadcast: myVpnIp | ^iputil.Ip2VpnIp(c.certState.certificate.Details.Ips[0].Mask),
localBroadcast: myVpnIp | ^iputil.Ip2VpnIp(certificate.Details.Ips[0].Mask),
dropLocalBroadcast: c.DropLocalBroadcast,
dropMulticast: c.DropMulticast,
routines: c.routines,
version: c.version,
writers: make([]*udp.Conn, c.routines),
writers: make([]udp.Conn, c.routines),
readers: make([]io.ReadWriteCloser, c.routines),
caPool: c.caPool,
disconnectInvalid: c.disconnectInvalid,
myVpnIp: myVpnIp,
relayManager: c.relayManager,

@@ -186,7 +190,10 @@ func NewInterface(ctx context.Context, c *InterfaceConfig) (*Interface, error) {
l: c.l,
}

ifce.certState.Store(c.certState)
ifce.tryPromoteEvery.Store(c.tryPromoteEvery)
ifce.reQueryEvery.Store(c.reQueryEvery)
ifce.reQueryWait.Store(int64(c.reQueryWait))

ifce.connectionManager = newConnectionManager(ctx, c.l, ifce, c.checkInterval, c.pendingDeletionInterval, c.punchy)

return ifce, nil
@@ -243,7 +250,7 @@ func (f *Interface) run() {
func (f *Interface) listenOut(i int) {
runtime.LockOSThread()

var li *udp.Conn
var li udp.Conn
// TODO clean this up with a coherent interface for each outside connection
if i > 0 {
li = f.writers[i]
@@ -283,47 +290,24 @@ func (f *Interface) listenIn(reader io.ReadWriteCloser, i int) {
}

func (f *Interface) RegisterConfigChangeCallbacks(c *config.C) {
c.RegisterReloadCallback(f.reloadCA)
c.RegisterReloadCallback(f.reloadCertKey)
c.RegisterReloadCallback(f.reloadFirewall)
c.RegisterReloadCallback(f.reloadSendRecvError)
c.RegisterReloadCallback(f.reloadDisconnectInvalid)
c.RegisterReloadCallback(f.reloadMisc)

for _, udpConn := range f.writers {
c.RegisterReloadCallback(udpConn.ReloadConfig)
}
}

func (f *Interface) reloadCA(c *config.C) {
// reload and check regardless
// todo: need mutex?
newCAs, err := loadCAFromConfig(f.l, c)
if err != nil {
f.l.WithError(err).Error("Could not refresh trusted CA certificates")
return
func (f *Interface) reloadDisconnectInvalid(c *config.C) {
initial := c.InitialLoad()
if initial || c.HasChanged("pki.disconnect_invalid") {
f.disconnectInvalid.Store(c.GetBool("pki.disconnect_invalid", true))
if !initial {
f.l.Infof("pki.disconnect_invalid changed to %v", f.disconnectInvalid.Load())
}
}

f.caPool = newCAs
f.l.WithField("fingerprints", f.caPool.GetFingerprints()).Info("Trusted CA certificates refreshed")
}

func (f *Interface) reloadCertKey(c *config.C) {
// reload and check in all cases
cs, err := NewCertStateFromConfig(c)
if err != nil {
f.l.WithError(err).Error("Could not refresh client cert")
return
}

// did IP in cert change? if so, don't set
currentCert := f.certState.Load().certificate
oldIPs := currentCert.Details.Ips
newIPs := cs.certificate.Details.Ips
if len(oldIPs) > 0 && len(newIPs) > 0 && oldIPs[0].String() != newIPs[0].String() {
f.l.WithField("new_ip", newIPs[0]).WithField("old_ip", oldIPs[0]).Error("IP in new cert was different from old")
return
}

f.certState.Store(cs)
f.l.WithField("cert", cs.certificate).Info("Client cert refreshed from disk")
}

func (f *Interface) reloadFirewall(c *config.C) {
@@ -333,7 +317,7 @@ func (f *Interface) reloadFirewall(c *config.C) {
return
}

fw, err := NewFirewallFromConfig(f.l, f.certState.Load().certificate, c)
fw, err := NewFirewallFromConfig(f.l, f.pki.GetCertState().Certificate, c)
if err != nil {
f.l.WithError(err).Error("Error while creating firewall during reload")
return
@@ -348,8 +332,8 @@ func (f *Interface) reloadFirewall(c *config.C) {
// If rulesVersion is back to zero, we have wrapped all the way around. Be
// safe and just reset conntrack in this case.
if fw.rulesVersion == 0 {
f.l.WithField("firewallHash", fw.GetRuleHash()).
WithField("oldFirewallHash", oldFw.GetRuleHash()).
f.l.WithField("firewallHashes", fw.GetRuleHashes()).
WithField("oldFirewallHashes", oldFw.GetRuleHashes()).
WithField("rulesVersion", fw.rulesVersion).
Warn("firewall rulesVersion has overflowed, resetting conntrack")
} else {
@@ -359,8 +343,8 @@ func (f *Interface) reloadFirewall(c *config.C) {
f.firewall = fw

oldFw.Destroy()
f.l.WithField("firewallHash", fw.GetRuleHash()).
WithField("oldFirewallHash", oldFw.GetRuleHash()).
f.l.WithField("firewallHashes", fw.GetRuleHashes()).
WithField("oldFirewallHashes", oldFw.GetRuleHashes()).
WithField("rulesVersion", fw.rulesVersion).
Info("New firewall has been installed")
}
@@ -389,6 +373,26 @@ func (f *Interface) reloadSendRecvError(c *config.C) {
}
}

func (f *Interface) reloadMisc(c *config.C) {
if c.HasChanged("counters.try_promote") {
n := c.GetUint32("counters.try_promote", defaultPromoteEvery)
f.tryPromoteEvery.Store(n)
f.l.Info("counters.try_promote has changed")
}

if c.HasChanged("counters.requery_every_packets") {
n := c.GetUint32("counters.requery_every_packets", defaultReQueryEvery)
f.reQueryEvery.Store(n)
f.l.Info("counters.requery_every_packets has changed")
}

if c.HasChanged("timers.requery_wait_duration") {
n := c.GetDuration("timers.requery_wait_duration", defaultReQueryWait)
f.reQueryWait.Store(int64(n))
f.l.Info("timers.requery_wait_duration has changed")
}
}
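reloadMisc above shows the pattern this change uses for hot-reloadable tuning knobs: the reload callback stores into an atomic field and the packet path loads it, so no lock is needed. A distilled sketch under that assumption (field and config key names as in this diff; the counter check is illustrative):

    // In the reload callback: publish the new value atomically.
    f.tryPromoteEvery.Store(c.GetUint32("counters.try_promote", defaultPromoteEvery))

    // On the hot path: read it without locking.
    if sent%uint64(f.tryPromoteEvery.Load()) == 0 {
        // consider promoting to a preferred remote
    }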
func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
ticker := time.NewTicker(i)
defer ticker.Stop()
@@ -405,7 +409,7 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
f.firewall.EmitStats()
f.handshakeManager.EmitStats()
udpStats()
certExpirationGauge.Update(int64(f.certState.Load().certificate.Details.NotAfter.Sub(time.Now()) / time.Second))
certExpirationGauge.Update(int64(f.pki.GetCertState().Certificate.Details.NotAfter.Sub(time.Now()) / time.Second))
}
}
}
@@ -413,6 +417,13 @@ func (f *Interface) emitStats(ctx context.Context, i time.Duration) {
func (f *Interface) Close() error {
f.closed.Store(true)

for _, u := range f.writers {
err := u.Close()
if err != nil {
f.l.WithError(err).Error("Error while closing udp socket")
}
}

// Release the tun device
return f.inside.Close()
}
@@ -6,8 +6,19 @@ import (
"golang.org/x/net/ipv4"
)

const (
// Need 96 bytes for the largest reject packet:
// - 20 byte ipv4 header
// - 8 byte icmpv4 header
// - 68 byte body (60 byte max orig ipv4 header + 8 byte orig icmpv4 header)
MaxRejectPacketSize = ipv4.HeaderLen + 8 + 60 + 8
)
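The arithmetic checks out: 20 (IPv4 header) + 8 (ICMPv4 header) + 60 (largest original IPv4 header) + 8 (quoted original payload) = 96 bytes, so MaxRejectPacketSize matches the comment above it.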
func CreateRejectPacket(packet []byte, out []byte) []byte {
// TODO ipv4 only, need to fix when inside supports ipv6
if len(packet) < ipv4.HeaderLen || int(packet[0]>>4) != ipv4.Version {
return nil
}

switch packet[9] {
case 6: // tcp
return ipv4CreateRejectTCPPacket(packet, out)
@@ -19,20 +30,28 @@ func CreateRejectPacket(packet []byte, out []byte) []byte {
func ipv4CreateRejectICMPPacket(packet []byte, out []byte) []byte {
ihl := int(packet[0]&0x0f) << 2

// ICMP reply includes header and first 8 bytes of the packet
if len(packet) < ihl {
// We need at least this many bytes for this to be a valid packet
return nil
}

// ICMP reply includes original header and first 8 bytes of the packet
packetLen := len(packet)
if packetLen > ihl+8 {
packetLen = ihl + 8
}

outLen := ipv4.HeaderLen + 8 + packetLen
if outLen > cap(out) {
return nil
}

out = out[:(outLen)]
out = out[:outLen]

ipHdr := out[0:ipv4.HeaderLen]
ipHdr[0] = ipv4.Version<<4 | (ipv4.HeaderLen >> 2) // version, ihl
ipHdr[1] = 0 // DSCP, ECN
binary.BigEndian.PutUint16(ipHdr[2:], uint16(ipv4.HeaderLen+8+packetLen)) // Total Length
ipHdr[0] = ipv4.Version<<4 | (ipv4.HeaderLen >> 2) // version, ihl
ipHdr[1] = 0 // DSCP, ECN
binary.BigEndian.PutUint16(ipHdr[2:], uint16(outLen)) // Total Length

ipHdr[4] = 0 // id
ipHdr[5] = 0 // .
@@ -76,7 +95,15 @@ func ipv4CreateRejectTCPPacket(packet []byte, out []byte) []byte {
ihl := int(packet[0]&0x0f) << 2
outLen := ipv4.HeaderLen + tcpLen

out = out[:(outLen)]
if len(packet) < ihl+tcpLen {
// We need at least this many bytes for this to be a valid packet
return nil
}
if outLen > cap(out) {
return nil
}

out = out[:outLen]

ipHdr := out[0:ipv4.HeaderLen]
ipHdr[0] = ipv4.Version<<4 | (ipv4.HeaderLen >> 2) // version, ihl
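With the new length and capacity checks, CreateRejectPacket degrades gracefully instead of slicing out of range on a short buffer: it returns nil whenever the input is not parseable IPv4 or out cannot hold the result. A usage sketch under those semantics:

    // The caller supplies the buffer; a zero-length result means "nothing to send".
    out := make([]byte, 0, iputil.MaxRejectPacketSize)
    if reject := iputil.CreateRejectPacket(inbound, out); len(reject) > 0 {
        // hand the reject packet back toward the sender
    }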
iputil/packet_test.go (new file, 73 lines)
@@ -0,0 +1,73 @@
package iputil

import (
"net"
"testing"

"github.com/stretchr/testify/assert"
"golang.org/x/net/ipv4"
)

func Test_CreateRejectPacket(t *testing.T) {
h := ipv4.Header{
Len: 20,
Src: net.IPv4(10, 0, 0, 1),
Dst: net.IPv4(10, 0, 0, 2),
Protocol: 1, // ICMP
}

b, err := h.Marshal()
if err != nil {
t.Fatalf("h.Marhshal: %v", err)
}
b = append(b, []byte{0, 3, 0, 4}...)

expectedLen := ipv4.HeaderLen + 8 + h.Len + 4
out := make([]byte, expectedLen)
rejectPacket := CreateRejectPacket(b, out)
assert.NotNil(t, rejectPacket)
assert.Len(t, rejectPacket, expectedLen)

// ICMP with max header len
h = ipv4.Header{
Len: 60,
Src: net.IPv4(10, 0, 0, 1),
Dst: net.IPv4(10, 0, 0, 2),
Protocol: 1, // ICMP
Options: make([]byte, 40),
}

b, err = h.Marshal()
if err != nil {
t.Fatalf("h.Marhshal: %v", err)
}
b = append(b, []byte{0, 3, 0, 4, 0, 0, 0, 0}...)

expectedLen = MaxRejectPacketSize
out = make([]byte, MaxRejectPacketSize)
rejectPacket = CreateRejectPacket(b, out)
assert.NotNil(t, rejectPacket)
assert.Len(t, rejectPacket, expectedLen)

// TCP with max header len
h = ipv4.Header{
Len: 60,
Src: net.IPv4(10, 0, 0, 1),
Dst: net.IPv4(10, 0, 0, 2),
Protocol: 6, // TCP
Options: make([]byte, 40),
}

b, err = h.Marshal()
if err != nil {
t.Fatalf("h.Marhshal: %v", err)
}
b = append(b, []byte{0, 3, 0, 4}...)
b = append(b, make([]byte, 16)...)

expectedLen = ipv4.HeaderLen + 20
out = make([]byte, expectedLen)
rejectPacket = CreateRejectPacket(b, out)
assert.NotNil(t, rejectPacket)
assert.Len(t, rejectPacket, expectedLen)
}
lighthouse.go (143 changes)
@@ -39,7 +39,7 @@ type LightHouse struct {
myVpnIp iputil.VpnIp
myVpnZeros iputil.VpnIp
myVpnNet *net.IPNet
punchConn *udp.Conn
punchConn udp.Conn
punchy *Punchy

// Local cache of answers from light houses
@@ -64,18 +64,19 @@ type LightHouse struct {
staticList atomic.Pointer[map[iputil.VpnIp]struct{}]
lighthouses atomic.Pointer[map[iputil.VpnIp]struct{}]

interval atomic.Int64
updateCancel context.CancelFunc
updateParentCtx context.Context
updateUdp EncWriter
nebulaPort uint32 // 32 bits because protobuf does not have a uint16
interval atomic.Int64
updateCancel context.CancelFunc
ifce EncWriter
nebulaPort uint32 // 32 bits because protobuf does not have a uint16

advertiseAddrs atomic.Pointer[[]netIpAndPort]

// IP's of relays that can be used by peers to access me
relaysForMe atomic.Pointer[[]iputil.VpnIp]

calculatedRemotes atomic.Pointer[cidr.Tree4] // Maps VpnIp to []*calculatedRemote
queryChan chan iputil.VpnIp

calculatedRemotes atomic.Pointer[cidr.Tree4[[]*calculatedRemote]] // Maps VpnIp to []*calculatedRemote

metrics *MessageMetrics
metricHolepunchTx metrics.Counter
@@ -84,7 +85,7 @@ type LightHouse struct {

// NewLightHouseFromConfig will build a Lighthouse struct from the values provided in the config object
// addrMap should be nil unless this is during a config reload
func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, pc *udp.Conn, p *Punchy) (*LightHouse, error) {
func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C, myVpnNet *net.IPNet, pc udp.Conn, p *Punchy) (*LightHouse, error) {
amLighthouse := c.GetBool("lighthouse.am_lighthouse", false)
nebulaPort := uint32(c.GetInt("listen.port", 0))
if amLighthouse && nebulaPort == 0 {
@@ -111,6 +112,7 @@ func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C,
nebulaPort: nebulaPort,
punchConn: pc,
punchy: p,
queryChan: make(chan iputil.VpnIp, c.GetUint32("handshakes.query_buffer", 64)),
l: l,
}
lighthouses := make(map[iputil.VpnIp]struct{})
@@ -133,13 +135,15 @@ func NewLightHouseFromConfig(ctx context.Context, l *logrus.Logger, c *config.C,
c.RegisterReloadCallback(func(c *config.C) {
err := h.reload(c, false)
switch v := err.(type) {
case util.ContextualError:
case *util.ContextualError:
v.Log(l)
case error:
l.WithError(err).Error("failed to reload lighthouse")
}
})

h.startQueryWorker()

return &h, nil
}

@@ -167,7 +171,7 @@ func (lh *LightHouse) GetRelaysForMe() []iputil.VpnIp {
return *lh.relaysForMe.Load()
}

func (lh *LightHouse) getCalculatedRemotes() *cidr.Tree4 {
func (lh *LightHouse) getCalculatedRemotes() *cidr.Tree4[[]*calculatedRemote] {
return lh.calculatedRemotes.Load()
}

@@ -217,7 +221,7 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {
lh.updateCancel()
}

lh.LhUpdateWorker(lh.updateParentCtx, lh.updateUdp)
lh.StartUpdateWorker()
}
}

@@ -262,6 +266,18 @@ func (lh *LightHouse) reload(c *config.C, initial bool) error {

//NOTE: many things will get much simpler when we combine static_host_map and lighthouse.hosts in config
if initial || c.HasChanged("static_host_map") || c.HasChanged("static_map.cadence") || c.HasChanged("static_map.network") || c.HasChanged("static_map.lookup_timeout") {
// Clean up. Entries still in the static_host_map will be re-built.
// Entries no longer present must have their (possible) background DNS goroutines stopped.
if existingStaticList := lh.staticList.Load(); existingStaticList != nil {
lh.RLock()
for staticVpnIp := range *existingStaticList {
if am, ok := lh.addrMap[staticVpnIp]; ok && am != nil {
am.hr.Cancel()
}
}
lh.RUnlock()
}
// Build a new list based on current config.
staticList := make(map[iputil.VpnIp]struct{})
err := lh.loadStaticMap(c, lh.myVpnNet, staticList)
if err != nil {
@@ -432,9 +448,9 @@ func (lh *LightHouse) loadStaticMap(c *config.C, tunCidr *net.IPNet, staticList
return nil
}

func (lh *LightHouse) Query(ip iputil.VpnIp, f EncWriter) *RemoteList {
func (lh *LightHouse) Query(ip iputil.VpnIp) *RemoteList {
if !lh.IsLighthouseIP(ip) {
lh.QueryServer(ip, f)
lh.QueryServer(ip)
}
lh.RLock()
if v, ok := lh.addrMap[ip]; ok {
@@ -445,30 +461,14 @@
return nil
}

// This is asynchronous so no reply should be expected
func (lh *LightHouse) QueryServer(ip iputil.VpnIp, f EncWriter) {
if lh.amLighthouse {
// QueryServer is asynchronous so no reply should be expected
func (lh *LightHouse) QueryServer(ip iputil.VpnIp) {
// Don't put lighthouse ips in the query channel because we can't query lighthouses about lighthouses
if lh.amLighthouse || lh.IsLighthouseIP(ip) {
return
}

if lh.IsLighthouseIP(ip) {
return
}

// Send a query to the lighthouses and hope for the best next time
query, err := NewLhQueryByInt(ip).Marshal()
if err != nil {
lh.l.WithError(err).WithField("vpnIp", ip).Error("Failed to marshal lighthouse query payload")
return
}

lighthouses := lh.GetLighthouses()
lh.metricTx(NebulaMeta_HostQuery, int64(len(lighthouses)))
nb := make([]byte, 12, 12)
out := make([]byte, mtu)
for n := range lighthouses {
f.SendMessageToVpnIp(header.LightHouse, 0, n, query, nb, out)
}
lh.queryChan <- ip
}

func (lh *LightHouse) QueryCache(ip iputil.VpnIp) *RemoteList {
@@ -583,11 +583,10 @@ func (lh *LightHouse) addCalculatedRemotes(vpnIp iputil.VpnIp) bool {
if tree == nil {
return false
}
value := tree.MostSpecificContains(vpnIp)
if value == nil {
ok, calculatedRemotes := tree.MostSpecificContains(vpnIp)
if !ok {
return false
}
calculatedRemotes := value.([]*calculatedRemote)

var calculated []*Ip4AndPort
for _, cr := range calculatedRemotes {
@@ -742,33 +741,73 @@ func NewUDPAddrFromLH6(ipp *Ip6AndPort) *udp.Addr {
return udp.NewAddr(lhIp6ToIp(ipp), uint16(ipp.Port))
}

func (lh *LightHouse) LhUpdateWorker(ctx context.Context, f EncWriter) {
lh.updateParentCtx = ctx
lh.updateUdp = f
func (lh *LightHouse) startQueryWorker() {
if lh.amLighthouse {
return
}

go func() {
nb := make([]byte, 12, 12)
out := make([]byte, mtu)

for {
select {
case <-lh.ctx.Done():
return
case ip := <-lh.queryChan:
lh.innerQueryServer(ip, nb, out)
}
}
}()
}

func (lh *LightHouse) innerQueryServer(ip iputil.VpnIp, nb, out []byte) {
if lh.IsLighthouseIP(ip) {
return
}

// Send a query to the lighthouses and hope for the best next time
query, err := NewLhQueryByInt(ip).Marshal()
if err != nil {
lh.l.WithError(err).WithField("vpnIp", ip).Error("Failed to marshal lighthouse query payload")
return
}

lighthouses := lh.GetLighthouses()
lh.metricTx(NebulaMeta_HostQuery, int64(len(lighthouses)))

for n := range lighthouses {
lh.ifce.SendMessageToVpnIp(header.LightHouse, 0, n, query, nb, out)
}
}
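startQueryWorker and innerQueryServer above replace the old synchronous fan-out inside QueryServer: producers now just push a VpnIp onto a buffered channel, and a single goroutine drains it, which also lets the 12-byte nonce and mtu-sized output buffers be allocated once and reused safely. A distilled sketch of the pattern (buffer size and names taken from this diff; the loop body is illustrative):

    // One worker owns the scratch buffers, so reuse is race-free.
    queryChan := make(chan iputil.VpnIp, 64) // capacity from handshakes.query_buffer
    go func() {
        nb := make([]byte, 12)
        out := make([]byte, mtu)
        for ip := range queryChan {
            innerQueryServer(ip, nb, out) // queries are serialized, buffers reused
        }
    }()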
func (lh *LightHouse) StartUpdateWorker() {
interval := lh.GetUpdateInterval()
if lh.amLighthouse || interval == 0 {
return
}

clockSource := time.NewTicker(time.Second * time.Duration(interval))
updateCtx, cancel := context.WithCancel(ctx)
updateCtx, cancel := context.WithCancel(lh.ctx)
lh.updateCancel = cancel
defer clockSource.Stop()

for {
lh.SendUpdate(f)
go func() {
defer clockSource.Stop()

select {
case <-updateCtx.Done():
return
case <-clockSource.C:
continue
for {
lh.SendUpdate()

select {
case <-updateCtx.Done():
return
case <-clockSource.C:
continue
}
}
}
}()
}

func (lh *LightHouse) SendUpdate(f EncWriter) {
func (lh *LightHouse) SendUpdate() {
var v4 []*Ip4AndPort
var v6 []*Ip6AndPort

@@ -821,7 +860,7 @@ func (lh *LightHouse) SendUpdate(f EncWriter) {
}

for vpnIp := range lighthouses {
f.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, mm, nb, out)
lh.ifce.SendMessageToVpnIp(header.LightHouse, 0, vpnIp, mm, nb, out)
}
}
@@ -12,6 +12,7 @@ import (
"github.com/slackhq/nebula/test"
"github.com/slackhq/nebula/udp"
"github.com/stretchr/testify/assert"
"gopkg.in/yaml.v2"
)

//TODO: Add a test to ensure udpAddr is copied and not reused
@@ -65,6 +66,35 @@ func Test_lhStaticMapping(t *testing.T) {
assert.EqualError(t, err, "lighthouse 10.128.0.3 does not have a static_host_map entry")
}

func TestReloadLighthouseInterval(t *testing.T) {
l := test.NewLogger()
_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/16")
lh1 := "10.128.0.2"

c := config.NewC(l)
c.Settings["lighthouse"] = map[interface{}]interface{}{
"hosts": []interface{}{lh1},
"interval": "1s",
}

c.Settings["static_host_map"] = map[interface{}]interface{}{lh1: []interface{}{"1.1.1.1:4242"}}
lh, err := NewLightHouseFromConfig(context.Background(), l, c, myVpnNet, nil, nil)
assert.NoError(t, err)
lh.ifce = &mockEncWriter{}

// The first one routine is kicked off by main.go currently, lets make sure that one dies
c.ReloadConfigString("lighthouse:\n interval: 5")
assert.Equal(t, int64(5), lh.interval.Load())

// Subsequent calls are killed off by the LightHouse.Reload function
c.ReloadConfigString("lighthouse:\n interval: 10")
assert.Equal(t, int64(10), lh.interval.Load())

// If this completes then nothing is stealing our reload routine
c.ReloadConfigString("lighthouse:\n interval: 11")
assert.Equal(t, int64(11), lh.interval.Load())
}

func BenchmarkLighthouseHandleRequest(b *testing.B) {
l := test.NewLogger()
_, myVpnNet, _ := net.ParseCIDR("10.128.0.1/0")
@@ -242,8 +272,17 @@ func TestLighthouse_reload(t *testing.T) {
lh, err := NewLightHouseFromConfig(context.Background(), l, c, &net.IPNet{IP: net.IP{10, 128, 0, 1}, Mask: net.IPMask{255, 255, 255, 0}}, nil, nil)
assert.NoError(t, err)

c.Settings["static_host_map"] = map[interface{}]interface{}{"10.128.0.2": []interface{}{"1.1.1.1:4242"}}
lh.reload(c, false)
nc := map[interface{}]interface{}{
"static_host_map": map[interface{}]interface{}{
"10.128.0.2": []interface{}{"1.1.1.1:4242"},
},
}
rc, err := yaml.Marshal(nc)
assert.NoError(t, err)
c.ReloadConfigString(string(rc))

err = lh.reload(c, false)
assert.NoError(t, err)
}

func newLHHostRequest(fromAddr *udp.Addr, myVpnIp, queryVpnIp iputil.VpnIp, lhh *LightHouseHandler) testLhReply {
main.go (147 changes)
@@ -3,7 +3,6 @@ package nebula
import (
"context"
"encoding/binary"
"errors"
"fmt"
"net"
"time"
@@ -19,7 +18,7 @@ import (

type m map[string]interface{}

func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logger, tunFd *int) (retcon *Control, reterr error) {
func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logger, deviceFactory overlay.DeviceFactory) (retcon *Control, reterr error) {
ctx, cancel := context.WithCancel(context.Background())
// Automatically cancel the context if Main returns an error, to signal all created goroutines to quit.
defer func() {
@@ -46,7 +45,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg

err := configLogger(l, c)
if err != nil {
return nil, util.NewContextualError("Failed to configure the logger", nil, err)
return nil, util.ContextualizeIfNeeded("Failed to configure the logger", err)
}

c.RegisterReloadCallback(func(c *config.C) {
@@ -56,36 +55,31 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
}
})

caPool, err := loadCAFromConfig(l, c)
pki, err := NewPKIFromConfig(l, c)
if err != nil {
//The errors coming out of loadCA are already nicely formatted
return nil, util.NewContextualError("Failed to load ca from config", nil, err)
return nil, util.ContextualizeIfNeeded("Failed to load PKI from config", err)
}
l.WithField("fingerprints", caPool.GetFingerprints()).Debug("Trusted CA fingerprints")

cs, err := NewCertStateFromConfig(c)
certificate := pki.GetCertState().Certificate
fw, err := NewFirewallFromConfig(l, certificate, c)
if err != nil {
//The errors coming out of NewCertStateFromConfig are already nicely formatted
return nil, util.NewContextualError("Failed to load certificate from config", nil, err)
return nil, util.ContextualizeIfNeeded("Error while loading firewall rules", err)
}
l.WithField("cert", cs.certificate).Debug("Client nebula certificate")

fw, err := NewFirewallFromConfig(l, cs.certificate, c)
if err != nil {
return nil, util.NewContextualError("Error while loading firewall rules", nil, err)
}
l.WithField("firewallHash", fw.GetRuleHash()).Info("Firewall started")
l.WithField("firewallHashes", fw.GetRuleHashes()).Info("Firewall started")

// TODO: make sure mask is 4 bytes
tunCidr := cs.certificate.Details.Ips[0]
tunCidr := certificate.Details.Ips[0]

ssh, err := sshd.NewSSHServer(l.WithField("subsystem", "sshd"))
if err != nil {
return nil, util.ContextualizeIfNeeded("Error while creating SSH server", err)
}
wireSSHReload(l, ssh, c)
var sshStart func()
if c.GetBool("sshd.enabled", false) {
sshStart, err = configSSH(l, ssh, c)
if err != nil {
return nil, util.NewContextualError("Error while configuring the sshd", nil, err)
return nil, util.ContextualizeIfNeeded("Error while configuring the sshd", err)
}
}

@@ -134,9 +128,13 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
if !configTest {
c.CatchHUP(ctx)

tun, err = overlay.NewDeviceFromConfig(c, l, tunCidr, tunFd, routines)
if deviceFactory == nil {
deviceFactory = overlay.NewDeviceFromConfig
}

tun, err = deviceFactory(c, l, tunCidr, routines)
if err != nil {
return nil, util.NewContextualError("Failed to get a tun/tap device", nil, err)
return nil, util.ContextualizeIfNeeded("Failed to get a tun/tap device", err)
}

defer func() {
@@ -147,7 +145,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
}

// set up our UDP listener
udpConns := make([]*udp.Conn, routines)
udpConns := make([]udp.Conn, routines)
port := c.GetInt("listen.port", 0)

if !configTest {
@@ -160,78 +158,36 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
} else {
listenHost, err = net.ResolveIPAddr("ip", rawListenHost)
if err != nil {
return nil, util.NewContextualError("Failed to resolve listen.host", nil, err)
return nil, util.ContextualizeIfNeeded("Failed to resolve listen.host", err)
}
}

for i := 0; i < routines; i++ {
l.Infof("listening %q %d", listenHost.IP, port)
udpServer, err := udp.NewListener(l, listenHost.IP, port, routines > 1, c.GetInt("listen.batch", 64))
if err != nil {
return nil, util.NewContextualError("Failed to open udp listener", m{"queue": i}, err)
}
udpServer.ReloadConfig(c)
udpConns[i] = udpServer
}
}

// Set up my internal host map
var preferredRanges []*net.IPNet
rawPreferredRanges := c.GetStringSlice("preferred_ranges", []string{})
// First, check if 'preferred_ranges' is set and fallback to 'local_range'
if len(rawPreferredRanges) > 0 {
for _, rawPreferredRange := range rawPreferredRanges {
_, preferredRange, err := net.ParseCIDR(rawPreferredRange)
if err != nil {
return nil, util.NewContextualError("Failed to parse preferred ranges", nil, err)
}
preferredRanges = append(preferredRanges, preferredRange)
}
}

// local_range was superseded by preferred_ranges. If it is still present,
// merge the local_range setting into preferred_ranges. We will probably
// deprecate local_range and remove in the future.
rawLocalRange := c.GetString("local_range", "")
if rawLocalRange != "" {
_, localRange, err := net.ParseCIDR(rawLocalRange)
if err != nil {
return nil, util.NewContextualError("Failed to parse local_range", nil, err)
}

// Check if the entry for local_range was already specified in
// preferred_ranges. Don't put it into the slice twice if so.
var found bool
for _, r := range preferredRanges {
if r.String() == localRange.String() {
found = true
break
// If port is dynamic, discover it before the next pass through the for loop
// This way all routines will use the same port correctly
if port == 0 {
uPort, err := udpServer.LocalAddr()
if err != nil {
return nil, util.NewContextualError("Failed to get listening port", nil, err)
}
port = int(uPort.Port)
}
}
if !found {
preferredRanges = append(preferredRanges, localRange)
}
}

hostMap := NewHostMap(l, "main", tunCidr, preferredRanges)
hostMap.metricsEnabled = c.GetBool("stats.message_metrics", false)

l.
WithField("network", hostMap.vpnCIDR.String()).
WithField("preferredRanges", hostMap.preferredRanges).
Info("Main HostMap created")

/*
config.SetDefault("promoter.interval", 10)
go hostMap.Promoter(config.GetInt("promoter.interval"))
*/

hostMap := NewHostMapFromConfig(l, tunCidr, c)
punchy := NewPunchyFromConfig(l, c)
lightHouse, err := NewLightHouseFromConfig(ctx, l, c, tunCidr, udpConns[0], punchy)
switch {
case errors.As(err, &util.ContextualError{}):
return nil, err
case err != nil:
return nil, util.NewContextualError("Failed to initialize lighthouse handler", nil, err)
if err != nil {
return nil, util.ContextualizeIfNeeded("Failed to initialize lighthouse handler", err)
}

var messageMetrics *MessageMetrics
@@ -252,13 +208,9 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
messageMetrics: messageMetrics,
}

handshakeManager := NewHandshakeManager(l, tunCidr, preferredRanges, hostMap, lightHouse, udpConns[0], handshakeConfig)
handshakeManager := NewHandshakeManager(l, hostMap, lightHouse, udpConns[0], handshakeConfig)
lightHouse.handshakeTrigger = handshakeManager.trigger

//TODO: These will be reused for psk
//handshakeMACKey := config.GetString("handshake_mac.key", "")
//handshakeAcceptedMACKeys := config.GetStringSlice("handshake_mac.accepted_keys", []string{})

serveDns := false
if c.GetBool("lighthouse.serve_dns", false) {
if c.GetBool("lighthouse.am_lighthouse", false) {
@@ -270,11 +222,12 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg

checkInterval := c.GetInt("timers.connection_alive_interval", 5)
pendingDeletionInterval := c.GetInt("timers.pending_deletion_interval", 10)

ifConfig := &InterfaceConfig{
HostMap: hostMap,
Inside: tun,
Outside: udpConns[0],
certState: cs,
pki: pki,
Cipher: c.GetString("cipher", "aes"),
Firewall: fw,
ServeDns: serveDns,
@@ -282,13 +235,14 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
lightHouse: lightHouse,
checkInterval: time.Second * time.Duration(checkInterval),
pendingDeletionInterval: time.Second * time.Duration(pendingDeletionInterval),
tryPromoteEvery: c.GetUint32("counters.try_promote", defaultPromoteEvery),
reQueryEvery: c.GetUint32("counters.requery_every_packets", defaultReQueryEvery),
reQueryWait: c.GetDuration("timers.requery_wait_duration", defaultReQueryWait),
DropLocalBroadcast: c.GetBool("tun.drop_local_broadcast", false),
DropMulticast: c.GetBool("tun.drop_multicast", false),
routines: routines,
MessageMetrics: messageMetrics,
version: buildVersion,
caPool: caPool,
disconnectInvalid: c.GetBool("pki.disconnect_invalid", false),
relayManager: NewRelayManager(ctx, l, hostMap, c),
punchy: punchy,

@@ -315,21 +269,21 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
// TODO: Better way to attach these, probably want a new interface in InterfaceConfig
// I don't want to make this initial commit too far-reaching though
ifce.writers = udpConns
lightHouse.ifce = ifce

ifce.RegisterConfigChangeCallbacks(c)

ifce.reloadDisconnectInvalid(c)
ifce.reloadSendRecvError(c)

go handshakeManager.Run(ctx, ifce)
go lightHouse.LhUpdateWorker(ctx, ifce)
handshakeManager.f = ifce
go handshakeManager.Run(ctx)
}

// TODO - stats third-party modules start uncancellable goroutines. Update those libs to accept
// a context so that they can exit when the context is Done.
statsStart, err := startStats(l, c, buildVersion, configTest)

if err != nil {
return nil, util.NewContextualError("Failed to start stats emitter", nil, err)
return nil, util.ContextualizeIfNeeded("Failed to start stats emitter", err)
}

if configTest {
@@ -339,7 +293,7 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
//TODO: check if we _should_ be emitting stats
go ifce.emitStats(ctx, c.GetDuration("stats.interval", time.Second*10))

attachCommands(l, c, ssh, hostMap, handshakeManager.pendingHostMap, lightHouse, ifce)
attachCommands(l, c, ssh, ifce)

// Start DNS server last to allow using the nebula IP as lighthouse.dns.host
var dnsStart func()
@@ -348,5 +302,14 @@ func Main(c *config.C, configTest bool, buildVersion string, logger *logrus.Logg
dnsStart = dnsMain(l, hostMap, c)
}

return &Control{ifce, l, cancel, sshStart, statsStart, dnsStart}, nil
return &Control{
ifce,
l,
ctx,
cancel,
sshStart,
statsStart,
dnsStart,
lightHouse.StartUpdateWorker,
}, nil
}
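Main now takes an overlay.DeviceFactory instead of a raw tun fd, which makes the device injectable for tests; passing nil falls back to overlay.NewDeviceFromConfig. A sketch of what a caller gains from this, where newFakeDeviceFactory is a hypothetical test helper, not something defined in this diff:

    // Production: nil selects the real tun device factory.
    ctrl, err := nebula.Main(c, false, buildVersion, logger, nil)

    // Tests: inject a fake device instead of creating a tun interface.
    ctrl, err = nebula.Main(c, false, buildVersion, logger, newFakeDeviceFactory())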
@@ -48,7 +48,7 @@ func (c nistCurve) DH(privkey, pubkey []byte) ([]byte, error) {
}
ecdhPrivKey, err := c.curve.NewPrivateKey(privkey)
if err != nil {
return nil, fmt.Errorf("unable to unmarshal pubkey: %w", err)
return nil, fmt.Errorf("unable to unmarshal private key: %w", err)
}

return ecdhPrivKey.ECDH(ecdhPubKey)
Some files were not shown because too many files have changed in this diff.