Mirror of https://github.com/slackhq/nebula.git (synced 2025-11-22 16:34:25 +01:00)

Compare commits: v1.7.2...changelog — 162 commits
Commits in this compare (abbreviated SHA1s):

824cd3f0d6  9f692175e1  22af56f156  1d73e463cd  105e0ec66c  4870bb680d
a1498ca8f8  9877648da9  8e0a7bcbb7  8c29b15c6d  04d7a8ccba  b55b9019a7
2e85d138cd  9bfdfbafc1  ab81b62ea0  45bbad2f21  3dc56e1184  0736cfa562
248cf194cd  8a6a0f0636  f5f6c269ac  9a63fa0a07  e264a0ff88  00458302ca
e6009b8491  b9aace1e58  a76723eaf5  8109cf2170  97e9834f82  506ba5ab5b
d372df56ab  40cfd00e87  b14bad586a  4c066d8c32  249ae41fec  d9cae9e062
a92056a7db  4eb1da0958  50b24c102e  c0130f8161  f19a28645e  fd1906b16f
d6e4b88bb5  18f69af455  aa18d7fa4f  b5c3486796  f39bfbb7fa  4f4941e187
5f17db5dfa  f31bab5f1a  9cd944d320  f7db0eb5cc  7e7d5e00ca  24f336ec56
d7f52dec41  e54f9dd206  df78158cfa  8b55caa15e  7ed9f2a688  3aca576b07
a99618e95c  8e94eb974e  41e2e1de02  d95fb4a314  cdcea00669  9bd92a7fc2
a5a07cc760  c1711bc9c5  7efa750aef  a390125935  bbb15f8cb1  8b68a08723
f8fb9759e9  1f1d660200  279265058f  2a778de07e  2affd371e3  cc8b3cc961
f346cf4109  8f44f22c37  8822f1366c  e3f5a129c1  0f0534d739  c5a403b7a8
f23d328561  a977ee653d  1f83d1758d  3210198276  0cef634635  637dc18bf8
ea36949d8a  0564d0a2cf  b22ba6eb49  3a221812f6  927ff4cc03  e5945a60aa
072edd56b3  beb5f6bddc  8be9792059  af2fc48378  1d2f95e718  3a8743d511
0209402942  fb55f5b762  01cddb8013  1083279a45  fe16ea566d  3356e03d85
f41db52560  5181cb0474  a44e1b8b05  276978377a  777eb96aea  0912ef14f4
77a8ce1712  87b628ba24  50d6a1e8ca  e78fe0b9ef  5fccbb8676  c289c7a7ca
e3fbfbfd4d  282ca4368e  280fa026ea  dbdb48f182  f7e392995a  d271df8da8
eea5e6a5df  790268a176  06b480e177  076ebc6c6e  7edcf620c0  5a131b2975
223cc6e660  5671c6607c  7ecafbe61d  546eb3bfbc  7364d99e34  83b6dc7b16
3d0da7c859  ed00f5d530  38e56a4858  fce93ccb54  0d715effbc  0c003b64f1
14d0106716  959b015b3b  0bffa76b5e  03e70210a5  9c6592b159  e5af94e27a
96f51f78ea  a10baeee92  52c9e360e7  8caaff7109  1e3c155896  f5db03c834
c5ce945852  7e380bde7e  a3e59a38ef  8ba5d64dbc  3bbf5f4e67  928731acfe
.github/ISSUE_TEMPLATE/bug-report.yml (vendored) — 22 changed lines

@@ -8,13 +8,13 @@ body:
     attributes:
       value: |
         ### Thank you for taking the time to file a bug report!

         Please fill out this form as completely as possible.

   - type: input
     id: version
     attributes:
-      label: What version of `nebula` are you using?
+      label: What version of `nebula` are you using? (`nebula -version`)
       placeholder: 0.0.0
     validations:
       required: true
@@ -41,10 +41,17 @@ body:
     attributes:
       label: Logs from affected hosts
       description: |
-        Provide logs from all affected hosts during the time of the issue.
+        Please provide logs from ALL affected hosts during the time of the issue. If you do not provide logs we will be unable to assist you!
+
+        [Learn how to find Nebula logs here.](https://nebula.defined.net/docs/guides/viewing-nebula-logs/)
+
         Improve formatting by using <code>```</code> at the beginning and end of each log block.
+      value: |
+        ```
+
+        ```
     validations:
-      required: false
+      required: true

   - type: textarea
     id: configs
@@ -52,6 +59,11 @@ body:
       label: Config files from affected hosts
       description: |
         Provide config files for all affected hosts.

         Improve formatting by using <code>```</code> at the beginning and end of each config file.
+      value: |
+        ```
+
+        ```
     validations:
-      required: false
+      required: true
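The template now requires logs and links to a guide for finding them. A minimal sketch for capturing a window of logs on a systemd-managed Linux host (this assumes the service unit is named `nebula`; the unit name and log location vary by platform, so see the linked guide for other setups):

```bash
# Assumption: nebula was installed as a systemd service named "nebula".
# Capture recent logs to attach to the bug report, wrapped in ``` blocks as the form asks.
journalctl -u nebula --since "1 hour ago" --no-pager > nebula-host1.log
```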
.github/dependabot.yml (vendored, new file) — 22 lines

@@ -0,0 +1,22 @@
+version: 2
+updates:
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+    groups:
+      golang-x-dependencies:
+        patterns:
+          - "golang.org/x/*"
+      zx2c4-dependencies:
+        patterns:
+          - "golang.zx2c4.com/*"
+      protobuf-dependencies:
+        patterns:
+          - "github.com/golang/protobuf"
+          - "google.golang.org/protobuf"
.github/workflows/gofmt.yml (vendored) — 6 changed lines

@@ -14,11 +14,11 @@ jobs:
     runs-on: ubuntu-latest
     steps:

-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
-          go-version-file: 'go.mod'
+          go-version: '1.22'
          check-latest: true

      - name: Install goimports
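To reproduce this check locally before pushing, roughly (a sketch, assuming `goimports` is not already on your PATH):

```bash
# Install the same formatter the workflow uses, then list files that differ;
# the CI job fails when this prints anything.
go install golang.org/x/tools/cmd/goimports@latest
goimports -l .
```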
.github/workflows/release.yml (vendored) — 284 changed lines

@@ -7,24 +7,24 @@ name: Create release and upload binaries

 jobs:
   build-linux:
-    name: Build Linux All
+    name: Build Linux/BSD All
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
-          go-version-file: 'go.mod'
+          go-version: '1.22'
          check-latest: true

      - name: Build
        run: |
-          make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd
+          make BUILD_NUMBER="${GITHUB_REF#refs/tags/v}" release-linux release-freebsd release-openbsd release-netbsd
          mkdir release
          mv build/*.tar.gz release

      - name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
        with:
          name: linux-latest
          path: release

@@ -33,11 +33,11 @@ jobs:
    name: Build Windows
    runs-on: windows-latest
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
-          go-version-file: 'go.mod'
+          go-version: '1.22'
          check-latest: true

      - name: Build

@@ -55,7 +55,7 @@ jobs:
          mv dist\windows\wintun build\dist\windows\

      - name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
        with:
          name: windows-latest
          path: build

@@ -64,18 +64,18 @@ jobs:
    name: Build Universal Darwin
    env:
      HAS_SIGNING_CREDS: ${{ secrets.AC_USERNAME != '' }}
-    runs-on: macos-11
+    runs-on: macos-latest
    steps:
-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
-          go-version-file: 'go.mod'
+          go-version: '1.22'
          check-latest: true

      - name: Import certificates
        if: env.HAS_SIGNING_CREDS == 'true'
-        uses: Apple-Actions/import-codesign-certs@v1
+        uses: Apple-Actions/import-codesign-certs@v3
        with:
          p12-file-base64: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_P12_BASE64 }}
          p12-password: ${{ secrets.APPLE_DEVELOPER_CERTIFICATE_PASSWORD }}

@@ -104,22 +104,72 @@ jobs:
          fi

      - name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v4
        with:
          name: darwin-latest
          path: ./release/*

+  build-docker:
+    name: Create and Upload Docker Images
+    # Technically we only need build-linux to succeed, but if any platforms fail we'll
+    # want to investigate and restart the build
+    needs: [build-linux, build-darwin, build-windows]
+    runs-on: ubuntu-latest
+    env:
+      HAS_DOCKER_CREDS: ${{ vars.DOCKERHUB_USERNAME != '' && secrets.DOCKERHUB_TOKEN != '' }}
+      # XXX It's not possible to write a conditional here, so instead we do it on every step
+      #if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
+    steps:
+      # Be sure to checkout the code before downloading artifacts, or they will
+      # be overwritten
+      - name: Checkout code
+        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
+        uses: actions/checkout@v4
+
+      - name: Download artifacts
+        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
+        uses: actions/download-artifact@v4
+        with:
+          name: linux-latest
+          path: artifacts
+
+      - name: Login to Docker Hub
+        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
+        uses: docker/login-action@v3
+        with:
+          username: ${{ vars.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+
+      - name: Set up Docker Buildx
+        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
+        uses: docker/setup-buildx-action@v3
+
+      - name: Build and push images
+        if: ${{ env.HAS_DOCKER_CREDS == 'true' }}
+        env:
+          DOCKER_IMAGE_REPO: ${{ vars.DOCKER_IMAGE_REPO || 'nebulaoss/nebula' }}
+          DOCKER_IMAGE_TAG: ${{ vars.DOCKER_IMAGE_TAG || 'latest' }}
+        run: |
+          mkdir -p build/linux-{amd64,arm64}
+          tar -zxvf artifacts/nebula-linux-amd64.tar.gz -C build/linux-amd64/
+          tar -zxvf artifacts/nebula-linux-arm64.tar.gz -C build/linux-arm64/
+          docker buildx build . --push -f docker/Dockerfile --platform linux/amd64,linux/arm64 --tag "${DOCKER_IMAGE_REPO}:${DOCKER_IMAGE_TAG}" --tag "${DOCKER_IMAGE_REPO}:${GITHUB_REF#refs/tags/v}"
+
   release:
     name: Create and Upload Release
     needs: [build-linux, build-darwin, build-windows]
     runs-on: ubuntu-latest
     steps:
+      - uses: actions/checkout@v4
+
      - name: Download artifacts
-        uses: actions/download-artifact@v2
+        uses: actions/download-artifact@v4
        with:
          path: artifacts

      - name: Zip Windows
        run: |
-          cd windows-latest
+          cd artifacts/windows-latest
          cp windows-amd64/* .
          zip -r nebula-windows-amd64.zip nebula.exe nebula-cert.exe dist
          cp windows-arm64/* .

@@ -127,6 +177,7 @@ jobs:

      - name: Create sha256sum
        run: |
+          cd artifacts
          for dir in linux-latest darwin-latest windows-latest
          do
            (

@@ -156,195 +207,12 @@ jobs:

      - name: Create Release
-        id: create_release
-        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          tag_name: ${{ github.ref }}
-          release_name: Release ${{ github.ref }}
-          draft: false
-          prerelease: false
-
-      ##
-      ## Upload assets (I wish we could just upload the whole folder at once...
-      ##
-
-      - name: Upload SHASUM256.txt
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./SHASUM256.txt
-          asset_name: SHASUM256.txt
-          asset_content_type: text/plain
-
-      - name: Upload darwin zip
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./darwin-latest/nebula-darwin.zip
-          asset_name: nebula-darwin.zip
-          asset_content_type: application/zip
-
-      - name: Upload windows-amd64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./windows-latest/nebula-windows-amd64.zip
-          asset_name: nebula-windows-amd64.zip
-          asset_content_type: application/zip
-
-      - name: Upload windows-arm64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./windows-latest/nebula-windows-arm64.zip
-          asset_name: nebula-windows-arm64.zip
-          asset_content_type: application/zip
-
-      - name: Upload linux-amd64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-amd64.tar.gz
-          asset_name: nebula-linux-amd64.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-386
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-386.tar.gz
-          asset_name: nebula-linux-386.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-ppc64le
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-ppc64le.tar.gz
-          asset_name: nebula-linux-ppc64le.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-arm-5
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-arm-5.tar.gz
-          asset_name: nebula-linux-arm-5.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-arm-6
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-arm-6.tar.gz
-          asset_name: nebula-linux-arm-6.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-arm-7
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-arm-7.tar.gz
-          asset_name: nebula-linux-arm-7.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-arm64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-arm64.tar.gz
-          asset_name: nebula-linux-arm64.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mips
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mips.tar.gz
-          asset_name: nebula-linux-mips.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mipsle
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mipsle.tar.gz
-          asset_name: nebula-linux-mipsle.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mips64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mips64.tar.gz
-          asset_name: nebula-linux-mips64.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mips64le
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mips64le.tar.gz
-          asset_name: nebula-linux-mips64le.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-mips-softfloat
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-mips-softfloat.tar.gz
-          asset_name: nebula-linux-mips-softfloat.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload linux-riscv64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-linux-riscv64.tar.gz
-          asset_name: nebula-linux-riscv64.tar.gz
-          asset_content_type: application/gzip
-
-      - name: Upload freebsd-amd64
-        uses: actions/upload-release-asset@v1.0.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        with:
-          upload_url: ${{ steps.create_release.outputs.upload_url }}
-          asset_path: ./linux-latest/nebula-freebsd-amd64.tar.gz
-          asset_name: nebula-freebsd-amd64.tar.gz
-          asset_content_type: application/gzip
+        run: |
+          cd artifacts
+          gh release create \
+            --verify-tag \
+            --title "Release ${{ github.ref_name }}" \
+            "${{ github.ref_name }}" \
+            SHASUM256.txt *-latest/*.zip *-latest/*.tar.gz
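Since every asset now lands on the release in a single `gh release create` call, a downstream consumer can check a download against the published manifest. A sketch (the `v1.9.7` tag and asset name are examples, and the exact path format recorded inside SHASUM256.txt may differ):

```bash
# Fetch one artifact plus the checksum manifest, then verify and inspect it.
gh release download v1.9.7 --repo slackhq/nebula \
  --pattern 'nebula-linux-amd64.tar.gz' --pattern 'SHASUM256.txt'
grep 'nebula-linux-amd64.tar.gz' SHASUM256.txt | sha256sum -c -
tar -tzf nebula-linux-amd64.tar.gz   # expect the nebula and nebula-cert binaries
```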
.github/workflows/smoke-extra.yml (vendored, new file) — 48 lines

@@ -0,0 +1,48 @@
+name: smoke-extra
+on:
+  push:
+    branches:
+      - master
+  pull_request:
+    types: [opened, synchronize, labeled, reopened]
+    paths:
+      - '.github/workflows/smoke**'
+      - '**Makefile'
+      - '**.go'
+      - '**.proto'
+      - 'go.mod'
+      - 'go.sum'
+jobs:
+
+  smoke-extra:
+    if: github.ref == 'refs/heads/master' || contains(github.event.pull_request.labels.*.name, 'smoke-test-extra')
+    name: Run extra smoke tests
+    runs-on: ubuntu-latest
+    steps:
+
+      - uses: actions/checkout@v4
+
+      - uses: actions/setup-go@v5
+        with:
+          go-version-file: 'go.mod'
+          check-latest: true
+
+      - name: install vagrant
+        run: sudo apt-get update && sudo apt-get install -y vagrant virtualbox
+
+      - name: freebsd-amd64
+        run: make smoke-vagrant/freebsd-amd64
+
+      - name: openbsd-amd64
+        run: make smoke-vagrant/openbsd-amd64
+
+      - name: netbsd-amd64
+        run: make smoke-vagrant/netbsd-amd64
+
+      - name: linux-386
+        run: make smoke-vagrant/linux-386
+
+      - name: linux-amd64-ipv6disable
+        run: make smoke-vagrant/linux-amd64-ipv6disable
+
+    timeout-minutes: 30
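Each step maps onto the new `smoke-vagrant/%` pattern target added to the Makefile (see the Makefile diff below), so a single platform can be exercised locally as well:

```bash
# Prerequisites mirror the workflow's "install vagrant" step.
sudo apt-get update && sudo apt-get install -y vagrant virtualbox
# Build the smoke containers plus a freebsd-amd64 cross binary, then run the test.
make smoke-vagrant/freebsd-amd64
```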
.github/workflows/smoke.yml (vendored) — 8 changed lines

@@ -18,15 +18,15 @@ jobs:
    runs-on: ubuntu-latest
    steps:

-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
-          go-version-file: 'go.mod'
+          go-version: '1.22'
          check-latest: true

      - name: build
-        run: make bin-docker
+        run: make bin-docker CGO_ENABLED=1 BUILD_ARGS=-race

      - name: setup docker image
        working-directory: ./.github/workflows/smoke
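Building the smoke binaries with the race detector requires cgo, which the job now passes explicitly; the same invocation works locally and mirrors the Makefile's `smoke-docker-race` settings:

```bash
# -race needs cgo; CGO_ENABLED defaults to 0 in the Makefile.
make bin-docker CGO_ENABLED=1 BUILD_ARGS=-race
```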
.github/workflows/smoke/build-relay.sh (vendored) — 2 changed lines

@@ -41,4 +41,4 @@ EOF
 ../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
 )

-sudo docker build -t nebula:smoke-relay .
+docker build -t nebula:smoke-relay .
.github/workflows/smoke/build.sh (vendored) — 7 changed lines

@@ -11,6 +11,11 @@ mkdir ./build
 cp ../../../../build/linux-amd64/nebula .
 cp ../../../../build/linux-amd64/nebula-cert .

+if [ "$1" ]
+then
+    cp "../../../../build/$1/nebula" "$1-nebula"
+fi
+
 HOST="lighthouse1" \
     AM_LIGHTHOUSE=true \
     ../genconfig.sh >lighthouse1.yml
@@ -36,4 +41,4 @@ mkdir ./build
 ../../../../nebula-cert sign -name "host4" -groups "host,host4" -ip "192.168.100.4/24"
 )

-sudo docker build -t "nebula:${NAME:-smoke}" .
+docker build -t "nebula:${NAME:-smoke}" .
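The new optional argument bundles a second, cross-compiled binary into the build directory so the Vagrant guest can run it; a usage sketch (assuming `build/freebsd-amd64/nebula` was already produced by the Makefile):

```bash
# Stages freebsd-amd64-nebula next to the Linux binaries; smoke-vagrant.sh
# later executes it inside the guest as /nebula/freebsd-amd64-nebula.
./build.sh freebsd-amd64
```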
.github/workflows/smoke/genconfig.sh (vendored) — 2 changed lines

@@ -47,7 +47,7 @@ listen:
   port: ${LISTEN_PORT:-4242}

 tun:
-  dev: ${TUN_DEV:-nebula1}
+  dev: ${TUN_DEV:-tun0}

 firewall:
   inbound_action: reject
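The default device name changes from `nebula1` to `tun0`, presumably because the BSD targets expect `tunN`-style device names while Linux accepts either; the old name remains available via the existing environment override:

```bash
# TUN_DEV still overrides the new tun0 default when generating a config.
HOST="host3" TUN_DEV="nebula1" ../genconfig.sh > host3.yml
```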
.github/workflows/smoke/smoke-relay.sh (vendored) — 52 changed lines

@@ -14,24 +14,24 @@ cleanup() {
     set +e
     if [ "$(jobs -r)" ]
     then
-        sudo docker kill lighthouse1 host2 host3 host4
+        docker kill lighthouse1 host2 host3 host4
     fi
 }

 trap cleanup EXIT

-sudo docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test
-sudo docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test
-sudo docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test
-sudo docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test
+docker run --name lighthouse1 --rm nebula:smoke-relay -config lighthouse1.yml -test
+docker run --name host2 --rm nebula:smoke-relay -config host2.yml -test
+docker run --name host3 --rm nebula:smoke-relay -config host3.yml -test
+docker run --name host4 --rm nebula:smoke-relay -config host4.yml -test

-sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
+docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
 sleep 1
-sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
+docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
 sleep 1
-sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
+docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
 sleep 1
-sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
+docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke-relay -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
 sleep 1

 set +x
@@ -39,44 +39,44 @@ echo
 echo " *** Testing ping from lighthouse1"
 echo
 set -x
-sudo docker exec lighthouse1 ping -c1 192.168.100.2
-sudo docker exec lighthouse1 ping -c1 192.168.100.3
-sudo docker exec lighthouse1 ping -c1 192.168.100.4
+docker exec lighthouse1 ping -c1 192.168.100.2
+docker exec lighthouse1 ping -c1 192.168.100.3
+docker exec lighthouse1 ping -c1 192.168.100.4

 set +x
 echo
 echo " *** Testing ping from host2"
 echo
 set -x
-sudo docker exec host2 ping -c1 192.168.100.1
+docker exec host2 ping -c1 192.168.100.1
 # Should fail because no relay configured in this direction
-! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
-! sudo docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1
+! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
+! docker exec host2 ping -c1 192.168.100.4 -w5 || exit 1

 set +x
 echo
 echo " *** Testing ping from host3"
 echo
 set -x
-sudo docker exec host3 ping -c1 192.168.100.1
-sudo docker exec host3 ping -c1 192.168.100.2
-sudo docker exec host3 ping -c1 192.168.100.4
+docker exec host3 ping -c1 192.168.100.1
+docker exec host3 ping -c1 192.168.100.2
+docker exec host3 ping -c1 192.168.100.4

 set +x
 echo
 echo " *** Testing ping from host4"
 echo
 set -x
-sudo docker exec host4 ping -c1 192.168.100.1
+docker exec host4 ping -c1 192.168.100.1
 # Should fail because relays not allowed
-! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
-sudo docker exec host4 ping -c1 192.168.100.3
+! docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
+docker exec host4 ping -c1 192.168.100.3

-sudo docker exec host4 sh -c 'kill 1'
-sudo docker exec host3 sh -c 'kill 1'
-sudo docker exec host2 sh -c 'kill 1'
-sudo docker exec lighthouse1 sh -c 'kill 1'
-sleep 1
+docker exec host4 sh -c 'kill 1'
+docker exec host3 sh -c 'kill 1'
+docker exec host2 sh -c 'kill 1'
+docker exec lighthouse1 sh -c 'kill 1'
+sleep 5

 if [ "$(jobs -r)" ]
 then
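Dropping `sudo` throughout assumes the invoking user can already talk to the Docker daemon, which is true on GitHub-hosted runners; to run these scripts locally under the same assumption:

```bash
# One-time local setup so docker works without sudo (log out and back in afterwards).
sudo usermod -aG docker "$USER"
```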
.github/workflows/smoke/smoke-vagrant.sh (vendored, new executable file) — 105 lines

@@ -0,0 +1,105 @@
+#!/bin/bash
+
+set -e -x
+
+set -o pipefail
+
+export VAGRANT_CWD="$PWD/vagrant-$1"
+
+mkdir -p logs
+
+cleanup() {
+    echo
+    echo " *** cleanup"
+    echo
+
+    set +e
+    if [ "$(jobs -r)" ]
+    then
+        docker kill lighthouse1 host2
+    fi
+    vagrant destroy -f
+}
+
+trap cleanup EXIT
+
+CONTAINER="nebula:${NAME:-smoke}"
+
+docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
+docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
+
+vagrant up
+vagrant ssh -c "cd /nebula && /nebula/$1-nebula -config host3.yml -test"
+
+docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
+sleep 1
+docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
+sleep 1
+vagrant ssh -c "cd /nebula && sudo sh -c 'echo \$\$ >/nebula/pid && exec /nebula/$1-nebula -config host3.yml'" &
+sleep 15
+
+# grab tcpdump pcaps for debugging
+docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
+docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
+docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
+docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
+# vagrant ssh -c "tcpdump -i nebula1 -q -w - -U" 2>logs/host3.inside.log >logs/host3.inside.pcap &
+# vagrant ssh -c "tcpdump -i eth0 -q -w - -U" 2>logs/host3.outside.log >logs/host3.outside.pcap &
+
+docker exec host2 ncat -nklv 0.0.0.0 2000 &
+vagrant ssh -c "ncat -nklv 0.0.0.0 2000" &
+#docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
+#vagrant ssh -c "ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000" &
+
+set +x
+echo
+echo " *** Testing ping from lighthouse1"
+echo
+set -x
+docker exec lighthouse1 ping -c1 192.168.100.2
+docker exec lighthouse1 ping -c1 192.168.100.3
+
+set +x
+echo
+echo " *** Testing ping from host2"
+echo
+set -x
+docker exec host2 ping -c1 192.168.100.1
+# Should fail because not allowed by host3 inbound firewall
+! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
+
+set +x
+echo
+echo " *** Testing ncat from host2"
+echo
+set -x
+# Should fail because not allowed by host3 inbound firewall
+#! docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
+#! docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
+
+set +x
+echo
+echo " *** Testing ping from host3"
+echo
+set -x
+vagrant ssh -c "ping -c1 192.168.100.1"
+vagrant ssh -c "ping -c1 192.168.100.2"
+
+set +x
+echo
+echo " *** Testing ncat from host3"
+echo
+set -x
+#vagrant ssh -c "ncat -nzv -w5 192.168.100.2 2000"
+#vagrant ssh -c "ncat -nzuv -w5 192.168.100.2 3000" | grep -q host2
+
+vagrant ssh -c "sudo xargs kill </nebula/pid"
+docker exec host2 sh -c 'kill 1'
+docker exec lighthouse1 sh -c 'kill 1'
+sleep 1
+
+if [ "$(jobs -r)" ]
+then
+    echo "nebula still running after SIGTERM sent" >&2
+    exit 1
+fi
.github/workflows/smoke/smoke.sh (vendored) — 92 changed lines

@@ -14,7 +14,7 @@ cleanup() {
     set +e
     if [ "$(jobs -r)" ]
     then
-        sudo docker kill lighthouse1 host2 host3 host4
+        docker kill lighthouse1 host2 host3 host4
     fi
 }

@@ -22,51 +22,51 @@ trap cleanup EXIT

 CONTAINER="nebula:${NAME:-smoke}"

-sudo docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
-sudo docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
-sudo docker run --name host3 --rm "$CONTAINER" -config host3.yml -test
-sudo docker run --name host4 --rm "$CONTAINER" -config host4.yml -test
+docker run --name lighthouse1 --rm "$CONTAINER" -config lighthouse1.yml -test
+docker run --name host2 --rm "$CONTAINER" -config host2.yml -test
+docker run --name host3 --rm "$CONTAINER" -config host3.yml -test
+docker run --name host4 --rm "$CONTAINER" -config host4.yml -test

-sudo docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
+docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config lighthouse1.yml 2>&1 | tee logs/lighthouse1 | sed -u 's/^/ [lighthouse1] /' &
 sleep 1
-sudo docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
+docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host2.yml 2>&1 | tee logs/host2 | sed -u 's/^/ [host2] /' &
 sleep 1
-sudo docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
+docker run --name host3 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host3.yml 2>&1 | tee logs/host3 | sed -u 's/^/ [host3] /' &
 sleep 1
-sudo docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
+docker run --name host4 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm "$CONTAINER" -config host4.yml 2>&1 | tee logs/host4 | sed -u 's/^/ [host4] /' &
 sleep 1

 # grab tcpdump pcaps for debugging
-sudo docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
-sudo docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
-sudo docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
-sudo docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
-sudo docker exec host3 tcpdump -i nebula1 -q -w - -U 2>logs/host3.inside.log >logs/host3.inside.pcap &
-sudo docker exec host3 tcpdump -i eth0 -q -w - -U 2>logs/host3.outside.log >logs/host3.outside.pcap &
-sudo docker exec host4 tcpdump -i nebula1 -q -w - -U 2>logs/host4.inside.log >logs/host4.inside.pcap &
-sudo docker exec host4 tcpdump -i eth0 -q -w - -U 2>logs/host4.outside.log >logs/host4.outside.pcap &
+docker exec lighthouse1 tcpdump -i nebula1 -q -w - -U 2>logs/lighthouse1.inside.log >logs/lighthouse1.inside.pcap &
+docker exec lighthouse1 tcpdump -i eth0 -q -w - -U 2>logs/lighthouse1.outside.log >logs/lighthouse1.outside.pcap &
+docker exec host2 tcpdump -i nebula1 -q -w - -U 2>logs/host2.inside.log >logs/host2.inside.pcap &
+docker exec host2 tcpdump -i eth0 -q -w - -U 2>logs/host2.outside.log >logs/host2.outside.pcap &
+docker exec host3 tcpdump -i nebula1 -q -w - -U 2>logs/host3.inside.log >logs/host3.inside.pcap &
+docker exec host3 tcpdump -i eth0 -q -w - -U 2>logs/host3.outside.log >logs/host3.outside.pcap &
+docker exec host4 tcpdump -i nebula1 -q -w - -U 2>logs/host4.inside.log >logs/host4.inside.pcap &
+docker exec host4 tcpdump -i eth0 -q -w - -U 2>logs/host4.outside.log >logs/host4.outside.pcap &

-sudo docker exec host2 ncat -nklv 0.0.0.0 2000 &
-sudo docker exec host3 ncat -nklv 0.0.0.0 2000 &
-sudo docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
-sudo docker exec host3 ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000 &
+docker exec host2 ncat -nklv 0.0.0.0 2000 &
+docker exec host3 ncat -nklv 0.0.0.0 2000 &
+docker exec host2 ncat -e '/usr/bin/echo host2' -nkluv 0.0.0.0 3000 &
+docker exec host3 ncat -e '/usr/bin/echo host3' -nkluv 0.0.0.0 3000 &

 set +x
 echo
 echo " *** Testing ping from lighthouse1"
 echo
 set -x
-sudo docker exec lighthouse1 ping -c1 192.168.100.2
-sudo docker exec lighthouse1 ping -c1 192.168.100.3
+docker exec lighthouse1 ping -c1 192.168.100.2
+docker exec lighthouse1 ping -c1 192.168.100.3

 set +x
 echo
 echo " *** Testing ping from host2"
 echo
 set -x
-sudo docker exec host2 ping -c1 192.168.100.1
+docker exec host2 ping -c1 192.168.100.1
 # Should fail because not allowed by host3 inbound firewall
-! sudo docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1
+! docker exec host2 ping -c1 192.168.100.3 -w5 || exit 1

 set +x
 echo
@@ -74,34 +74,34 @@ echo " *** Testing ncat from host2"
 echo
 set -x
 # Should fail because not allowed by host3 inbound firewall
-! sudo docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
-! sudo docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
+! docker exec host2 ncat -nzv -w5 192.168.100.3 2000 || exit 1
+! docker exec host2 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1

 set +x
 echo
 echo " *** Testing ping from host3"
 echo
 set -x
-sudo docker exec host3 ping -c1 192.168.100.1
-sudo docker exec host3 ping -c1 192.168.100.2
+docker exec host3 ping -c1 192.168.100.1
+docker exec host3 ping -c1 192.168.100.2

 set +x
 echo
 echo " *** Testing ncat from host3"
 echo
 set -x
-sudo docker exec host3 ncat -nzv -w5 192.168.100.2 2000
-sudo docker exec host3 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2
+docker exec host3 ncat -nzv -w5 192.168.100.2 2000
+docker exec host3 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2

 set +x
 echo
 echo " *** Testing ping from host4"
 echo
 set -x
-sudo docker exec host4 ping -c1 192.168.100.1
+docker exec host4 ping -c1 192.168.100.1
 # Should fail because not allowed by host4 outbound firewall
-! sudo docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
-! sudo docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1
+! docker exec host4 ping -c1 192.168.100.2 -w5 || exit 1
+! docker exec host4 ping -c1 192.168.100.3 -w5 || exit 1

 set +x
 echo
@@ -109,10 +109,10 @@ echo " *** Testing ncat from host4"
 echo
 set -x
 # Should fail because not allowed by host4 outbound firewall
-! sudo docker exec host4 ncat -nzv -w5 192.168.100.2 2000 || exit 1
-! sudo docker exec host4 ncat -nzv -w5 192.168.100.3 2000 || exit 1
-! sudo docker exec host4 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 || exit 1
-! sudo docker exec host4 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1
+! docker exec host4 ncat -nzv -w5 192.168.100.2 2000 || exit 1
+! docker exec host4 ncat -nzv -w5 192.168.100.3 2000 || exit 1
+! docker exec host4 ncat -nzuv -w5 192.168.100.2 3000 | grep -q host2 || exit 1
+! docker exec host4 ncat -nzuv -w5 192.168.100.3 3000 | grep -q host3 || exit 1

 set +x
 echo
@@ -120,16 +120,16 @@ echo " *** Testing conntrack"
 echo
 set -x
 # host2 can ping host3 now that host3 pinged it first
-sudo docker exec host2 ping -c1 192.168.100.3
+docker exec host2 ping -c1 192.168.100.3
 # host4 can ping host2 once conntrack established
-sudo docker exec host2 ping -c1 192.168.100.4
-sudo docker exec host4 ping -c1 192.168.100.2
+docker exec host2 ping -c1 192.168.100.4
+docker exec host4 ping -c1 192.168.100.2

-sudo docker exec host4 sh -c 'kill 1'
-sudo docker exec host3 sh -c 'kill 1'
-sudo docker exec host2 sh -c 'kill 1'
-sudo docker exec lighthouse1 sh -c 'kill 1'
-sleep 1
+docker exec host4 sh -c 'kill 1'
+docker exec host3 sh -c 'kill 1'
+docker exec host2 sh -c 'kill 1'
+docker exec lighthouse1 sh -c 'kill 1'
+sleep 5

 if [ "$(jobs -r)" ]
 then
.github/workflows/smoke/vagrant-freebsd-amd64/Vagrantfile (vendored, new file) — 7 lines

@@ -0,0 +1,7 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+Vagrant.configure("2") do |config|
+  config.vm.box = "generic/freebsd14"
+
+  config.vm.synced_folder "../build", "/nebula", type: "rsync"
+end
.github/workflows/smoke/vagrant-linux-386/Vagrantfile (vendored, new file) — 7 lines

@@ -0,0 +1,7 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+Vagrant.configure("2") do |config|
+  config.vm.box = "ubuntu/xenial32"
+
+  config.vm.synced_folder "../build", "/nebula"
+end
.github/workflows/smoke/vagrant-linux-amd64-ipv6disable/Vagrantfile (vendored, new file) — 16 lines

@@ -0,0 +1,16 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+Vagrant.configure("2") do |config|
+  config.vm.box = "ubuntu/jammy64"
+
+  config.vm.synced_folder "../build", "/nebula"
+
+  config.vm.provision :shell do |shell|
+    shell.inline = <<-EOF
+      sed -i 's/GRUB_CMDLINE_LINUX=""/GRUB_CMDLINE_LINUX="ipv6.disable=1"/' /etc/default/grub
+      update-grub
+    EOF
+    shell.privileged = true
+    shell.reboot = true
+  end
+end
.github/workflows/smoke/vagrant-netbsd-amd64/Vagrantfile (vendored, new file) — 7 lines

@@ -0,0 +1,7 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+Vagrant.configure("2") do |config|
+  config.vm.box = "generic/netbsd9"
+
+  config.vm.synced_folder "../build", "/nebula", type: "rsync"
+end
.github/workflows/smoke/vagrant-openbsd-amd64/Vagrantfile (vendored, new file) — 7 lines

@@ -0,0 +1,7 @@
+# -*- mode: ruby -*-
+# vi: set ft=ruby :
+Vagrant.configure("2") do |config|
+  config.vm.box = "generic/openbsd7"
+
+  config.vm.synced_folder "../build", "/nebula", type: "rsync"
+end
.github/workflows/test.yml (vendored) — 35 changed lines

@@ -18,11 +18,11 @@ jobs:
    runs-on: ubuntu-latest
    steps:

-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
-          go-version-file: 'go.mod'
+          go-version: '1.22'
          check-latest: true

      - name: Build

@@ -37,10 +37,13 @@ jobs:
      - name: End 2 end
        run: make e2evv

-      - uses: actions/upload-artifact@v3
+      - name: Build test mobile
+        run: make build-test-mobile
+
+      - uses: actions/upload-artifact@v4
        with:
-          name: e2e packet flow
-          path: e2e/mermaid/
+          name: e2e packet flow linux-latest
+          path: e2e/mermaid/linux-latest
          if-no-files-found: warn

  test-linux-boringcrypto:

@@ -48,11 +51,11 @@
    runs-on: ubuntu-latest
    steps:

-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
-          go-version-file: 'go.mod'
+          go-version: '1.22'
          check-latest: true

      - name: Build

@@ -69,14 +72,14 @@
    runs-on: ${{ matrix.os }}
    strategy:
      matrix:
-        os: [windows-latest, macos-11]
+        os: [windows-latest, macos-latest]
    steps:

-      - uses: actions/checkout@v3
+      - uses: actions/checkout@v4

-      - uses: actions/setup-go@v4
+      - uses: actions/setup-go@v5
        with:
-          go-version-file: 'go.mod'
+          go-version: '1.22'
          check-latest: true

      - name: Build nebula

@@ -94,8 +97,8 @@
      - name: End 2 end
        run: make e2evv

-      - uses: actions/upload-artifact@v3
+      - uses: actions/upload-artifact@v4
        with:
-          name: e2e packet flow
-          path: e2e/mermaid/
+          name: e2e packet flow ${{ matrix.os }}
+          path: e2e/mermaid/${{ matrix.os }}
          if-no-files-found: warn
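The new `Build test mobile` step only compile-checks the library packages for mobile targets; it maps to the `build-test-mobile` Makefile target added below and can be run locally the same way:

```bash
# Cross-compiles the non-cmd, non-examples packages for ios and android.
make build-test-mobile
```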
CHANGELOG.md — 223 changed lines

@@ -7,6 +7,216 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

 ## [Unreleased]

+## [1.9.7] - 2025-10-8
+
+### Security
+
+- Fix an opportunity for emitting a packet that was sent with a spoofed source address within the configured vpn network. (#1494)
+
+### Changed
+
+- Disable sending `recv_error` messages when a packet is received outside the allowable counter window. (#1459)
+- Improve error messages and remove some unnecessary fatal conditions in the Windows and generic udp listener. (#1543)
+
+## [1.9.6] - 2025-7-15
+
+### Added
+
+- Support dropping inactive tunnels. This is disabled by default in this release but can be enabled with `tunnels.drop_inactive`. See example config for more details. (#1413)
+
+### Fixed
+
+- Fix Darwin freeze due to presence of some Network Extensions (#1426)
+- Ensure the same relay tunnel is always used when multiple relay tunnels are present (#1422)
+- Fix Windows freeze due to ICMP error handling (#1412)
+- Fix relay migration panic (#1403)
+
+## [1.9.5] - 2024-12-05
+
+### Added
+
+- Gracefully ignore v2 certificates. (#1282)
+
+### Fixed
+
+- Fix relays that refuse to re-establish after one of the remote tunnel pairs breaks. (#1277)
+
+## [1.9.4] - 2024-09-09
+
+### Added
+
+- Support UDP dialing with gVisor. (#1181)
+
+### Changed
+
+- Make some Nebula state programmatically available via control object. (#1188)
+- Switch internal representation of IPs to netip, to prepare for IPv6 support
+  in the overlay. (#1173)
+- Minor build and cleanup changes. (#1171, #1164, #1162)
+- Various dependency updates. (#1195, #1190, #1174, #1168, #1167, #1161, #1147, #1146)
+
+### Fixed
+
+- Fix a bug on big endian hosts, like mips. (#1194)
+- Fix a rare panic if a local index collision happens. (#1191)
+- Fix integer wraparound in the calculation of handshake timeouts on 32-bit targets. (#1185)
+
+## [1.9.3] - 2024-06-06
+
+### Fixed
+
+- Initialize messageCounter to 2 instead of verifying later. (#1156)
+
+## [1.9.2] - 2024-06-03
+
+### Fixed
+
+- Ensure messageCounter is set before handshake is complete. (#1154)
+
+## [1.9.1] - 2024-05-29
+
+### Fixed
+
+- Fixed a potential deadlock in GetOrHandshake. (#1151)
+
+## [1.9.0] - 2024-05-07
+
+### Deprecated
+
+- This release adds a new setting `default_local_cidr_any` that defaults to
+  true to match previous behavior, but will default to false in the next
+  release (1.10). When set to false, `local_cidr` is matched correctly for
+  firewall rules on hosts acting as unsafe routers, and should be set for any
+  firewall rules you want to allow unsafe route hosts to access. See the issue
+  and example config for more details. (#1071, #1099)
+
+### Added
+
+- Nebula now has an official Docker image `nebulaoss/nebula` that is
+  distroless and contains just the `nebula` and `nebula-cert` binaries. You
+  can find it here: https://hub.docker.com/r/nebulaoss/nebula (#1037)
+
+- Experimental binaries for `loong64` are now provided. (#1003)
+
+- Added example service script for OpenRC. (#711)
+
+- The SSH daemon now supports inlined host keys. (#1054)
+
+- The SSH daemon now supports certificates with `sshd.trusted_cas`. (#1098)
+
+### Changed
+
+- Config setting `tun.unsafe_routes` is now reloadable. (#1083)
+
+- Small documentation and internal improvements. (#1065, #1067, #1069, #1108,
+  #1109, #1111, #1135)
+
+- Various dependency updates. (#1139, #1138, #1134, #1133, #1126, #1123, #1110,
+  #1094, #1092, #1087, #1086, #1085, #1072, #1063, #1059, #1055, #1053, #1047,
+  #1046, #1034, #1022)
+
+### Removed
+
+- Support for the deprecated `local_range` option has been removed. Please
+  change to `preferred_ranges` (which is also now reloadable). (#1043)
+
+- We are now building with go1.22, which means that for Windows you need at
+  least Windows 10 or Windows Server 2016. This is because support for earlier
+  versions was removed in Go 1.21. See https://go.dev/doc/go1.21#windows (#981)
+
+- Removed vagrant example, as it was unmaintained. (#1129)
+
+- Removed Fedora and Arch nebula.service files, as they are maintained in the
+  upstream repos. (#1128, #1132)
+
+- Remove the TCP round trip tracking metrics, as they never had correct data
+  and were an experiment to begin with. (#1114)
+
+### Fixed
+
+- Fixed a potential deadlock introduced in 1.8.1. (#1112)
+
+- Fixed support for Linux when IPv6 has been disabled at the OS level. (#787)
+
+- DNS will return NXDOMAIN now when there are no results. (#845)
+
+- Allow `::` in `lighthouse.dns.host`. (#1115)
+
+- Capitalization of `NotAfter` fixed in DNS TXT response. (#1127)
+
+- Don't log invalid certificates. It is untrusted data and can cause a large
+  volume of logs. (#1116)
+
+## [1.8.2] - 2024-01-08
+
+### Fixed
+
+- Fix multiple routines when listen.port is zero. This was a regression
+  introduced in v1.6.0. (#1057)
+
+### Changed
+
+- Small dependency update for Noise. (#1038)
+
+## [1.8.1] - 2023-12-19
+
+### Security
+
+- Update `golang.org/x/crypto`, which includes a fix for CVE-2023-48795. (#1048)
+
+### Fixed
+
+- Fix a deadlock introduced in v1.8.0 that could occur during handshakes. (#1044)
+
+- Fix mobile builds. (#1035)
+
+## [1.8.0] - 2023-12-06
+
+### Deprecated
+
+- The next minor release of Nebula, 1.9.0, will require at least Windows 10 or
+  Windows Server 2016. This is because support for earlier versions was removed
+  in Go 1.21. See https://go.dev/doc/go1.21#windows
+
+### Added
+
+- Linux: Notify systemd of service readiness. This should resolve timing issues
+  with services that depend on Nebula being active. For an example of how to
+  enable this, see: `examples/service_scripts/nebula.service`. (#929)
+
+- Windows: Use Registered IO (RIO) when possible. Testing on a Windows 11
+  machine shows ~50x improvement in throughput. (#905)
+
+- NetBSD, OpenBSD: Added rudimentary support. (#916, #812)
+
+- FreeBSD: Add support for naming tun devices. (#903)
+
+### Changed
+
+- `pki.disconnect_invalid` will now default to true. This means that once a
+  certificate expires, the tunnel will be disconnected. If you use SIGHUP to
+  reload certificates without restarting Nebula, you should ensure all of your
+  clients are on 1.7.0 or newer before you enable this feature. (#859)
+
+- Limit how often a busy tunnel can requery the lighthouse. The new config
+  option `timers.requery_wait_duration` defaults to `60s`. (#940)
+
+- The internal structures for hostmaps were refactored to reduce memory usage
+  and the potential for subtle bugs. (#843, #938, #953, #954, #955)
+
+- Lots of dependency updates.
+
+### Fixed
+
+- Windows: Retry wintun device creation if it fails the first time. (#985)
+
+- Fix issues with firewall reject packets that could cause panics. (#957)
+
+- Fix relay migration during re-handshakes. (#964)
+
+- Various other refactors and fixes. (#935, #952, #972, #961, #996, #1002,
+  #987, #1004, #1030, #1032, ...)
+
 ## [1.7.2] - 2023-06-01

 ### Fixed

@@ -488,7 +698,18 @@ created.)

 - Initial public release.

-[Unreleased]: https://github.com/slackhq/nebula/compare/v1.7.2...HEAD
+[Unreleased]: https://github.com/slackhq/nebula/compare/v1.9.7...HEAD
+[1.9.7]: https://github.com/slackhq/nebula/releases/tag/v1.9.7
+[1.9.6]: https://github.com/slackhq/nebula/releases/tag/v1.9.6
+[1.9.5]: https://github.com/slackhq/nebula/releases/tag/v1.9.5
+[1.9.4]: https://github.com/slackhq/nebula/releases/tag/v1.9.4
+[1.9.3]: https://github.com/slackhq/nebula/releases/tag/v1.9.3
+[1.9.2]: https://github.com/slackhq/nebula/releases/tag/v1.9.2
+[1.9.1]: https://github.com/slackhq/nebula/releases/tag/v1.9.1
+[1.9.0]: https://github.com/slackhq/nebula/releases/tag/v1.9.0
+[1.8.2]: https://github.com/slackhq/nebula/releases/tag/v1.8.2
+[1.8.1]: https://github.com/slackhq/nebula/releases/tag/v1.8.1
+[1.8.0]: https://github.com/slackhq/nebula/releases/tag/v1.8.0
 [1.7.2]: https://github.com/slackhq/nebula/releases/tag/v1.7.2
 [1.7.1]: https://github.com/slackhq/nebula/releases/tag/v1.7.1
 [1.7.0]: https://github.com/slackhq/nebula/releases/tag/v1.7.0

@@ -33,6 +33,5 @@ l.WithError(err).
     WithField("vpnIp", IntIp(hostinfo.hostId)).
     WithField("udpAddr", addr).
     WithField("handshake", m{"stage": 1, "style": "ix"}).
-    WithField("cert", remoteCert).
     Info("Invalid certificate from host")
 ```
Makefile — 60 changed lines

@@ -1,20 +1,14 @@
-GOMINVERSION = 1.20
 NEBULA_CMD_PATH = "./cmd/nebula"
-GO111MODULE = on
-export GO111MODULE
 CGO_ENABLED = 0
 export CGO_ENABLED

 # Set up OS specific bits
 ifeq ($(OS),Windows_NT)
-	#TODO: we should be able to ditch awk as well
-	GOVERSION := $(shell go version | awk "{print substr($$3, 3)}")
-	GOISMIN := $(shell IF "$(GOVERSION)" GEQ "$(GOMINVERSION)" ECHO 1)
 	NEBULA_CMD_SUFFIX = .exe
 	NULL_FILE = nul
+	# RIO on windows does pointer stuff that makes go vet angry
+	VET_FLAGS = -unsafeptr=false
 else
-	GOVERSION := $(shell go version | awk '{print substr($$3, 3)}')
-	GOISMIN := $(shell expr "$(GOVERSION)" ">=" "$(GOMINVERSION)")
 	NEBULA_CMD_SUFFIX =
 	NULL_FILE = /dev/null
 endif

@@ -28,6 +22,9 @@ ifndef BUILD_NUMBER
 	endif
 endif

+DOCKER_IMAGE_REPO ?= nebulaoss/nebula
+DOCKER_IMAGE_TAG ?= latest
+
 LDFLAGS = -X main.Build=$(BUILD_NUMBER)

 ALL_LINUX = linux-amd64 \

@@ -42,12 +39,24 @@ ALL_LINUX = linux-amd64 \
 	linux-mips64 \
 	linux-mips64le \
 	linux-mips-softfloat \
-	linux-riscv64
+	linux-riscv64 \
+	linux-loong64

+ALL_FREEBSD = freebsd-amd64 \
+	freebsd-arm64
+
+ALL_OPENBSD = openbsd-amd64 \
+	openbsd-arm64
+
+ALL_NETBSD = netbsd-amd64 \
+	netbsd-arm64
+
 ALL = $(ALL_LINUX) \
+	$(ALL_FREEBSD) \
+	$(ALL_OPENBSD) \
+	$(ALL_NETBSD) \
 	darwin-amd64 \
 	darwin-arm64 \
-	freebsd-amd64 \
 	windows-amd64 \
 	windows-arm64

@@ -69,13 +78,21 @@ e2evvvv: e2ev
 e2e-bench: TEST_FLAGS = -bench=. -benchmem -run=^$
 e2e-bench: e2e

+DOCKER_BIN = build/linux-amd64/nebula build/linux-amd64/nebula-cert
+
 all: $(ALL:%=build/%/nebula) $(ALL:%=build/%/nebula-cert)

+docker: docker/linux-$(shell go env GOARCH)
+
 release: $(ALL:%=build/nebula-%.tar.gz)

 release-linux: $(ALL_LINUX:%=build/nebula-%.tar.gz)

-release-freebsd: build/nebula-freebsd-amd64.tar.gz
+release-freebsd: $(ALL_FREEBSD:%=build/nebula-%.tar.gz)
+
+release-openbsd: $(ALL_OPENBSD:%=build/nebula-%.tar.gz)
+
+release-netbsd: $(ALL_NETBSD:%=build/nebula-%.tar.gz)

 release-boringcrypto: build/nebula-linux-$(shell go env GOARCH)-boringcrypto.tar.gz

@@ -93,6 +110,9 @@ bin-darwin: build/darwin-amd64/nebula build/darwin-amd64/nebula-cert
 bin-freebsd: build/freebsd-amd64/nebula build/freebsd-amd64/nebula-cert
 	mv $? .

+bin-freebsd-arm64: build/freebsd-arm64/nebula build/freebsd-arm64/nebula-cert
+	mv $? .
+
 bin-boringcrypto: build/linux-$(shell go env GOARCH)-boringcrypto/nebula build/linux-$(shell go env GOARCH)-boringcrypto/nebula-cert
 	mv $? .

@@ -136,8 +156,11 @@ build/nebula-%.tar.gz: build/%/nebula build/%/nebula-cert
 build/nebula-%.zip: build/%/nebula.exe build/%/nebula-cert.exe
 	cd build/$* && zip ../nebula-$*.zip nebula.exe nebula-cert.exe

+docker/%: build/%/nebula build/%/nebula-cert
+	docker build . $(DOCKER_BUILD_ARGS) -f docker/Dockerfile --platform "$(subst -,/,$*)" --tag "${DOCKER_IMAGE_REPO}:${DOCKER_IMAGE_TAG}" --tag "${DOCKER_IMAGE_REPO}:$(BUILD_NUMBER)"
+
 vet:
-	go vet -v ./...
+	go vet $(VET_FLAGS) -v ./...

 test:
 	go test -v ./...

@@ -149,6 +172,12 @@
 test-cov-html:
 	go test -coverprofile=coverage.out
 	go tool cover -html=coverage.out

+build-test-mobile:
+	GOARCH=amd64 GOOS=ios go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
+	GOARCH=arm64 GOOS=ios go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
+	GOARCH=amd64 GOOS=android go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
+	GOARCH=arm64 GOOS=android go build $(shell go list ./... | grep -v '/cmd/\|/examples/')
+
 bench:
 	go test -bench=.

@@ -190,8 +219,13 @@ smoke-relay-docker: bin-docker
 	cd .github/workflows/smoke/ && ./smoke-relay.sh

 smoke-docker-race: BUILD_ARGS = -race
+smoke-docker-race: CGO_ENABLED = 1
 smoke-docker-race: smoke-docker

+smoke-vagrant/%: bin-docker build/%/nebula
+	cd .github/workflows/smoke/ && ./build.sh $*
+	cd .github/workflows/smoke/ && ./smoke-vagrant.sh $*
+
 .FORCE:
-.PHONY: e2e e2ev e2evv e2evvv e2evvvv test test-cov-html bench bench-cpu bench-cpu-long bin proto release service smoke-docker smoke-docker-race
+.PHONY: bench bench-cpu bench-cpu-long bin build-test-mobile e2e e2ev e2evv e2evvv e2evvvv proto release service smoke-docker smoke-docker-race test test-cov-html smoke-vagrant/%
 .DEFAULT_GOAL := bin
README.md
@@ -27,20 +27,36 @@ Check the [releases](https://github.com/slackhq/nebula/releases/latest) page for

#### Distribution Packages

- [Arch Linux](https://archlinux.org/packages/community/x86_64/nebula/)
- [Arch Linux](https://archlinux.org/packages/extra/x86_64/nebula/)
```
$ sudo pacman -S nebula
```

- [Fedora Linux](https://src.fedoraproject.org/rpms/nebula)
```
$ sudo dnf install nebula
```

- [Debian Linux](https://packages.debian.org/source/stable/nebula)
```
$ sudo apt install nebula
```

- [Alpine Linux](https://pkgs.alpinelinux.org/packages?name=nebula)
```
$ sudo apk add nebula
```

- [macOS Homebrew](https://github.com/Homebrew/homebrew-core/blob/HEAD/Formula/nebula.rb)
```
$ brew install nebula
```

- [Docker](https://hub.docker.com/r/nebulaoss/nebula)
```
$ docker pull nebulaoss/nebula
```

#### Mobile

- [iOS](https://apps.apple.com/us/app/mobile-nebula/id1509587936?itsct=apps_box&itscg=30200)
@@ -108,7 +124,7 @@ For each host, copy the nebula binary to the host, along with `config.yml` from

## Building Nebula from source

Download go and clone this repo. Change to the nebula directory.
Make sure you have [go](https://go.dev/doc/install) installed and clone this repo. Change to the nebula directory.

To build nebula for all platforms:
`make all`

allow_list.go
@@ -2,17 +2,16 @@ package nebula

import (
"fmt"
"net"
"net/netip"
"regexp"

"github.com/slackhq/nebula/cidr"
"github.com/gaissmai/bart"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/iputil"
)

type AllowList struct {
// The values of this cidrTree are `bool`, signifying allow/deny
cidrTree *cidr.Tree6
cidrTree *bart.Table[bool]
}

type RemoteAllowList struct {
@@ -20,7 +19,7 @@ type RemoteAllowList struct {

// Inside Range Specific, keys of this tree are inside CIDRs and values
// are *AllowList
insideAllowLists *cidr.Tree6
insideAllowLists *bart.Table[*AllowList]
}

type LocalAllowList struct {
@@ -88,7 +87,7 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
return nil, fmt.Errorf("config `%s` has invalid type: %T", k, raw)
}

tree := cidr.NewTree6()
tree := new(bart.Table[bool])

// Keep track of the rules we have added for both ipv4 and ipv6
type allowListRules struct {
@@ -122,18 +121,20 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in
return nil, fmt.Errorf("config `%s` has invalid value (type %T): %v", k, rawValue, rawValue)
}

_, ipNet, err := net.ParseCIDR(rawCIDR)
ipNet, err := netip.ParsePrefix(rawCIDR)
if err != nil {
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s. %w", k, rawCIDR, err)
}

// TODO: should we error on duplicate CIDRs in the config?
tree.AddCIDR(ipNet, value)
ipNet = netip.PrefixFrom(ipNet.Addr().Unmap(), ipNet.Bits())

maskBits, maskSize := ipNet.Mask.Size()
// TODO: should we error on duplicate CIDRs in the config?
tree.Insert(ipNet, value)

maskBits := ipNet.Bits()

var rules *allowListRules
if maskSize == 32 {
if ipNet.Addr().Is4() {
rules = &rules4
} else {
rules = &rules6
@@ -156,8 +157,7 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in

if !rules4.defaultSet {
if rules4.allValuesMatch {
_, zeroCIDR, _ := net.ParseCIDR("0.0.0.0/0")
tree.AddCIDR(zeroCIDR, !rules4.allValues)
tree.Insert(netip.PrefixFrom(netip.IPv4Unspecified(), 0), !rules4.allValues)
} else {
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for 0.0.0.0/0", k)
}
@@ -165,8 +165,7 @@ func newAllowList(k string, raw interface{}, handleKey func(key string, value in

if !rules6.defaultSet {
if rules6.allValuesMatch {
_, zeroCIDR, _ := net.ParseCIDR("::/0")
tree.AddCIDR(zeroCIDR, !rules6.allValues)
tree.Insert(netip.PrefixFrom(netip.IPv6Unspecified(), 0), !rules6.allValues)
} else {
return nil, fmt.Errorf("config `%s` contains both true and false rules, but no default set for ::/0", k)
}
@@ -218,13 +217,13 @@ func getAllowListInterfaces(k string, v interface{}) ([]AllowListNameRule, error
return nameRules, nil
}

func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6, error) {
func getRemoteAllowRanges(c *config.C, k string) (*bart.Table[*AllowList], error) {
value := c.Get(k)
if value == nil {
return nil, nil
}

remoteAllowRanges := cidr.NewTree6()
remoteAllowRanges := new(bart.Table[*AllowList])

rawMap, ok := value.(map[interface{}]interface{})
if !ok {
@@ -241,60 +240,27 @@ func getRemoteAllowRanges(c *config.C, k string) (*cidr.Tree6, error) {
return nil, err
}

_, ipNet, err := net.ParseCIDR(rawCIDR)
ipNet, err := netip.ParsePrefix(rawCIDR)
if err != nil {
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s. %w", k, rawCIDR, err)
}

remoteAllowRanges.AddCIDR(ipNet, allowList)
remoteAllowRanges.Insert(netip.PrefixFrom(ipNet.Addr().Unmap(), ipNet.Bits()), allowList)
}

return remoteAllowRanges, nil
}

func (al *AllowList) Allow(ip net.IP) bool {
func (al *AllowList) Allow(ip netip.Addr) bool {
if al == nil {
return true
}

result := al.cidrTree.MostSpecificContains(ip)
switch v := result.(type) {
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
result, _ := al.cidrTree.Lookup(ip)
return result
}

func (al *AllowList) AllowIpV4(ip iputil.VpnIp) bool {
if al == nil {
return true
}

result := al.cidrTree.MostSpecificContainsIpV4(ip)
switch v := result.(type) {
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
}

func (al *AllowList) AllowIpV6(hi, lo uint64) bool {
if al == nil {
return true
}

result := al.cidrTree.MostSpecificContainsIpV6(hi, lo)
switch v := result.(type) {
case bool:
return v
default:
panic(fmt.Errorf("invalid state, allowlist returned: %T %v", result, result))
}
}

func (al *LocalAllowList) Allow(ip net.IP) bool {
func (al *LocalAllowList) Allow(ip netip.Addr) bool {
if al == nil {
return true
}
@@ -316,45 +282,25 @@ func (al *LocalAllowList) AllowName(name string) bool {
return !al.nameRules[0].Allow
}

func (al *RemoteAllowList) AllowUnknownVpnIp(ip net.IP) bool {
func (al *RemoteAllowList) AllowUnknownVpnIp(ip netip.Addr) bool {
if al == nil {
return true
}
return al.AllowList.Allow(ip)
}

func (al *RemoteAllowList) Allow(vpnIp iputil.VpnIp, ip net.IP) bool {
func (al *RemoteAllowList) Allow(vpnIp netip.Addr, ip netip.Addr) bool {
if !al.getInsideAllowList(vpnIp).Allow(ip) {
return false
}
return al.AllowList.Allow(ip)
}

func (al *RemoteAllowList) AllowIpV4(vpnIp iputil.VpnIp, ip iputil.VpnIp) bool {
if al == nil {
return true
}
if !al.getInsideAllowList(vpnIp).AllowIpV4(ip) {
return false
}
return al.AllowList.AllowIpV4(ip)
}

func (al *RemoteAllowList) AllowIpV6(vpnIp iputil.VpnIp, hi, lo uint64) bool {
if al == nil {
return true
}
if !al.getInsideAllowList(vpnIp).AllowIpV6(hi, lo) {
return false
}
return al.AllowList.AllowIpV6(hi, lo)
}

func (al *RemoteAllowList) getInsideAllowList(vpnIp iputil.VpnIp) *AllowList {
func (al *RemoteAllowList) getInsideAllowList(vpnIp netip.Addr) *AllowList {
if al.insideAllowLists != nil {
inside := al.insideAllowLists.MostSpecificContainsIpV4(vpnIp)
if inside != nil {
return inside.(*AllowList)
inside, ok := al.insideAllowLists.Lookup(vpnIp)
if ok {
return inside
}
}
return nil

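A note on the data-structure swap above: the hand-rolled `cidr.Tree6` is replaced by `github.com/gaissmai/bart`, whose `Table` does longest-prefix-match directly on `net/netip` types and holds IPv4 and IPv6 prefixes in a single structure. A minimal sketch of the lookup semantics, assuming only the `Insert`/`Lookup` API that appears in this diff:

```
package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	// Values carry allow/deny, mirroring AllowList.cidrTree above.
	t := new(bart.Table[bool])
	t.Insert(netip.MustParsePrefix("0.0.0.0/0"), true)    // default: allow
	t.Insert(netip.MustParsePrefix("10.0.0.0/8"), false)  // deny 10/8...
	t.Insert(netip.MustParsePrefix("10.42.0.0/16"), true) // ...except 10.42/16

	// Lookup returns the value attached to the most specific covering
	// prefix, plus ok=false when nothing matches at all.
	v, ok := t.Lookup(netip.MustParseAddr("10.42.1.1"))
	fmt.Println(v, ok) // true true
	v, ok = t.Lookup(netip.MustParseAddr("10.9.9.9"))
	fmt.Println(v, ok) // false true
}
```

This is also why `AllowList.Allow` collapses to a two-line body: the `(value, ok)` return replaces the old `interface{}` result and its type switch.
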
@@ -1,11 +1,11 @@
package nebula

import (
"net"
"net/netip"
"regexp"
"testing"

"github.com/slackhq/nebula/cidr"
"github.com/gaissmai/bart"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/test"
"github.com/stretchr/testify/assert"
@@ -18,7 +18,7 @@ func TestNewAllowListFromConfig(t *testing.T) {
"192.168.0.0": true,
}
r, err := newAllowListFromConfig(c, "allowlist", nil)
assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0")
assert.EqualError(t, err, "config `allowlist` has invalid CIDR: 192.168.0.0. netip.ParsePrefix(\"192.168.0.0\"): no '/'")
assert.Nil(t, r)

c.Settings["allowlist"] = map[interface{}]interface{}{
@@ -98,26 +98,26 @@ func TestNewAllowListFromConfig(t *testing.T) {
}

func TestAllowList_Allow(t *testing.T) {
assert.Equal(t, true, ((*AllowList)(nil)).Allow(net.ParseIP("1.1.1.1")))
assert.Equal(t, true, ((*AllowList)(nil)).Allow(netip.MustParseAddr("1.1.1.1")))

tree := cidr.NewTree6()
tree.AddCIDR(cidr.Parse("0.0.0.0/0"), true)
tree.AddCIDR(cidr.Parse("10.0.0.0/8"), false)
tree.AddCIDR(cidr.Parse("10.42.42.42/32"), true)
tree.AddCIDR(cidr.Parse("10.42.0.0/16"), true)
tree.AddCIDR(cidr.Parse("10.42.42.0/24"), true)
tree.AddCIDR(cidr.Parse("10.42.42.0/24"), false)
tree.AddCIDR(cidr.Parse("::1/128"), true)
tree.AddCIDR(cidr.Parse("::2/128"), false)
tree := new(bart.Table[bool])
tree.Insert(netip.MustParsePrefix("0.0.0.0/0"), true)
tree.Insert(netip.MustParsePrefix("10.0.0.0/8"), false)
tree.Insert(netip.MustParsePrefix("10.42.42.42/32"), true)
tree.Insert(netip.MustParsePrefix("10.42.0.0/16"), true)
tree.Insert(netip.MustParsePrefix("10.42.42.0/24"), true)
tree.Insert(netip.MustParsePrefix("10.42.42.0/24"), false)
tree.Insert(netip.MustParsePrefix("::1/128"), true)
tree.Insert(netip.MustParsePrefix("::2/128"), false)
al := &AllowList{cidrTree: tree}

assert.Equal(t, true, al.Allow(net.ParseIP("1.1.1.1")))
assert.Equal(t, false, al.Allow(net.ParseIP("10.0.0.4")))
assert.Equal(t, true, al.Allow(net.ParseIP("10.42.42.42")))
assert.Equal(t, false, al.Allow(net.ParseIP("10.42.42.41")))
assert.Equal(t, true, al.Allow(net.ParseIP("10.42.0.1")))
assert.Equal(t, true, al.Allow(net.ParseIP("::1")))
assert.Equal(t, false, al.Allow(net.ParseIP("::2")))
assert.Equal(t, true, al.Allow(netip.MustParseAddr("1.1.1.1")))
assert.Equal(t, false, al.Allow(netip.MustParseAddr("10.0.0.4")))
assert.Equal(t, true, al.Allow(netip.MustParseAddr("10.42.42.42")))
assert.Equal(t, false, al.Allow(netip.MustParseAddr("10.42.42.41")))
assert.Equal(t, true, al.Allow(netip.MustParseAddr("10.42.0.1")))
assert.Equal(t, true, al.Allow(netip.MustParseAddr("::1")))
assert.Equal(t, false, al.Allow(netip.MustParseAddr("::2")))
}

func TestLocalAllowList_AllowName(t *testing.T) {

@@ -1,41 +1,36 @@
package nebula

import (
"encoding/binary"
"fmt"
"math"
"net"
"net/netip"
"strconv"

"github.com/slackhq/nebula/cidr"
"github.com/gaissmai/bart"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/iputil"
)

// This allows us to "guess" what the remote might be for a host while we wait
// for the lighthouse response. See "lighthouse.calculated_remotes" in the
// example config file.
type calculatedRemote struct {
ipNet net.IPNet
maskIP iputil.VpnIp
mask iputil.VpnIp
port uint32
ipNet netip.Prefix
mask netip.Prefix
port uint32
}

func newCalculatedRemote(ipNet *net.IPNet, port int) (*calculatedRemote, error) {
// Ensure this is an IPv4 mask that we expect
ones, bits := ipNet.Mask.Size()
if ones == 0 || bits != 32 {
return nil, fmt.Errorf("invalid mask: %v", ipNet)
}
func newCalculatedRemote(maskCidr netip.Prefix, port int) (*calculatedRemote, error) {
masked := maskCidr.Masked()
if port < 0 || port > math.MaxUint16 {
return nil, fmt.Errorf("invalid port: %d", port)
}

return &calculatedRemote{
ipNet: *ipNet,
maskIP: iputil.Ip2VpnIp(ipNet.IP),
mask: iputil.Ip2VpnIp(ipNet.Mask),
port: uint32(port),
ipNet: maskCidr,
mask: masked,
port: uint32(port),
}, nil
}

@@ -43,21 +38,41 @@ func (c *calculatedRemote) String() string {
return fmt.Sprintf("CalculatedRemote(mask=%v port=%d)", c.ipNet, c.port)
}

func (c *calculatedRemote) Apply(ip iputil.VpnIp) *Ip4AndPort {
func (c *calculatedRemote) Apply(ip netip.Addr) *Ip4AndPort {
// Combine the masked bytes of the "mask" IP with the unmasked bytes
// of the overlay IP
masked := (c.maskIP & c.mask) | (ip & ^c.mask)

return &Ip4AndPort{Ip: uint32(masked), Port: c.port}
if c.ipNet.Addr().Is4() {
return c.apply4(ip)
}
return c.apply6(ip)
}

func NewCalculatedRemotesFromConfig(c *config.C, k string) (*cidr.Tree4, error) {
func (c *calculatedRemote) apply4(ip netip.Addr) *Ip4AndPort {
//TODO: IPV6-WORK this can be less crappy
maskb := net.CIDRMask(c.mask.Bits(), c.mask.Addr().BitLen())
mask := binary.BigEndian.Uint32(maskb[:])

b := c.mask.Addr().As4()
maskIp := binary.BigEndian.Uint32(b[:])

b = ip.As4()
intIp := binary.BigEndian.Uint32(b[:])

return &Ip4AndPort{(maskIp & mask) | (intIp & ^mask), c.port}
}

func (c *calculatedRemote) apply6(ip netip.Addr) *Ip4AndPort {
//TODO: IPV6-WORK
panic("Can not calculate ipv6 remote addresses")
}

func NewCalculatedRemotesFromConfig(c *config.C, k string) (*bart.Table[[]*calculatedRemote], error) {
value := c.Get(k)
if value == nil {
return nil, nil
}

calculatedRemotes := cidr.NewTree4()
calculatedRemotes := new(bart.Table[[]*calculatedRemote])

rawMap, ok := value.(map[any]any)
if !ok {
@@ -69,17 +84,18 @@ func NewCalculatedRemotesFromConfig(c *config.C, k string) (*cidr.Tree4, error)
return nil, fmt.Errorf("config `%s` has invalid key (type %T): %v", k, rawKey, rawKey)
}

_, ipNet, err := net.ParseCIDR(rawCIDR)
cidr, err := netip.ParsePrefix(rawCIDR)
if err != nil {
return nil, fmt.Errorf("config `%s` has invalid CIDR: %s", k, rawCIDR)
}

//TODO: IPV6-WORK this does not verify that rawValue contains the same bits as cidr here
entry, err := newCalculatedRemotesListFromConfig(rawValue)
if err != nil {
return nil, fmt.Errorf("config '%s.%s': %w", k, rawCIDR, err)
}

calculatedRemotes.AddCIDR(ipNet, entry)
calculatedRemotes.Insert(cidr, entry)
}

return calculatedRemotes, nil
@@ -117,7 +133,7 @@ func newCalculatedRemotesEntryFromConfig(raw any) (*calculatedRemote, error) {
if !ok {
return nil, fmt.Errorf("invalid mask (type %T): %v", rawValue, rawValue)
}
_, ipNet, err := net.ParseCIDR(rawMask)
maskCidr, err := netip.ParsePrefix(rawMask)
if err != nil {
return nil, fmt.Errorf("invalid mask: %s", rawMask)
}
@@ -139,5 +155,5 @@ func newCalculatedRemotesEntryFromConfig(raw any) (*calculatedRemote, error) {
return nil, fmt.Errorf("invalid port (type %T): %v", rawValue, rawValue)
}

return newCalculatedRemote(ipNet, port)
return newCalculatedRemote(maskCidr, port)
}

@@ -1,27 +1,25 @@
package nebula

import (
"net"
"net/netip"
"testing"

"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)

func TestCalculatedRemoteApply(t *testing.T) {
_, ipNet, err := net.ParseCIDR("192.168.1.0/24")
ipNet, err := netip.ParsePrefix("192.168.1.0/24")
require.NoError(t, err)

c, err := newCalculatedRemote(ipNet, 4242)
require.NoError(t, err)

input := iputil.Ip2VpnIp([]byte{10, 0, 10, 182})
input, err := netip.ParseAddr("10.0.10.182")
assert.NoError(t, err)

expected := &Ip4AndPort{
Ip: uint32(iputil.Ip2VpnIp([]byte{192, 168, 1, 182})),
Port: 4242,
}
expected, err := netip.ParseAddr("192.168.1.182")
assert.NoError(t, err)

assert.Equal(t, expected, c.Apply(input))
assert.Equal(t, NewIp4AndPortFromNetIP(expected, 4242), c.Apply(input))
}

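The test above doubles as a worked example of what `apply4` computes: network bits come from the configured mask prefix, host bits from the overlay IP. A standalone sketch of the same arithmetic (the `combine` helper is illustrative, not part of the diff):

```
package main

import (
	"encoding/binary"
	"fmt"
	"net"
	"net/netip"
)

// combine keeps the network bits of maskPrefix and the host bits of overlay,
// mirroring the (maskIp & mask) | (intIp & ^mask) expression in apply4.
func combine(maskPrefix netip.Prefix, overlay netip.Addr) netip.Addr {
	maskb := net.CIDRMask(maskPrefix.Bits(), 32)
	mask := binary.BigEndian.Uint32(maskb)

	m := maskPrefix.Addr().As4()
	o := overlay.As4()
	out := (binary.BigEndian.Uint32(m[:]) & mask) | (binary.BigEndian.Uint32(o[:]) & ^mask)

	var b [4]byte
	binary.BigEndian.PutUint32(b[:], out)
	return netip.AddrFrom4(b)
}

func main() {
	// 192.168.1.0/24 applied to overlay IP 10.0.10.182 -> 192.168.1.182,
	// matching the expected value in TestCalculatedRemoteApply above.
	p := netip.MustParsePrefix("192.168.1.0/24")
	fmt.Println(combine(p, netip.MustParseAddr("10.0.10.182")))
}
```

Worked through in hex: (0xC0A80100 & 0xFFFFFF00) | (0x0A000AB6 & 0x000000FF) = 0xC0A801B6, i.e. 192.168.1.182.
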
cert.go
@@ -1,163 +0,0 @@
package nebula

import (
"errors"
"fmt"
"io/ioutil"
"strings"
"time"

"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
)

type CertState struct {
certificate *cert.NebulaCertificate
rawCertificate []byte
rawCertificateNoKey []byte
publicKey []byte
privateKey []byte
}

func NewCertState(certificate *cert.NebulaCertificate, privateKey []byte) (*CertState, error) {
// Marshal the certificate to ensure it is valid
rawCertificate, err := certificate.Marshal()
if err != nil {
return nil, fmt.Errorf("invalid nebula certificate on interface: %s", err)
}

publicKey := certificate.Details.PublicKey
cs := &CertState{
rawCertificate: rawCertificate,
certificate: certificate, // PublicKey has been set to nil above
privateKey: privateKey,
publicKey: publicKey,
}

cs.certificate.Details.PublicKey = nil
rawCertNoKey, err := cs.certificate.Marshal()
if err != nil {
return nil, fmt.Errorf("error marshalling certificate no key: %s", err)
}
cs.rawCertificateNoKey = rawCertNoKey
// put public key back
cs.certificate.Details.PublicKey = cs.publicKey
return cs, nil
}

func NewCertStateFromConfig(c *config.C) (*CertState, error) {
var pemPrivateKey []byte
var err error

privPathOrPEM := c.GetString("pki.key", "")

if privPathOrPEM == "" {
return nil, errors.New("no pki.key path or PEM data provided")
}

if strings.Contains(privPathOrPEM, "-----BEGIN") {
pemPrivateKey = []byte(privPathOrPEM)
privPathOrPEM = "<inline>"
} else {
pemPrivateKey, err = ioutil.ReadFile(privPathOrPEM)
if err != nil {
return nil, fmt.Errorf("unable to read pki.key file %s: %s", privPathOrPEM, err)
}
}

rawKey, _, curve, err := cert.UnmarshalPrivateKey(pemPrivateKey)
if err != nil {
return nil, fmt.Errorf("error while unmarshaling pki.key %s: %s", privPathOrPEM, err)
}

var rawCert []byte

pubPathOrPEM := c.GetString("pki.cert", "")

if pubPathOrPEM == "" {
return nil, errors.New("no pki.cert path or PEM data provided")
}

if strings.Contains(pubPathOrPEM, "-----BEGIN") {
rawCert = []byte(pubPathOrPEM)
pubPathOrPEM = "<inline>"
} else {
rawCert, err = ioutil.ReadFile(pubPathOrPEM)
if err != nil {
return nil, fmt.Errorf("unable to read pki.cert file %s: %s", pubPathOrPEM, err)
}
}

nebulaCert, _, err := cert.UnmarshalNebulaCertificateFromPEM(rawCert)
if err != nil {
return nil, fmt.Errorf("error while unmarshaling pki.cert %s: %s", pubPathOrPEM, err)
}

if nebulaCert.Expired(time.Now()) {
return nil, fmt.Errorf("nebula certificate for this host is expired")
}

if len(nebulaCert.Details.Ips) == 0 {
return nil, fmt.Errorf("no IPs encoded in certificate")
}

if err = nebulaCert.VerifyPrivateKey(curve, rawKey); err != nil {
return nil, fmt.Errorf("private key is not a pair with public key in nebula cert")
}

return NewCertState(nebulaCert, rawKey)
}

func loadCAFromConfig(l *logrus.Logger, c *config.C) (*cert.NebulaCAPool, error) {
var rawCA []byte
var err error

caPathOrPEM := c.GetString("pki.ca", "")
if caPathOrPEM == "" {
return nil, errors.New("no pki.ca path or PEM data provided")
}

if strings.Contains(caPathOrPEM, "-----BEGIN") {
rawCA = []byte(caPathOrPEM)

} else {
rawCA, err = ioutil.ReadFile(caPathOrPEM)
if err != nil {
return nil, fmt.Errorf("unable to read pki.ca file %s: %s", caPathOrPEM, err)
}
}

CAs, err := cert.NewCAPoolFromBytes(rawCA)
if errors.Is(err, cert.ErrExpired) {
var expired int
for _, cert := range CAs.CAs {
if cert.Expired(time.Now()) {
expired++
l.WithField("cert", cert).Warn("expired certificate present in CA pool")
}
}

if expired >= len(CAs.CAs) {
return nil, errors.New("no valid CA certificates present")
}

} else if err != nil {
return nil, fmt.Errorf("error while adding CA certificate to CA trust store: %s", err)
}

for _, fp := range c.GetStringSlice("pki.blocklist", []string{}) {
l.WithField("fingerprint", fp).Info("Blocklisting cert")
CAs.BlocklistFingerprint(fp)
}

// Support deprecated config for at least one minor release to allow for migrations
//TODO: remove in 2022 or later
for _, fp := range c.GetStringSlice("pki.blacklist", []string{}) {
l.WithField("fingerprint", fp).Info("Blocklisting cert")
l.Warn("pki.blacklist is deprecated and will not be supported in a future release. Please migrate your config to use pki.blocklist")
CAs.BlocklistFingerprint(fp)
}

return CAs, nil
}

cert/ca.go
@@ -24,31 +24,39 @@ func NewCAPool() *NebulaCAPool {

// NewCAPoolFromBytes will create a new CA pool from the provided
// input bytes, which must be a PEM-encoded set of nebula certificates.
// If the pool contains unsupported certificates, they will generate warnings
// in the []error return arg.
// If the pool contains any expired certificates, an ErrExpired will be
// returned along with the pool. The caller must handle any such errors.
func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, error) {
func NewCAPoolFromBytes(caPEMs []byte) (*NebulaCAPool, []error, error) {
pool := NewCAPool()
var err error
var expired bool
var warnings []error
good := 0

for {
caPEMs, err = pool.AddCACertificate(caPEMs)
if errors.Is(err, ErrExpired) {
expired = true
err = nil
}
if err != nil {
return nil, err
warnings = append(warnings, err)
} else if errors.Is(err, ErrInvalidPEMCertificateUnsupported) {
warnings = append(warnings, err)
} else if err != nil {
return nil, warnings, err
} else {
// Only consider a good certificate if there were no errors present
good++
}

if len(caPEMs) == 0 || strings.TrimSpace(string(caPEMs)) == "" {
break
}
}

if expired {
return pool, ErrExpired
if good == 0 {
return nil, warnings, errors.New("no valid CA certificates present")
}

return pool, nil
return pool, warnings, nil
}

// AddCACertificate verifies a Nebula CA certificate and adds it to the pool

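The behavioral shift here: expired or unsupported-version CAs no longer abort pool construction. They are collected into a `[]error` of warnings and loading continues; only "zero good CAs" (or a hard parse failure) is fatal. A hedched sketch of how a caller might consume the new three-value signature (the `loadCAs` helper and file path are illustrative, not from the diff):

```
package main

import (
	"errors"
	"fmt"
	"os"

	"github.com/slackhq/nebula/cert"
)

func loadCAs(path string) (*cert.NebulaCAPool, error) {
	raw, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	pool, warnings, err := cert.NewCAPoolFromBytes(raw)
	if err != nil {
		// Hard failure: parse error, or no usable CA at all.
		return nil, fmt.Errorf("error loading CA pool: %w", err)
	}
	for _, w := range warnings {
		// Expired / unsupported-version certs land here instead of aborting.
		if errors.Is(w, cert.ErrInvalidPEMCertificateUnsupported) {
			fmt.Fprintf(os.Stderr, "warning: unsupported cert in CA pool: %v\n", w)
		} else {
			fmt.Fprintf(os.Stderr, "warning: %v\n", w)
		}
	}
	return pool, nil
}

func main() {
	if _, err := loadCAs("ca.crt"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```
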
@@ -28,6 +28,7 @@ const publicKeyLen = 32

const (
CertBanner = "NEBULA CERTIFICATE"
CertificateV2Banner = "NEBULA CERTIFICATE V2"
X25519PrivateKeyBanner = "NEBULA X25519 PRIVATE KEY"
X25519PublicKeyBanner = "NEBULA X25519 PUBLIC KEY"
EncryptedEd25519PrivateKeyBanner = "NEBULA ED25519 ENCRYPTED PRIVATE KEY"
@@ -163,6 +164,9 @@ func UnmarshalNebulaCertificateFromPEM(b []byte) (*NebulaCertificate, []byte, er
if p == nil {
return nil, r, fmt.Errorf("input did not contain a valid PEM encoded block")
}
if p.Type == CertificateV2Banner {
return nil, r, fmt.Errorf("%w: %s", ErrInvalidPEMCertificateUnsupported, p.Type)
}
if p.Type != CertBanner {
return nil, r, fmt.Errorf("bytes did not contain a proper nebula certificate banner")
}
@@ -272,6 +276,9 @@ func EncryptAndMarshalSigningPrivateKey(curve Curve, b []byte, passphrase []byte
},
Ciphertext: ciphertext,
})
if err != nil {
return nil, err
}

switch curve {
case Curve_CURVE25519:
@@ -321,7 +328,7 @@ func UnmarshalEd25519PrivateKey(b []byte) (ed25519.PrivateKey, []byte, error) {
return k.Bytes, r, nil
}

// UnmarshalNebulaCertificate will unmarshal a protobuf byte representation of a nebula cert into its
// UnmarshalNebulaEncryptedData will unmarshal a protobuf byte representation of a nebula cert into its
// protobuf-generated struct.
func UnmarshalNebulaEncryptedData(b []byte) (*NebulaEncryptedData, error) {
if len(b) == 0 {

@@ -5,6 +5,7 @@ import (
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"errors"
"fmt"
"io"
"net"
@@ -572,6 +573,13 @@ CmYKEG5lYnVsYSBQMjU2IHRlc3Qo4s+7mgYw4tXrsAc6QQRkaW2jFmllYvN4+/k2
76gvQAGgBgESRzBFAiEAib0/te6eMiZOKD8gdDeloMTS0wGuX2t0C7TFdUhAQzgC
IBNWYMep3ysx9zCgknfG5dKtwGTaqF++BWKDYdyl34KX
-----END NEBULA CERTIFICATE-----
`

v2 := `
# valid PEM with the V2 header
-----BEGIN NEBULA CERTIFICATE V2-----
CmYKEG5lYnVsYSBQMjU2IHRlc3Qo4s+7mgYw4tXrsAc6QQRkaW2jFmllYvN4+/k2
-----END NEBULA CERTIFICATE V2-----
`

rootCA := NebulaCertificate{
@@ -592,33 +600,46 @@ IBNWYMep3ysx9zCgknfG5dKtwGTaqF++BWKDYdyl34KX
},
}

p, err := NewCAPoolFromBytes([]byte(noNewLines))
p, warn, err := NewCAPoolFromBytes([]byte(noNewLines))
assert.Nil(t, err)
assert.Nil(t, warn)
assert.Equal(t, p.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
assert.Equal(t, p.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)

pp, err := NewCAPoolFromBytes([]byte(withNewLines))
pp, warn, err := NewCAPoolFromBytes([]byte(withNewLines))
assert.Nil(t, err)
assert.Nil(t, warn)
assert.Equal(t, pp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
assert.Equal(t, pp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)

// expired cert, no valid certs
ppp, err := NewCAPoolFromBytes([]byte(expired))
assert.Equal(t, ErrExpired, err)
assert.Equal(t, ppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
ppp, warn, err := NewCAPoolFromBytes([]byte(expired))
assert.Error(t, err, "no valid CA certificates present")
assert.Len(t, warn, 1)
assert.Error(t, warn[0], ErrExpired)
assert.Nil(t, ppp)

// expired cert, with valid certs
pppp, err := NewCAPoolFromBytes(append([]byte(expired), noNewLines...))
assert.Equal(t, ErrExpired, err)
pppp, warn, err := NewCAPoolFromBytes(append([]byte(expired), noNewLines...))
assert.Len(t, warn, 1)
assert.Nil(t, err)
assert.Error(t, warn[0], ErrExpired)
assert.Equal(t, pppp.CAs[string("c9bfaf7ce8e84b2eeda2e27b469f4b9617bde192efd214b68891ecda6ed49522")].Details.Name, rootCA.Details.Name)
assert.Equal(t, pppp.CAs[string("5c9c3f23e7ee7fe97637cbd3a0a5b854154d1d9aaaf7b566a51f4a88f76b64cd")].Details.Name, rootCA01.Details.Name)
assert.Equal(t, pppp.CAs[string("152070be6bb19bc9e3bde4c2f0e7d8f4ff5448b4c9856b8eccb314fade0229b0")].Details.Name, "expired")
assert.Equal(t, len(pppp.CAs), 3)

ppppp, err := NewCAPoolFromBytes([]byte(p256))
ppppp, warn, err := NewCAPoolFromBytes([]byte(p256))
assert.Nil(t, err)
assert.Nil(t, warn)
assert.Equal(t, ppppp.CAs[string("a7938893ec8c4ef769b06d7f425e5e46f7a7f5ffa49c3bcf4a86b608caba9159")].Details.Name, rootCAP256.Details.Name)
assert.Equal(t, len(ppppp.CAs), 1)

pppppp, warn, err := NewCAPoolFromBytes(append([]byte(p256), []byte(v2)...))
assert.Nil(t, err)
assert.True(t, errors.Is(warn[0], ErrInvalidPEMCertificateUnsupported))
assert.Equal(t, pppppp.CAs[string("a7938893ec8c4ef769b06d7f425e5e46f7a7f5ffa49c3bcf4a86b608caba9159")].Details.Name, rootCAP256.Details.Name)
assert.Equal(t, len(pppppp.CAs), 1)
}

func appendByteSlices(b ...[]byte) []byte {

@@ -77,6 +77,9 @@ func aes256Decrypt(passphrase []byte, kdfParams *Argon2Parameters, data []byte)
}

gcm, err := cipher.NewGCM(block)
if err != nil {
return nil, err
}

nonce, ciphertext, err := splitNonceCiphertext(data, gcm.NonceSize())
if err != nil {

@@ -5,10 +5,11 @@ import (
)

var (
ErrRootExpired = errors.New("root certificate is expired")
ErrExpired = errors.New("certificate is expired")
ErrNotCA = errors.New("certificate is not a CA")
ErrNotSelfSigned = errors.New("certificate is not self-signed")
ErrBlockListed = errors.New("certificate is in the block list")
ErrSignatureMismatch = errors.New("certificate signature did not match")
ErrRootExpired = errors.New("root certificate is expired")
ErrExpired = errors.New("certificate is expired")
ErrNotCA = errors.New("certificate is not a CA")
ErrNotSelfSigned = errors.New("certificate is not self-signed")
ErrBlockListed = errors.New("certificate is in the block list")
ErrSignatureMismatch = errors.New("certificate signature did not match")
ErrInvalidPEMCertificateUnsupported = errors.New("bytes contain an unsupported certificate format")
)

@@ -1,10 +0,0 @@
package cidr

import "net"

// Parse is a convenience function that returns only the IPNet
// This function ignores errors since it is primarily a test helper, the result could be nil
func Parse(s string) *net.IPNet {
_, c, _ := net.ParseCIDR(s)
return c
}

cidr/tree4.go
@@ -1,167 +0,0 @@
package cidr

import (
"net"

"github.com/slackhq/nebula/iputil"
)

type Node struct {
left *Node
right *Node
parent *Node
value interface{}
}

type entry struct {
CIDR *net.IPNet
Value *interface{}
}

type Tree4 struct {
root *Node
list []entry
}

const (
startbit = iputil.VpnIp(0x80000000)
)

func NewTree4() *Tree4 {
tree := new(Tree4)
tree.root = &Node{}
tree.list = []entry{}
return tree
}

func (tree *Tree4) AddCIDR(cidr *net.IPNet, val interface{}) {
bit := startbit
node := tree.root
next := tree.root

ip := iputil.Ip2VpnIp(cidr.IP)
mask := iputil.Ip2VpnIp(cidr.Mask)

// Find our last ancestor in the tree
for bit&mask != 0 {
if ip&bit != 0 {
next = node.right
} else {
next = node.left
}

if next == nil {
break
}

bit = bit >> 1
node = next
}

// We already have this range so update the value
if next != nil {
addCIDR := cidr.String()
for i, v := range tree.list {
if addCIDR == v.CIDR.String() {
tree.list = append(tree.list[:i], tree.list[i+1:]...)
break
}
}

tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
node.value = val
return
}

// Build up the rest of the tree we don't already have
for bit&mask != 0 {
next = &Node{}
next.parent = node

if ip&bit != 0 {
node.right = next
} else {
node.left = next
}

bit >>= 1
node = next
}

// Final node marks our cidr, set the value
node.value = val
tree.list = append(tree.list, entry{CIDR: cidr, Value: &val})
}

// Contains finds the first match, which may be the least specific
func (tree *Tree4) Contains(ip iputil.VpnIp) (value interface{}) {
bit := startbit
node := tree.root

for node != nil {
if node.value != nil {
return node.value
}

if ip&bit != 0 {
node = node.right
} else {
node = node.left
}

bit >>= 1

}

return value
}

// MostSpecificContains finds the most specific match
func (tree *Tree4) MostSpecificContains(ip iputil.VpnIp) (value interface{}) {
bit := startbit
node := tree.root

for node != nil {
if node.value != nil {
value = node.value
}

if ip&bit != 0 {
node = node.right
} else {
node = node.left
}

bit >>= 1
}

return value
}

// Match finds the most specific match
func (tree *Tree4) Match(ip iputil.VpnIp) (value interface{}) {
bit := startbit
node := tree.root
lastNode := node

for node != nil {
lastNode = node
if ip&bit != 0 {
node = node.right
} else {
node = node.left
}

bit >>= 1
}

if bit == 0 && lastNode != nil {
value = lastNode.value
}
return value
}

// List will return all CIDRs and their current values. Do not modify the contents!
func (tree *Tree4) List() []entry {
return tree.list
}

@@ -1,167 +0,0 @@
package cidr

import (
"net"
"testing"

"github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert"
)

func TestCIDRTree_List(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("1.0.0.0/16"), "1")
tree.AddCIDR(Parse("1.0.0.0/8"), "2")
tree.AddCIDR(Parse("1.0.0.0/16"), "3")
tree.AddCIDR(Parse("1.0.0.0/16"), "4")
list := tree.List()
assert.Len(t, list, 2)
assert.Equal(t, "1.0.0.0/8", list[0].CIDR.String())
assert.Equal(t, "2", *list[0].Value)
assert.Equal(t, "1.0.0.0/16", list[1].CIDR.String())
assert.Equal(t, "4", *list[1].Value)
}

func TestCIDRTree_Contains(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
tree.AddCIDR(Parse("4.1.1.1/32"), "4b")
tree.AddCIDR(Parse("4.1.2.1/32"), "4c")
tree.AddCIDR(Parse("254.0.0.0/4"), "5")

tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4a", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}

for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Contains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
}

tree = NewTree4()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}

func TestCIDRTree_MostSpecificContains(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
tree.AddCIDR(Parse("4.1.1.0/24"), "4a")
tree.AddCIDR(Parse("4.1.1.0/30"), "4b")
tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
tree.AddCIDR(Parse("254.0.0.0/4"), "5")

tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4b", "4.1.1.2"},
{"4c", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}

for _, tt := range tests {
assert.Equal(t, tt.Result, tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
}

tree = NewTree4()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.MostSpecificContains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}

func TestCIDRTree_Match(t *testing.T) {
tree := NewTree4()
tree.AddCIDR(Parse("4.1.1.0/32"), "1a")
tree.AddCIDR(Parse("4.1.1.1/32"), "1b")

tests := []struct {
Result interface{}
IP string
}{
{"1a", "4.1.1.0"},
{"1b", "4.1.1.1"},
}

for _, tt := range tests {
assert.Equal(t, tt.Result, tree.Match(iputil.Ip2VpnIp(net.ParseIP(tt.IP))))
}

tree = NewTree4()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("0.0.0.0"))))
assert.Equal(t, "cool", tree.Contains(iputil.Ip2VpnIp(net.ParseIP("255.255.255.255"))))
}

func BenchmarkCIDRTree_Contains(b *testing.B) {
tree := NewTree4()
tree.AddCIDR(Parse("1.1.0.0/16"), "1")
tree.AddCIDR(Parse("1.2.1.1/32"), "1")
tree.AddCIDR(Parse("192.2.1.1/32"), "1")
tree.AddCIDR(Parse("172.2.1.1/32"), "1")

ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
b.Run("found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Contains(ip)
}
})

ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
b.Run("not found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Contains(ip)
}
})
}

func BenchmarkCIDRTree_Match(b *testing.B) {
tree := NewTree4()
tree.AddCIDR(Parse("1.1.0.0/16"), "1")
tree.AddCIDR(Parse("1.2.1.1/32"), "1")
tree.AddCIDR(Parse("192.2.1.1/32"), "1")
tree.AddCIDR(Parse("172.2.1.1/32"), "1")

ip := iputil.Ip2VpnIp(net.ParseIP("1.2.1.1"))
b.Run("found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Match(ip)
}
})

ip = iputil.Ip2VpnIp(net.ParseIP("1.2.1.255"))
b.Run("not found", func(b *testing.B) {
for i := 0; i < b.N; i++ {
tree.Match(ip)
}
})
}

cidr/tree6.go
@@ -1,185 +0,0 @@
package cidr

import (
"net"

"github.com/slackhq/nebula/iputil"
)

const startbit6 = uint64(1 << 63)

type Tree6 struct {
root4 *Node
root6 *Node
}

func NewTree6() *Tree6 {
tree := new(Tree6)
tree.root4 = &Node{}
tree.root6 = &Node{}
return tree
}

func (tree *Tree6) AddCIDR(cidr *net.IPNet, val interface{}) {
var node, next *Node

cidrIP, ipv4 := isIPV4(cidr.IP)
if ipv4 {
node = tree.root4
next = tree.root4

} else {
node = tree.root6
next = tree.root6
}

for i := 0; i < len(cidrIP); i += 4 {
ip := iputil.Ip2VpnIp(cidrIP[i : i+4])
mask := iputil.Ip2VpnIp(cidr.Mask[i : i+4])
bit := startbit

// Find our last ancestor in the tree
for bit&mask != 0 {
if ip&bit != 0 {
next = node.right
} else {
next = node.left
}

if next == nil {
break
}

bit = bit >> 1
node = next
}

// Build up the rest of the tree we don't already have
for bit&mask != 0 {
next = &Node{}
next.parent = node

if ip&bit != 0 {
node.right = next
} else {
node.left = next
}

bit >>= 1
node = next
}
}

// Final node marks our cidr, set the value
node.value = val
}

// Finds the most specific match
func (tree *Tree6) MostSpecificContains(ip net.IP) (value interface{}) {
var node *Node

wholeIP, ipv4 := isIPV4(ip)
if ipv4 {
node = tree.root4
} else {
node = tree.root6
}

for i := 0; i < len(wholeIP); i += 4 {
ip := iputil.Ip2VpnIp(wholeIP[i : i+4])
bit := startbit

for node != nil {
if node.value != nil {
value = node.value
}

if bit == 0 {
break
}

if ip&bit != 0 {
node = node.right
} else {
node = node.left
}

bit >>= 1
}
}

return value
}

func (tree *Tree6) MostSpecificContainsIpV4(ip iputil.VpnIp) (value interface{}) {
bit := startbit
node := tree.root4

for node != nil {
if node.value != nil {
value = node.value
}

if ip&bit != 0 {
node = node.right
} else {
node = node.left
}

bit >>= 1
}

return value
}

func (tree *Tree6) MostSpecificContainsIpV6(hi, lo uint64) (value interface{}) {
ip := hi
node := tree.root6

for i := 0; i < 2; i++ {
bit := startbit6

for node != nil {
if node.value != nil {
value = node.value
}

if bit == 0 {
break
}

if ip&bit != 0 {
node = node.right
} else {
node = node.left
}

bit >>= 1
}

ip = lo
}

return value
}

func isIPV4(ip net.IP) (net.IP, bool) {
if len(ip) == net.IPv4len {
return ip, true
}

if len(ip) == net.IPv6len && isZeros(ip[0:10]) && ip[10] == 0xff && ip[11] == 0xff {
return ip[12:16], true
}

return ip, false
}

func isZeros(p net.IP) bool {
for i := 0; i < len(p); i++ {
if p[i] != 0 {
return false
}
}
return true
}

@@ -1,81 +0,0 @@
package cidr

import (
"encoding/binary"
"net"
"testing"

"github.com/stretchr/testify/assert"
)

func TestCIDR6Tree_MostSpecificContains(t *testing.T) {
tree := NewTree6()
tree.AddCIDR(Parse("1.0.0.0/8"), "1")
tree.AddCIDR(Parse("2.1.0.0/16"), "2")
tree.AddCIDR(Parse("3.1.1.0/24"), "3")
tree.AddCIDR(Parse("4.1.1.1/24"), "4a")
tree.AddCIDR(Parse("4.1.1.1/30"), "4b")
tree.AddCIDR(Parse("4.1.1.1/32"), "4c")
tree.AddCIDR(Parse("254.0.0.0/4"), "5")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")

tests := []struct {
Result interface{}
IP string
}{
{"1", "1.0.0.0"},
{"1", "1.255.255.255"},
{"2", "2.1.0.0"},
{"2", "2.1.255.255"},
{"3", "3.1.1.0"},
{"3", "3.1.1.255"},
{"4a", "4.1.1.255"},
{"4b", "4.1.1.2"},
{"4c", "4.1.1.1"},
{"5", "240.0.0.0"},
{"5", "255.255.255.255"},
{"6a", "1:2:0:4:1:1:1:1"},
{"6b", "1:2:0:4:5:1:1:1"},
{"6c", "1:2:0:4:5:0:0:0"},
{nil, "239.0.0.0"},
{nil, "4.1.2.2"},
}

for _, tt := range tests {
assert.Equal(t, tt.Result, tree.MostSpecificContains(net.ParseIP(tt.IP)))
}

tree = NewTree6()
tree.AddCIDR(Parse("1.1.1.1/0"), "cool")
tree.AddCIDR(Parse("::/0"), "cool6")
assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("0.0.0.0")))
assert.Equal(t, "cool", tree.MostSpecificContains(net.ParseIP("255.255.255.255")))
assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("::")))
assert.Equal(t, "cool6", tree.MostSpecificContains(net.ParseIP("1:2:3:4:5:6:7:8")))
}

func TestCIDR6Tree_MostSpecificContainsIpV6(t *testing.T) {
tree := NewTree6()
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/64"), "6a")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/80"), "6b")
tree.AddCIDR(Parse("1:2:0:4:5:0:0:0/96"), "6c")

tests := []struct {
Result interface{}
IP string
}{
{"6a", "1:2:0:4:1:1:1:1"},
{"6b", "1:2:0:4:5:1:1:1"},
{"6c", "1:2:0:4:5:0:0:0"},
}

for _, tt := range tests {
ip := net.ParseIP(tt.IP)
hi := binary.BigEndian.Uint64(ip[:8])
lo := binary.BigEndian.Uint64(ip[8:])

assert.Equal(t, tt.Result, tree.MostSpecificContainsIpV6(hi, lo))
}
}

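The two deleted trees (and their tests) are superseded by the same `bart.Table` used in allow_list.go, which covers both address families in one structure. As a sanity check on behavioral compatibility, a sketch replaying a few rows of the removed most-specific-match test tables against the replacement (assuming the `Insert`/`Lookup` API shown earlier):

```
package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	// One table, both families; most specific covering prefix wins.
	t := new(bart.Table[string])
	t.Insert(netip.MustParsePrefix("4.1.1.0/24"), "4a")
	t.Insert(netip.MustParsePrefix("4.1.1.0/30"), "4b")
	t.Insert(netip.MustParsePrefix("4.1.1.1/32"), "4c")
	t.Insert(netip.MustParsePrefix("1:2:0:4::/64"), "6a")

	for _, s := range []string{"4.1.1.255", "4.1.1.2", "4.1.1.1", "1:2:0:4:5:1:1:1"} {
		v, _ := t.Lookup(netip.MustParseAddr(s))
		fmt.Println(s, "->", v) // 4a, 4b, 4c, 6a, matching the old test expectations
	}
}
```
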
@@ -7,7 +7,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"math"
"net"
"os"
@@ -181,9 +180,15 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
if err != nil {
return fmt.Errorf("error while generating ecdsa keys: %s", err)
}
// ref: https://github.com/golang/go/blob/go1.19/src/crypto/x509/sec1.go#L60
rawPriv = key.D.FillBytes(make([]byte, 32))
pub = elliptic.Marshal(elliptic.P256(), key.X, key.Y)

// ecdh.PrivateKey lets us get at the encoded bytes, even though
// we aren't using ECDH here.
eKey, err := key.ECDH()
if err != nil {
return fmt.Errorf("error while converting ecdsa key: %s", err)
}
rawPriv = eKey.Bytes()
pub = eKey.PublicKey().Bytes()
}

nc := cert.NebulaCertificate{

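The hunk above swaps the deprecated `elliptic.Marshal` and manual `D.FillBytes` encoding for the `crypto/ecdh` accessors that `crypto/ecdsa` gained in Go 1.20, which lines up with the `GOMINVERSION = 1.20` seen in the Makefile earlier. A standalone sketch of the conversion:

```
package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
)

func main() {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	// ECDH() converts to an ecdh.PrivateKey; usable for byte access
	// even when no ECDH exchange is being performed.
	eKey, err := key.ECDH()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(eKey.Bytes()))             // 32: fixed-size scalar, like D.FillBytes
	fmt.Println(len(eKey.PublicKey().Bytes())) // 65: uncompressed point, like elliptic.Marshal
}
```
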
@@ -213,27 +218,27 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
return fmt.Errorf("error while signing: %s", err)
}

var b []byte
if *cf.encryption {
b, err := cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
b, err = cert.EncryptAndMarshalSigningPrivateKey(curve, rawPriv, passphrase, kdfParams)
if err != nil {
return fmt.Errorf("error while encrypting out-key: %s", err)
}

err = ioutil.WriteFile(*cf.outKeyPath, b, 0600)
} else {
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalSigningPrivateKey(curve, rawPriv), 0600)
b = cert.MarshalSigningPrivateKey(curve, rawPriv)
}

err = os.WriteFile(*cf.outKeyPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-key: %s", err)
}

b, err := nc.MarshalToPEM()
b, err = nc.MarshalToPEM()
if err != nil {
return fmt.Errorf("error while marshalling certificate: %s", err)
}

err = ioutil.WriteFile(*cf.outCertPath, b, 0600)
err = os.WriteFile(*cf.outCertPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-crt: %s", err)
}
@@ -244,7 +249,7 @@ func ca(args []string, out io.Writer, errOut io.Writer, pr PasswordReader) error
return fmt.Errorf("error while generating qr code: %s", err)
}

err = ioutil.WriteFile(*cf.outQRPath, b, 0600)
err = os.WriteFile(*cf.outQRPath, b, 0600)
if err != nil {
return fmt.Errorf("error while writing out-qr: %s", err)
}

@@ -7,7 +7,6 @@ import (
|
||||
"bytes"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
@@ -107,7 +106,7 @@ func Test_ca(t *testing.T) {
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// create temp key file
|
||||
keyF, err := ioutil.TempFile("", "test.key")
|
||||
keyF, err := os.CreateTemp("", "test.key")
|
||||
assert.Nil(t, err)
|
||||
os.Remove(keyF.Name())
|
||||
|
||||
@@ -120,7 +119,7 @@ func Test_ca(t *testing.T) {
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// create temp cert file
|
||||
crtF, err := ioutil.TempFile("", "test.crt")
|
||||
crtF, err := os.CreateTemp("", "test.crt")
|
||||
assert.Nil(t, err)
|
||||
os.Remove(crtF.Name())
|
||||
os.Remove(keyF.Name())
|
||||
@@ -134,13 +133,13 @@ func Test_ca(t *testing.T) {
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// read cert and key files
|
||||
rb, _ := ioutil.ReadFile(keyF.Name())
|
||||
rb, _ := os.ReadFile(keyF.Name())
|
||||
lKey, b, err := cert.UnmarshalEd25519PrivateKey(rb)
|
||||
assert.Len(t, b, 0)
|
||||
assert.Nil(t, err)
|
||||
assert.Len(t, lKey, 64)
|
||||
|
||||
rb, _ = ioutil.ReadFile(crtF.Name())
|
||||
rb, _ = os.ReadFile(crtF.Name())
|
||||
lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
|
||||
assert.Len(t, b, 0)
|
||||
assert.Nil(t, err)
|
||||
@@ -166,7 +165,7 @@ func Test_ca(t *testing.T) {
|
||||
assert.Equal(t, "", eb.String())
|
||||
|
||||
// read encrypted key file and verify default params
|
||||
rb, _ = ioutil.ReadFile(keyF.Name())
|
||||
rb, _ = os.ReadFile(keyF.Name())
|
||||
k, _ := pem.Decode(rb)
|
||||
ned, err := cert.UnmarshalNebulaEncryptedData(k.Bytes)
|
||||
assert.Nil(t, err)
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
||||
"github.com/slackhq/nebula/cert"
|
||||
@@ -54,12 +53,12 @@ func keygen(args []string, out io.Writer, errOut io.Writer) error {
|
||||
return fmt.Errorf("invalid curve: %s", *cf.curve)
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
|
||||
err = os.WriteFile(*cf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while writing out-key: %s", err)
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile(*cf.outPubPath, cert.MarshalPublicKey(curve, pub), 0600)
|
||||
err = os.WriteFile(*cf.outPubPath, cert.MarshalPublicKey(curve, pub), 0600)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error while writing out-pub: %s", err)
|
||||
}
|
||||
|
||||
```diff
@@ -2,7 +2,6 @@ package main
 
 import (
 	"bytes"
-	"io/ioutil"
 	"os"
 	"testing"
 
@@ -54,7 +53,7 @@ func Test_keygen(t *testing.T) {
 	assert.Equal(t, "", eb.String())
 
 	// create temp key file
-	keyF, err := ioutil.TempFile("", "test.key")
+	keyF, err := os.CreateTemp("", "test.key")
 	assert.Nil(t, err)
 	defer os.Remove(keyF.Name())
 
@@ -67,7 +66,7 @@ func Test_keygen(t *testing.T) {
 	assert.Equal(t, "", eb.String())
 
 	// create temp pub file
-	pubF, err := ioutil.TempFile("", "test.pub")
+	pubF, err := os.CreateTemp("", "test.pub")
 	assert.Nil(t, err)
 	defer os.Remove(pubF.Name())
 
@@ -80,13 +79,13 @@ func Test_keygen(t *testing.T) {
 	assert.Equal(t, "", eb.String())
 
 	// read cert and key files
-	rb, _ := ioutil.ReadFile(keyF.Name())
+	rb, _ := os.ReadFile(keyF.Name())
 	lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
 	assert.Len(t, b, 0)
 	assert.Nil(t, err)
 	assert.Len(t, lKey, 32)
 
-	rb, _ = ioutil.ReadFile(pubF.Name())
+	rb, _ = os.ReadFile(pubF.Name())
 	lPub, b, err := cert.UnmarshalX25519PublicKey(rb)
 	assert.Len(t, b, 0)
 	assert.Nil(t, err)
 
```
```diff
@@ -5,7 +5,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"strings"
 
@@ -41,7 +40,7 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
 		return err
 	}
 
-	rawCert, err := ioutil.ReadFile(*pf.path)
+	rawCert, err := os.ReadFile(*pf.path)
 	if err != nil {
 		return fmt.Errorf("unable to read cert; %s", err)
 	}
@@ -87,7 +86,7 @@ func printCert(args []string, out io.Writer, errOut io.Writer) error {
 		return fmt.Errorf("error while generating qr code: %s", err)
 	}
 
-	err = ioutil.WriteFile(*pf.outQRPath, b, 0600)
+	err = os.WriteFile(*pf.outQRPath, b, 0600)
 	if err != nil {
 		return fmt.Errorf("error while writing out-qr: %s", err)
 	}
 
```
```diff
@@ -2,7 +2,6 @@ package main
 
 import (
 	"bytes"
-	"io/ioutil"
 	"os"
 	"testing"
 	"time"
@@ -54,7 +53,7 @@ func Test_printCert(t *testing.T) {
 	// invalid cert at path
 	ob.Reset()
 	eb.Reset()
-	tf, err := ioutil.TempFile("", "print-cert")
+	tf, err := os.CreateTemp("", "print-cert")
 	assert.Nil(t, err)
 	defer os.Remove(tf.Name())
 
```
```diff
@@ -6,7 +6,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"net"
 	"os"
 	"strings"
@@ -73,7 +72,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
 		return newHelpErrorf("cannot set both -in-pub and -out-key")
 	}
 
-	rawCAKey, err := ioutil.ReadFile(*sf.caKeyPath)
+	rawCAKey, err := os.ReadFile(*sf.caKeyPath)
 	if err != nil {
 		return fmt.Errorf("error while reading ca-key: %s", err)
 	}
@@ -112,7 +111,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
 		return fmt.Errorf("error while parsing ca-key: %s", err)
 	}
 
-	rawCACert, err := ioutil.ReadFile(*sf.caCertPath)
+	rawCACert, err := os.ReadFile(*sf.caCertPath)
 	if err != nil {
 		return fmt.Errorf("error while reading ca-crt: %s", err)
 	}
@@ -178,7 +177,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
 
 	var pub, rawPriv []byte
 	if *sf.inPubPath != "" {
-		rawPub, err := ioutil.ReadFile(*sf.inPubPath)
+		rawPub, err := os.ReadFile(*sf.inPubPath)
 		if err != nil {
 			return fmt.Errorf("error while reading in-pub: %s", err)
 		}
@@ -235,7 +234,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
 		return fmt.Errorf("refusing to overwrite existing key: %s", *sf.outKeyPath)
 	}
 
-	err = ioutil.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
+	err = os.WriteFile(*sf.outKeyPath, cert.MarshalPrivateKey(curve, rawPriv), 0600)
 	if err != nil {
 		return fmt.Errorf("error while writing out-key: %s", err)
 	}
@@ -246,7 +245,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
 		return fmt.Errorf("error while marshalling certificate: %s", err)
 	}
 
-	err = ioutil.WriteFile(*sf.outCertPath, b, 0600)
+	err = os.WriteFile(*sf.outCertPath, b, 0600)
 	if err != nil {
 		return fmt.Errorf("error while writing out-crt: %s", err)
 	}
@@ -257,7 +256,7 @@ func signCert(args []string, out io.Writer, errOut io.Writer, pr PasswordReader)
 		return fmt.Errorf("error while generating qr code: %s", err)
 	}
 
-	err = ioutil.WriteFile(*sf.outQRPath, b, 0600)
+	err = os.WriteFile(*sf.outQRPath, b, 0600)
 	if err != nil {
 		return fmt.Errorf("error while writing out-qr: %s", err)
 	}
 
```
```diff
@@ -7,7 +7,6 @@ import (
 	"bytes"
 	"crypto/rand"
 	"errors"
-	"io/ioutil"
 	"os"
 	"testing"
 	"time"
@@ -104,7 +103,7 @@ func Test_signCert(t *testing.T) {
 	// failed to unmarshal key
 	ob.Reset()
 	eb.Reset()
-	caKeyF, err := ioutil.TempFile("", "sign-cert.key")
+	caKeyF, err := os.CreateTemp("", "sign-cert.key")
 	assert.Nil(t, err)
 	defer os.Remove(caKeyF.Name())
 
@@ -128,7 +127,7 @@ func Test_signCert(t *testing.T) {
 	// failed to unmarshal cert
 	ob.Reset()
 	eb.Reset()
-	caCrtF, err := ioutil.TempFile("", "sign-cert.crt")
+	caCrtF, err := os.CreateTemp("", "sign-cert.crt")
 	assert.Nil(t, err)
 	defer os.Remove(caCrtF.Name())
 
@@ -159,7 +158,7 @@ func Test_signCert(t *testing.T) {
 	// failed to unmarshal pub
 	ob.Reset()
 	eb.Reset()
-	inPubF, err := ioutil.TempFile("", "in.pub")
+	inPubF, err := os.CreateTemp("", "in.pub")
 	assert.Nil(t, err)
 	defer os.Remove(inPubF.Name())
 
@@ -206,7 +205,7 @@ func Test_signCert(t *testing.T) {
 
 	// mismatched ca key
 	_, caPriv2, _ := ed25519.GenerateKey(rand.Reader)
-	caKeyF2, err := ioutil.TempFile("", "sign-cert-2.key")
+	caKeyF2, err := os.CreateTemp("", "sign-cert-2.key")
 	assert.Nil(t, err)
 	defer os.Remove(caKeyF2.Name())
 	caKeyF2.Write(cert.MarshalEd25519PrivateKey(caPriv2))
@@ -227,7 +226,7 @@ func Test_signCert(t *testing.T) {
 	assert.Empty(t, eb.String())
 
 	// create temp key file
-	keyF, err := ioutil.TempFile("", "test.key")
+	keyF, err := os.CreateTemp("", "test.key")
 	assert.Nil(t, err)
 	os.Remove(keyF.Name())
 
@@ -241,7 +240,7 @@ func Test_signCert(t *testing.T) {
 	os.Remove(keyF.Name())
 
 	// create temp cert file
-	crtF, err := ioutil.TempFile("", "test.crt")
+	crtF, err := os.CreateTemp("", "test.crt")
 	assert.Nil(t, err)
 	os.Remove(crtF.Name())
 
@@ -254,13 +253,13 @@ func Test_signCert(t *testing.T) {
 	assert.Empty(t, eb.String())
 
 	// read cert and key files
-	rb, _ := ioutil.ReadFile(keyF.Name())
+	rb, _ := os.ReadFile(keyF.Name())
 	lKey, b, err := cert.UnmarshalX25519PrivateKey(rb)
 	assert.Len(t, b, 0)
 	assert.Nil(t, err)
 	assert.Len(t, lKey, 32)
 
-	rb, _ = ioutil.ReadFile(crtF.Name())
+	rb, _ = os.ReadFile(crtF.Name())
 	lCrt, b, err := cert.UnmarshalNebulaCertificateFromPEM(rb)
 	assert.Len(t, b, 0)
 	assert.Nil(t, err)
@@ -296,7 +295,7 @@ func Test_signCert(t *testing.T) {
 	assert.Empty(t, eb.String())
 
 	// read cert file and check pub key matches in-pub
-	rb, _ = ioutil.ReadFile(crtF.Name())
+	rb, _ = os.ReadFile(crtF.Name())
 	lCrt, b, err = cert.UnmarshalNebulaCertificateFromPEM(rb)
 	assert.Len(t, b, 0)
 	assert.Nil(t, err)
@@ -348,11 +347,11 @@ func Test_signCert(t *testing.T) {
 	ob.Reset()
 	eb.Reset()
 
-	caKeyF, err = ioutil.TempFile("", "sign-cert.key")
+	caKeyF, err = os.CreateTemp("", "sign-cert.key")
 	assert.Nil(t, err)
 	defer os.Remove(caKeyF.Name())
 
-	caCrtF, err = ioutil.TempFile("", "sign-cert.crt")
+	caCrtF, err = os.CreateTemp("", "sign-cert.crt")
 	assert.Nil(t, err)
 	defer os.Remove(caCrtF.Name())
 
```
```diff
@@ -4,7 +4,6 @@ import (
 	"flag"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"strings"
 	"time"
@@ -40,7 +39,7 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {
 		return err
 	}
 
-	rawCACert, err := ioutil.ReadFile(*vf.caPath)
+	rawCACert, err := os.ReadFile(*vf.caPath)
 	if err != nil {
 		return fmt.Errorf("error while reading ca: %s", err)
 	}
@@ -57,7 +56,7 @@ func verify(args []string, out io.Writer, errOut io.Writer) error {
 		}
 	}
 
-	rawCert, err := ioutil.ReadFile(*vf.certPath)
+	rawCert, err := os.ReadFile(*vf.certPath)
 	if err != nil {
 		return fmt.Errorf("unable to read crt; %s", err)
 	}
 
```
```diff
@@ -3,7 +3,6 @@ package main
 import (
 	"bytes"
 	"crypto/rand"
-	"io/ioutil"
 	"os"
 	"testing"
 	"time"
@@ -56,7 +55,7 @@ func Test_verify(t *testing.T) {
 	// invalid ca at path
 	ob.Reset()
 	eb.Reset()
-	caFile, err := ioutil.TempFile("", "verify-ca")
+	caFile, err := os.CreateTemp("", "verify-ca")
 	assert.Nil(t, err)
 	defer os.Remove(caFile.Name())
 
@@ -92,7 +91,7 @@ func Test_verify(t *testing.T) {
 	// invalid crt at path
 	ob.Reset()
 	eb.Reset()
-	certFile, err := ioutil.TempFile("", "verify-cert")
+	certFile, err := os.CreateTemp("", "verify-cert")
 	assert.Nil(t, err)
 	defer os.Remove(certFile.Name())
 
```
```diff
@@ -59,13 +59,8 @@ func main() {
 	}
 
 	ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
 
-	switch v := err.(type) {
-	case util.ContextualError:
-		v.Log(l)
-		os.Exit(1)
-	case error:
-		l.WithError(err).Error("Failed to start")
+	if err != nil {
+		util.LogWithContextIfNeeded("Failed to start", err, l)
 		os.Exit(1)
 	}
 
@@ -53,18 +53,14 @@ func main() {
 	}
 
 	ctrl, err := nebula.Main(c, *configTest, Build, l, nil)
 
-	switch v := err.(type) {
-	case util.ContextualError:
-		v.Log(l)
-		os.Exit(1)
-	case error:
-		l.WithError(err).Error("Failed to start")
+	if err != nil {
+		util.LogWithContextIfNeeded("Failed to start", err, l)
 		os.Exit(1)
 	}
 
 	if !*configTest {
 		ctrl.Start()
+		notifyReady(l)
 		ctrl.ShutdownBlock()
 	}
 
```
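Both `main.go` hunks collapse the old error type switch into a single call to `util.LogWithContextIfNeeded`. A hypothetical sketch of the shape such a helper takes, based only on the switch it replaces — the fields on `ContextualError` are assumptions, not nebula's actual definition:

```go
package util

import "github.com/sirupsen/logrus"

// ContextualError is sketched with assumed fields; see nebula's util
// package for the real type.
type ContextualError struct {
	RealError error
	Context   string
}

func (e ContextualError) Error() string { return e.Context }

func (e ContextualError) Log(l *logrus.Logger) {
	l.WithError(e.RealError).Error(e.Context)
}

// LogWithContextIfNeeded folds away the switch the diff removes: a
// contextual error logs itself with its own context, anything else is
// logged with the caller-supplied message.
func LogWithContextIfNeeded(msg string, err error, l *logrus.Logger) {
	switch v := err.(type) {
	case ContextualError:
		v.Log(l)
	default:
		l.WithError(err).Error(msg)
	}
}
```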
42  cmd/nebula/notify_linux.go  Normal file
```diff
@@ -0,0 +1,42 @@
+package main
+
+import (
+	"net"
+	"os"
+	"time"
+
+	"github.com/sirupsen/logrus"
+)
+
+// SdNotifyReady tells systemd the service is ready and dependent services can now be started
+// https://www.freedesktop.org/software/systemd/man/sd_notify.html
+// https://www.freedesktop.org/software/systemd/man/systemd.service.html
+const SdNotifyReady = "READY=1"
+
+func notifyReady(l *logrus.Logger) {
+	sockName := os.Getenv("NOTIFY_SOCKET")
+	if sockName == "" {
+		l.Debugln("NOTIFY_SOCKET systemd env var not set, not sending ready signal")
+		return
+	}
+
+	conn, err := net.DialTimeout("unixgram", sockName, time.Second)
+	if err != nil {
+		l.WithError(err).Error("failed to connect to systemd notification socket")
+		return
+	}
+	defer conn.Close()
+
+	err = conn.SetWriteDeadline(time.Now().Add(time.Second))
+	if err != nil {
+		l.WithError(err).Error("failed to set the write deadline for the systemd notification socket")
+		return
+	}
+
+	if _, err = conn.Write([]byte(SdNotifyReady)); err != nil {
+		l.WithError(err).Error("failed to signal the systemd notification socket")
+		return
+	}
+
+	l.Debugln("notified systemd the service is ready")
+}
```
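The notification itself is a single datagram on the socket systemd passes via `NOTIFY_SOCKET`. A self-contained sketch (the socket path and names are invented) that stands in for systemd and receives the same `READY=1` payload the new file sends:

```go
package main

import (
	"fmt"
	"net"
	"os"
	"path/filepath"
)

func main() {
	sock := filepath.Join(os.TempDir(), "notify.sock")
	defer os.Remove(sock)

	// Stand-in for systemd's notification socket.
	l, err := net.ListenUnixgram("unixgram", &net.UnixAddr{Name: sock, Net: "unixgram"})
	if err != nil {
		panic(err)
	}
	defer l.Close()

	// systemd would set this in the service's environment.
	os.Setenv("NOTIFY_SOCKET", sock)

	// The core of what notifyReady does:
	conn, err := net.Dial("unixgram", os.Getenv("NOTIFY_SOCKET"))
	if err != nil {
		panic(err)
	}
	conn.Write([]byte("READY=1"))
	conn.Close()

	buf := make([]byte, 64)
	n, _, err := l.ReadFrom(buf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf[:n])) // READY=1
}
```

Paired with `Type=notify` in a unit file, this is what lets dependent services wait until nebula is actually up rather than merely exec'd.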
10  cmd/nebula/notify_notlinux.go  Normal file
```diff
@@ -0,0 +1,10 @@
+//go:build !linux
+// +build !linux
+
+package main
+
+import "github.com/sirupsen/logrus"
+
+func notifyReady(_ *logrus.Logger) {
+	// No init service to notify
+}
```
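The stub exists so callers never need a runtime OS check: the `_linux` filename suffix selects the real implementation on Linux, and the `//go:build !linux` constraint selects the no-op everywhere else. The same pattern in miniature, with hypothetical file names:

```go
// greet_linux.go — compiled only on Linux (constraint implied by the suffix)
package main

func greet() string { return "hello from linux" }
```

```go
// greet_notlinux.go
//go:build !linux

package main

func greet() string { return "hello from a stub" }
```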
```diff
@@ -4,7 +4,7 @@ import (
 	"context"
 	"errors"
 	"fmt"
-	"io/ioutil"
+	"math"
 	"os"
 	"os/signal"
 	"path/filepath"
@@ -15,7 +15,7 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/imdario/mergo"
+	"dario.cat/mergo"
 	"github.com/sirupsen/logrus"
 	"gopkg.in/yaml.v2"
 )
@@ -121,6 +121,10 @@ func (c *C) HasChanged(k string) bool {
 // CatchHUP will listen for the HUP signal in a go routine and reload all configs found in the
 // original path provided to Load. The old settings are shallow copied for change detection after the reload.
 func (c *C) CatchHUP(ctx context.Context) {
+	if c.path == "" {
+		return
+	}
+
 	ch := make(chan os.Signal, 1)
 	signal.Notify(ch, syscall.SIGHUP)
 
@@ -236,6 +240,15 @@ func (c *C) GetInt(k string, d int) int {
 	return v
 }
 
+// GetUint32 will get the uint32 for k or return the default d if not found or invalid
+func (c *C) GetUint32(k string, d uint32) uint32 {
+	r := c.GetInt(k, int(d))
+	if uint64(r) > uint64(math.MaxUint32) {
+		return d
+	}
+	return uint32(r)
+}
+
 // GetBool will get the bool for k or return the default d if not found or invalid
 func (c *C) GetBool(k string, d bool) bool {
 	r := strings.ToLower(c.GetString(k, fmt.Sprintf("%v", d)))
@@ -348,7 +361,7 @@ func (c *C) parse() error {
 	var m map[interface{}]interface{}
 
 	for _, path := range c.files {
-		b, err := ioutil.ReadFile(path)
+		b, err := os.ReadFile(path)
 		if err != nil {
 			return err
 		}
 
```
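A behavior sketch of the new `GetUint32` accessor, driven the same way the tests in this changeset populate settings; the key names here are made up:

```go
package main

import (
	"fmt"

	"github.com/slackhq/nebula/config"
	"github.com/slackhq/nebula/test"
)

func main() {
	c := config.NewC(test.NewLogger())
	c.Settings["listen"] = map[interface{}]interface{}{"batch": 64}

	fmt.Println(c.GetUint32("listen.batch", 32)) // 64: present and fits in a uint32
	fmt.Println(c.GetUint32("missing.key", 99))  // 99: not found, default returned
	// Out-of-range values also fall back to the default: for a negative int r,
	// uint64(r) wraps far above math.MaxUint32, so the guard rejects it.
}
```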
```diff
@@ -1,13 +1,12 @@
 package config
 
 import (
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"testing"
 	"time"
 
-	"github.com/imdario/mergo"
+	"dario.cat/mergo"
 	"github.com/slackhq/nebula/test"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -16,10 +15,10 @@ import (
 
 func TestConfig_Load(t *testing.T) {
 	l := test.NewLogger()
-	dir, err := ioutil.TempDir("", "config-test")
+	dir, err := os.MkdirTemp("", "config-test")
 	// invalid yaml
 	c := NewC(l)
-	ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
+	os.WriteFile(filepath.Join(dir, "01.yaml"), []byte(" invalid yaml"), 0644)
 	assert.EqualError(t, c.Load(dir), "yaml: unmarshal errors:\n line 1: cannot unmarshal !!str `invalid...` into map[interface {}]interface {}")
 
 	// simple multi config merge
@@ -29,8 +28,8 @@ func TestConfig_Load(t *testing.T) {
 
 	assert.Nil(t, err)
 
-	ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
-	ioutil.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644)
+	os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
+	os.WriteFile(filepath.Join(dir, "02.yml"), []byte("outer:\n inner: override\nnew: hi"), 0644)
 	assert.Nil(t, c.Load(dir))
 	expected := map[interface{}]interface{}{
 		"outer": map[interface{}]interface{}{
@@ -120,9 +119,9 @@ func TestConfig_HasChanged(t *testing.T) {
 func TestConfig_ReloadConfig(t *testing.T) {
 	l := test.NewLogger()
 	done := make(chan bool, 1)
-	dir, err := ioutil.TempDir("", "config-test")
+	dir, err := os.MkdirTemp("", "config-test")
 	assert.Nil(t, err)
-	ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
+	os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: hi"), 0644)
 
 	c := NewC(l)
 	assert.Nil(t, c.Load(dir))
@@ -131,7 +130,7 @@ func TestConfig_ReloadConfig(t *testing.T) {
 	assert.False(t, c.HasChanged("outer"))
 	assert.False(t, c.HasChanged(""))
 
-	ioutil.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
+	os.WriteFile(filepath.Join(dir, "01.yaml"), []byte("outer:\n inner: ho"), 0644)
 
 	c.RegisterReloadCallback(func(c *C) {
 		done <- true
 
```
```diff
@@ -3,15 +3,18 @@ package nebula
 import (
 	"bytes"
 	"context"
+	"encoding/binary"
 	"fmt"
+	"net/netip"
 	"sync"
+	"sync/atomic"
 	"time"
 
 	"github.com/rcrowley/go-metrics"
 	"github.com/sirupsen/logrus"
 	"github.com/slackhq/nebula/cert"
+	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/header"
-	"github.com/slackhq/nebula/iputil"
 	"github.com/slackhq/nebula/udp"
 )
 
 type trafficDecision int
```
```diff
@@ -23,133 +26,128 @@ const (
 	swapPrimary    trafficDecision = 3
 	migrateRelays  trafficDecision = 4
 	tryRehandshake trafficDecision = 5
+	sendTestPacket trafficDecision = 6
 )
 
 type connectionManager struct {
-	in     map[uint32]struct{}
-	inLock *sync.RWMutex
-
-	out     map[uint32]struct{}
-	outLock *sync.RWMutex
-
 	// relayUsed holds which relay localIndexs are in use
 	relayUsed     map[uint32]struct{}
 	relayUsedLock *sync.RWMutex
 
-	hostMap                 *HostMap
-	trafficTimer            *LockingTimerWheel[uint32]
-	intf                    *Interface
-	pendingDeletion         map[uint32]struct{}
-	punchy                  *Punchy
+	hostMap      *HostMap
+	trafficTimer *LockingTimerWheel[uint32]
+	intf         *Interface
+	punchy       *Punchy
 
+	// Configuration settings
 	checkInterval           time.Duration
 	pendingDeletionInterval time.Duration
-	metricsTxPunchy         metrics.Counter
+	inactivityTimeout       atomic.Int64
+	dropInactive            atomic.Bool
 
+	metricsTxPunchy metrics.Counter
 
 	l *logrus.Logger
 }
 
-func newConnectionManager(ctx context.Context, l *logrus.Logger, intf *Interface, checkInterval, pendingDeletionInterval time.Duration, punchy *Punchy) *connectionManager {
-	var max time.Duration
-	if checkInterval < pendingDeletionInterval {
-		max = pendingDeletionInterval
-	} else {
-		max = checkInterval
+func newConnectionManagerFromConfig(l *logrus.Logger, c *config.C, hm *HostMap, p *Punchy) *connectionManager {
+	cm := &connectionManager{
+		hostMap:         hm,
+		l:               l,
+		punchy:          p,
+		relayUsed:       make(map[uint32]struct{}),
+		relayUsedLock:   &sync.RWMutex{},
+		metricsTxPunchy: metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
 	}
 
-	nc := &connectionManager{
-		hostMap:                 intf.hostMap,
-		in:                      make(map[uint32]struct{}),
-		inLock:                  &sync.RWMutex{},
-		out:                     make(map[uint32]struct{}),
-		outLock:                 &sync.RWMutex{},
-		relayUsed:               make(map[uint32]struct{}),
-		relayUsedLock:           &sync.RWMutex{},
-		trafficTimer:            NewLockingTimerWheel[uint32](time.Millisecond*500, max),
-		intf:                    intf,
-		pendingDeletion:         make(map[uint32]struct{}),
-		checkInterval:           checkInterval,
-		pendingDeletionInterval: pendingDeletionInterval,
-		punchy:                  punchy,
-		metricsTxPunchy:         metrics.GetOrRegisterCounter("messages.tx.punchy", nil),
-		l:                       l,
-	}
+	cm.reload(c, true)
+	c.RegisterReloadCallback(func(c *config.C) {
+		cm.reload(c, false)
+	})
 
-	nc.Start(ctx)
-	return nc
+	return cm
 }
 
-func (n *connectionManager) In(localIndex uint32) {
-	n.inLock.RLock()
-	// If this already exists, return
-	if _, ok := n.in[localIndex]; ok {
-		n.inLock.RUnlock()
-		return
+func (cm *connectionManager) reload(c *config.C, initial bool) {
+	if initial {
+		cm.checkInterval = time.Duration(c.GetInt("timers.connection_alive_interval", 5)) * time.Second
+		cm.pendingDeletionInterval = time.Duration(c.GetInt("timers.pending_deletion_interval", 10)) * time.Second
+
+		// We want at least a minimum resolution of 500ms per tick so that we can hit these intervals
+		// pretty close to their configured duration.
+		// The inactivity duration is checked each time a hostinfo ticks through so we don't need the wheel to contain it.
+		minDuration := min(time.Millisecond*500, cm.checkInterval, cm.pendingDeletionInterval)
+		maxDuration := max(cm.checkInterval, cm.pendingDeletionInterval)
+		cm.trafficTimer = NewLockingTimerWheel[uint32](minDuration, maxDuration)
+	}
+
+	if initial || c.HasChanged("tunnels.inactivity_timeout") {
+		old := cm.getInactivityTimeout()
+		cm.inactivityTimeout.Store((int64)(c.GetDuration("tunnels.inactivity_timeout", 10*time.Minute)))
+		if !initial {
+			cm.l.WithField("oldDuration", old).
+				WithField("newDuration", cm.getInactivityTimeout()).
+				Info("Inactivity timeout has changed")
+		}
+	}
+
+	if initial || c.HasChanged("tunnels.drop_inactive") {
+		old := cm.dropInactive.Load()
+		cm.dropInactive.Store(c.GetBool("tunnels.drop_inactive", false))
+		if !initial {
+			cm.l.WithField("oldBool", old).
+				WithField("newBool", cm.dropInactive.Load()).
+				Info("Drop inactive setting has changed")
+		}
 	}
-	n.inLock.RUnlock()
-	n.inLock.Lock()
-	n.in[localIndex] = struct{}{}
-	n.inLock.Unlock()
 }
 
-func (n *connectionManager) Out(localIndex uint32) {
-	n.outLock.RLock()
-	// If this already exists, return
-	if _, ok := n.out[localIndex]; ok {
-		n.outLock.RUnlock()
-		return
-	}
-	n.outLock.RUnlock()
-	n.outLock.Lock()
-	n.out[localIndex] = struct{}{}
-	n.outLock.Unlock()
+func (cm *connectionManager) getInactivityTimeout() time.Duration {
+	return (time.Duration)(cm.inactivityTimeout.Load())
 }
 
-func (n *connectionManager) RelayUsed(localIndex uint32) {
-	n.relayUsedLock.RLock()
+func (cm *connectionManager) In(h *HostInfo) {
+	h.in.Store(true)
+}
+
+func (cm *connectionManager) Out(h *HostInfo) {
+	h.out.Store(true)
+}
+
+func (cm *connectionManager) RelayUsed(localIndex uint32) {
+	cm.relayUsedLock.RLock()
 	// If this already exists, return
-	if _, ok := n.relayUsed[localIndex]; ok {
-		n.relayUsedLock.RUnlock()
+	if _, ok := cm.relayUsed[localIndex]; ok {
+		cm.relayUsedLock.RUnlock()
 		return
 	}
-	n.relayUsedLock.RUnlock()
-	n.relayUsedLock.Lock()
-	n.relayUsed[localIndex] = struct{}{}
-	n.relayUsedLock.Unlock()
+	cm.relayUsedLock.RUnlock()
+	cm.relayUsedLock.Lock()
+	cm.relayUsed[localIndex] = struct{}{}
+	cm.relayUsedLock.Unlock()
 }
 
 // getAndResetTrafficCheck returns if there was any inbound or outbound traffic within the last tick and
 // resets the state for this local index
-func (n *connectionManager) getAndResetTrafficCheck(localIndex uint32) (bool, bool) {
-	n.inLock.Lock()
-	n.outLock.Lock()
-	_, in := n.in[localIndex]
-	_, out := n.out[localIndex]
-	delete(n.in, localIndex)
-	delete(n.out, localIndex)
-	n.inLock.Unlock()
-	n.outLock.Unlock()
+func (cm *connectionManager) getAndResetTrafficCheck(h *HostInfo, now time.Time) (bool, bool) {
+	in := h.in.Swap(false)
+	out := h.out.Swap(false)
+	if in || out {
+		h.lastUsed = now
+	}
 	return in, out
 }
 
-func (n *connectionManager) AddTrafficWatch(localIndex uint32) {
-	// Use a write lock directly because it should be incredibly rare that we are ever already tracking this index
-	n.outLock.Lock()
-	if _, ok := n.out[localIndex]; ok {
-		n.outLock.Unlock()
-		return
+// AddTrafficWatch must be called for every new HostInfo.
+// We will continue to monitor the HostInfo until the tunnel is dropped.
+func (cm *connectionManager) AddTrafficWatch(h *HostInfo) {
+	if h.out.Swap(true) == false {
+		cm.trafficTimer.Add(h.localIndexId, cm.checkInterval)
 	}
-	n.out[localIndex] = struct{}{}
-	n.trafficTimer.Add(localIndex, n.checkInterval)
-	n.outLock.Unlock()
 }
 
-func (n *connectionManager) Start(ctx context.Context) {
-	go n.Run(ctx)
-}
-
-func (n *connectionManager) Run(ctx context.Context) {
-	//TODO: this tick should be based on the min wheel tick? Check firewall
-	clockSource := time.NewTicker(500 * time.Millisecond)
+func (cm *connectionManager) Start(ctx context.Context) {
+	clockSource := time.NewTicker(cm.trafficTimer.t.tickDuration)
 	defer clockSource.Stop()
 
 	p := []byte("")
```
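The biggest change in this hunk is how per-tunnel traffic is tracked: the `in`/`out` maps guarded by RWMutexes are gone, replaced by two `atomic.Bool` flags carried on the `HostInfo` itself. A reduced standalone sketch of the pattern (the type and fields are simplified from the diff):

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

type hostInfo struct {
	in, out  atomic.Bool
	lastUsed time.Time
}

// Swap(false) reads and clears each flag in one atomic step, so a packet
// that lands between "read" and "reset" can never be lost, and no locks
// or map deletions are needed on the hot path.
func getAndResetTrafficCheck(h *hostInfo, now time.Time) (in, out bool) {
	in = h.in.Swap(false)
	out = h.out.Swap(false)
	if in || out {
		h.lastUsed = now
	}
	return in, out
}

func main() {
	h := &hostInfo{}
	h.in.Store(true)                                    // a packet arrived
	fmt.Println(getAndResetTrafficCheck(h, time.Now())) // true false
	fmt.Println(getAndResetTrafficCheck(h, time.Now())) // false false
}
```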
```diff
@@ -162,125 +160,137 @@ func (n *connectionManager) Run(ctx context.Context) {
 			return
 
 		case now := <-clockSource.C:
-			n.trafficTimer.Advance(now)
+			cm.trafficTimer.Advance(now)
 			for {
-				localIndex, has := n.trafficTimer.Purge()
+				localIndex, has := cm.trafficTimer.Purge()
 				if !has {
 					break
 				}
 
-				n.doTrafficCheck(localIndex, p, nb, out, now)
+				cm.doTrafficCheck(localIndex, p, nb, out, now)
 			}
 		}
 	}
 }
 
-func (n *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
-	decision, hostinfo, primary := n.makeTrafficDecision(localIndex, p, nb, out, now)
+func (cm *connectionManager) doTrafficCheck(localIndex uint32, p, nb, out []byte, now time.Time) {
+	decision, hostinfo, primary := cm.makeTrafficDecision(localIndex, now)
 
 	switch decision {
 	case deleteTunnel:
-		if n.hostMap.DeleteHostInfo(hostinfo) {
+		if cm.hostMap.DeleteHostInfo(hostinfo) {
 			// Only clearing the lighthouse cache if this is the last hostinfo for this vpn ip in the hostmap
-			n.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
+			cm.intf.lightHouse.DeleteVpnIp(hostinfo.vpnIp)
 		}
 
 	case closeTunnel:
-		n.intf.sendCloseTunnel(hostinfo)
-		n.intf.closeTunnel(hostinfo)
+		cm.intf.sendCloseTunnel(hostinfo)
+		cm.intf.closeTunnel(hostinfo)
 
 	case swapPrimary:
-		n.swapPrimary(hostinfo, primary)
+		cm.swapPrimary(hostinfo, primary)
 
 	case migrateRelays:
-		n.migrateRelayUsed(hostinfo, primary)
+		cm.migrateRelayUsed(hostinfo, primary)
 
 	case tryRehandshake:
-		n.tryRehandshake(hostinfo)
+		cm.tryRehandshake(hostinfo)
 
+	case sendTestPacket:
+		cm.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
 	}
 
-	n.resetRelayTrafficCheck(hostinfo)
+	cm.resetRelayTrafficCheck(hostinfo)
 }
 
-func (n *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
+func (cm *connectionManager) resetRelayTrafficCheck(hostinfo *HostInfo) {
 	if hostinfo != nil {
-		n.relayUsedLock.Lock()
-		defer n.relayUsedLock.Unlock()
+		cm.relayUsedLock.Lock()
+		defer cm.relayUsedLock.Unlock()
 		// No need to migrate any relays, delete usage info now.
 		for _, idx := range hostinfo.relayState.CopyRelayForIdxs() {
-			delete(n.relayUsed, idx)
+			delete(cm.relayUsed, idx)
 		}
 	}
 }
 
-func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
+func (cm *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo) {
 	relayFor := oldhostinfo.relayState.CopyAllRelayFor()
 
 	for _, r := range relayFor {
 		existing, ok := newhostinfo.relayState.QueryRelayForByIp(r.PeerIp)
 
 		var index uint32
-		var relayFrom iputil.VpnIp
-		var relayTo iputil.VpnIp
+		var relayFrom netip.Addr
+		var relayTo netip.Addr
 		switch {
-		case ok && existing.State == Established:
-			// This relay already exists in newhostinfo, then do nothing.
-			continue
-		case ok && existing.State == Requested:
-			// The relay exists in a Requested state; re-send the request
-			index = existing.LocalIndex
-			switch r.Type {
-			case TerminalType:
-				relayFrom = newhostinfo.vpnIp
-				relayTo = existing.PeerIp
-			case ForwardingType:
-				relayFrom = existing.PeerIp
-				relayTo = newhostinfo.vpnIp
-			default:
-				// should never happen
+		case ok:
+			switch existing.State {
+			case Established, PeerRequested, Disestablished:
+				// This relay already exists in newhostinfo, then do nothing.
+				continue
+			case Requested:
+				// The relayed connection exists in a Requested state; re-send the request
+				index = existing.LocalIndex
+				switch r.Type {
+				case TerminalType:
+					relayFrom = cm.intf.myVpnNet.Addr()
+					relayTo = existing.PeerIp
+				case ForwardingType:
+					relayFrom = existing.PeerIp
+					relayTo = newhostinfo.vpnIp
+				default:
+					// should never happen
+					panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
+				}
+			}
 		case !ok:
-			n.relayUsedLock.RLock()
-			if _, relayUsed := n.relayUsed[r.LocalIndex]; !relayUsed {
+			cm.relayUsedLock.RLock()
+			if _, relayUsed := cm.relayUsed[r.LocalIndex]; !relayUsed {
 				// The relay hasn't been used; don't migrate it.
-				n.relayUsedLock.RUnlock()
+				cm.relayUsedLock.RUnlock()
 				continue
 			}
-			n.relayUsedLock.RUnlock()
+			cm.relayUsedLock.RUnlock()
 			// The relay doesn't exist at all; create some relay state and send the request.
 			var err error
-			index, err = AddRelay(n.l, newhostinfo, n.hostMap, r.PeerIp, nil, r.Type, Requested)
+			index, err = AddRelay(cm.l, newhostinfo, cm.hostMap, r.PeerIp, nil, r.Type, Requested)
 			if err != nil {
-				n.l.WithError(err).Error("failed to migrate relay to new hostinfo")
+				cm.l.WithError(err).Error("failed to migrate relay to new hostinfo")
 				continue
 			}
 			switch r.Type {
 			case TerminalType:
-				relayFrom = newhostinfo.vpnIp
+				relayFrom = cm.intf.myVpnNet.Addr()
 				relayTo = r.PeerIp
 			case ForwardingType:
 				relayFrom = r.PeerIp
 				relayTo = newhostinfo.vpnIp
 			default:
 				// should never happen
 				panic(fmt.Sprintf("Migrating unknown relay type: %v", r.Type))
 			}
 		}
 
+		//TODO: IPV6-WORK
+		relayFromB := relayFrom.As4()
+		relayToB := relayTo.As4()
+
 		// Send a CreateRelayRequest to the peer.
 		req := NebulaControl{
 			Type:                NebulaControl_CreateRelayRequest,
 			InitiatorRelayIndex: index,
-			RelayFromIp:         uint32(relayFrom),
-			RelayToIp:           uint32(relayTo),
+			RelayFromIp:         binary.BigEndian.Uint32(relayFromB[:]),
+			RelayToIp:           binary.BigEndian.Uint32(relayToB[:]),
 		}
 		msg, err := req.Marshal()
 		if err != nil {
-			n.l.WithError(err).Error("failed to marshal Control message to migrate relay")
+			cm.l.WithError(err).Error("failed to marshal Control message to migrate relay")
 		} else {
-			n.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
-			n.l.WithFields(logrus.Fields{
-				"relayFrom":           iputil.VpnIp(req.RelayFromIp),
-				"relayTo":             iputil.VpnIp(req.RelayToIp),
+			cm.intf.SendMessageToHostInfo(header.Control, 0, newhostinfo, msg, make([]byte, 12), make([]byte, mtu))
+			cm.l.WithFields(logrus.Fields{
+				"relayFrom":           req.RelayFromIp,
+				"relayTo":             req.RelayToIp,
 				"initiatorRelayIndex": req.InitiatorRelayIndex,
 				"responderRelayIndex": req.ResponderRelayIndex,
 				"vpnIp":               newhostinfo.vpnIp}).
```
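With `iputil.VpnIp` (a `uint32` alias) gone, the relay request above now converts `netip.Addr` values to the protobuf's `uint32` fields explicitly. A standalone sketch of the round trip, with an invented address:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net/netip"
)

func main() {
	relayFrom := netip.MustParseAddr("172.16.0.1")

	// netip.Addr -> wire uint32: As4 yields the big-endian (network order) bytes.
	b := relayFrom.As4()
	wire := binary.BigEndian.Uint32(b[:])
	fmt.Printf("0x%08x\n", wire) // 0xac100001

	// And back again, e.g. when interpreting a received request:
	var back [4]byte
	binary.BigEndian.PutUint32(back[:], wire)
	fmt.Println(netip.AddrFrom4(back)) // 172.16.0.1
}
```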
```diff
@@ -289,46 +299,45 @@ func (n *connectionManager) migrateRelayUsed(oldhostinfo, newhostinfo *HostInfo)
 		}
 	}
 }
 
-func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []byte, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
-	n.hostMap.RLock()
-	defer n.hostMap.RUnlock()
+func (cm *connectionManager) makeTrafficDecision(localIndex uint32, now time.Time) (trafficDecision, *HostInfo, *HostInfo) {
+	// Read lock the main hostmap to order decisions based on tunnels being the primary tunnel
+	cm.hostMap.RLock()
+	defer cm.hostMap.RUnlock()
 
-	hostinfo := n.hostMap.Indexes[localIndex]
+	hostinfo := cm.hostMap.Indexes[localIndex]
 	if hostinfo == nil {
-		n.l.WithField("localIndex", localIndex).Debugf("Not found in hostmap")
-		delete(n.pendingDeletion, localIndex)
+		cm.l.WithField("localIndex", localIndex).Debugln("Not found in hostmap")
 		return doNothing, nil, nil
 	}
 
-	if n.isInvalidCertificate(now, hostinfo) {
-		delete(n.pendingDeletion, hostinfo.localIndexId)
+	if cm.isInvalidCertificate(now, hostinfo) {
 		return closeTunnel, hostinfo, nil
 	}
 
-	primary := n.hostMap.Hosts[hostinfo.vpnIp]
+	primary := cm.hostMap.Hosts[hostinfo.vpnIp]
 	mainHostInfo := true
 	if primary != nil && primary != hostinfo {
 		mainHostInfo = false
 	}
 
 	// Check for traffic on this hostinfo
-	inTraffic, outTraffic := n.getAndResetTrafficCheck(localIndex)
+	inTraffic, outTraffic := cm.getAndResetTrafficCheck(hostinfo, now)
 
 	// A hostinfo is determined alive if there is incoming traffic
 	if inTraffic {
 		decision := doNothing
-		if n.l.Level >= logrus.DebugLevel {
-			hostinfo.logger(n.l).
+		if cm.l.Level >= logrus.DebugLevel {
+			hostinfo.logger(cm.l).
 				WithField("tunnelCheck", m{"state": "alive", "method": "passive"}).
 				Debug("Tunnel status")
 		}
-		delete(n.pendingDeletion, hostinfo.localIndexId)
+		hostinfo.pendingDeletion.Store(false)
 
 		if mainHostInfo {
 			decision = tryRehandshake
 
 		} else {
-			if n.shouldSwapPrimary(hostinfo, primary) {
+			if cm.shouldSwapPrimary(hostinfo, primary) {
 				decision = swapPrimary
 			} else {
 				// migrate the relays to the primary, if in use.
```
```diff
@@ -336,155 +345,175 @@ func (n *connectionManager) makeTrafficDecision(localIndex uint32, p, nb, out []
 			}
 		}
 
-		n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
+		cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)
 
 		if !outTraffic {
 			// Send a punch packet to keep the NAT state alive
-			n.sendPunch(hostinfo)
+			cm.sendPunch(hostinfo)
 		}
 
 		return decision, hostinfo, primary
 	}
 
-	if _, ok := n.pendingDeletion[hostinfo.localIndexId]; ok {
+	if hostinfo.pendingDeletion.Load() {
 		// We have already sent a test packet and nothing was returned, this hostinfo is dead
-		hostinfo.logger(n.l).
+		hostinfo.logger(cm.l).
 			WithField("tunnelCheck", m{"state": "dead", "method": "active"}).
 			Info("Tunnel status")
 
-		delete(n.pendingDeletion, hostinfo.localIndexId)
 		return deleteTunnel, hostinfo, nil
 	}
 
 	decision := doNothing
 	if hostinfo != nil && hostinfo.ConnectionState != nil && mainHostInfo {
 		if !outTraffic {
+			inactiveFor, isInactive := cm.isInactive(hostinfo, now)
+			if isInactive {
+				// Tunnel is inactive, tear it down
+				hostinfo.logger(cm.l).
+					WithField("inactiveDuration", inactiveFor).
+					WithField("primary", mainHostInfo).
+					Info("Dropping tunnel due to inactivity")
+
+				return closeTunnel, hostinfo, primary
+			}
+
 			// If we aren't sending or receiving traffic then its an unused tunnel and we don't to test the tunnel.
 			// Just maintain NAT state if configured to do so.
-			n.sendPunch(hostinfo)
-			n.trafficTimer.Add(hostinfo.localIndexId, n.checkInterval)
+			cm.sendPunch(hostinfo)
+			cm.trafficTimer.Add(hostinfo.localIndexId, cm.checkInterval)
 			return doNothing, nil, nil
 
 		}
 
-		if n.punchy.GetTargetEverything() {
+		if cm.punchy.GetTargetEverything() {
 			// This is similar to the old punchy behavior with a slight optimization.
 			// We aren't receiving traffic but we are sending it, punch on all known
 			// ips in case we need to re-prime NAT state
-			n.sendPunch(hostinfo)
+			cm.sendPunch(hostinfo)
 		}
 
-		if n.l.Level >= logrus.DebugLevel {
-			hostinfo.logger(n.l).
+		if cm.l.Level >= logrus.DebugLevel {
+			hostinfo.logger(cm.l).
 				WithField("tunnelCheck", m{"state": "testing", "method": "active"}).
 				Debug("Tunnel status")
 		}
 
 		// Send a test packet to trigger an authenticated tunnel test, this should suss out any lingering tunnel issues
-		n.intf.SendMessageToHostInfo(header.Test, header.TestRequest, hostinfo, p, nb, out)
+		decision = sendTestPacket
 
 	} else {
-		if n.l.Level >= logrus.DebugLevel {
-			hostinfo.logger(n.l).Debugf("Hostinfo sadness")
+		if cm.l.Level >= logrus.DebugLevel {
+			hostinfo.logger(cm.l).Debugf("Hostinfo sadness")
 		}
 	}
 
-	n.pendingDeletion[hostinfo.localIndexId] = struct{}{}
-	n.trafficTimer.Add(hostinfo.localIndexId, n.pendingDeletionInterval)
-	return doNothing, nil, nil
+	hostinfo.pendingDeletion.Store(true)
+	cm.trafficTimer.Add(hostinfo.localIndexId, cm.pendingDeletionInterval)
+	return decision, hostinfo, nil
 }
 
-func (n *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
+func (cm *connectionManager) isInactive(hostinfo *HostInfo, now time.Time) (time.Duration, bool) {
+	if cm.dropInactive.Load() == false {
+		// We aren't configured to drop inactive tunnels
+		return 0, false
+	}
+
+	inactiveDuration := now.Sub(hostinfo.lastUsed)
+	if inactiveDuration < cm.getInactivityTimeout() {
+		// It's not considered inactive
+		return inactiveDuration, false
+	}
+
+	// The tunnel is inactive
+	return inactiveDuration, true
+}
+
+func (cm *connectionManager) shouldSwapPrimary(current, primary *HostInfo) bool {
 	// The primary tunnel is the most recent handshake to complete locally and should work entirely fine.
 	// If we are here then we have multiple tunnels for a host pair and neither side believes the same tunnel is primary.
 	// Let's sort this out.
 
-	if current.vpnIp < n.intf.myVpnIp {
+	if current.vpnIp.Compare(cm.intf.myVpnNet.Addr()) < 0 {
 		// Only one side should flip primary because if both flip then we may never resolve to a single tunnel.
 		// vpn ip is static across all tunnels for this host pair so lets use that to determine who is flipping.
 		// The remotes vpn ip is lower than mine. I will not flip.
 		return false
 	}
 
-	certState := n.intf.certState.Load()
-	return bytes.Equal(current.ConnectionState.certState.certificate.Signature, certState.certificate.Signature)
+	certState := cm.intf.pki.GetCertState()
+	return bytes.Equal(current.ConnectionState.myCert.Signature, certState.Certificate.Signature)
 }
 
-func (n *connectionManager) swapPrimary(current, primary *HostInfo) {
-	n.hostMap.Lock()
+func (cm *connectionManager) swapPrimary(current, primary *HostInfo) {
+	cm.hostMap.Lock()
 	// Make sure the primary is still the same after the write lock. This avoids a race with a rehandshake.
-	if n.hostMap.Hosts[current.vpnIp] == primary {
-		n.hostMap.unlockedMakePrimary(current)
+	if cm.hostMap.Hosts[current.vpnIp] == primary {
+		cm.hostMap.unlockedMakePrimary(current)
 	}
-	n.hostMap.Unlock()
+	cm.hostMap.Unlock()
 }
 
 // isInvalidCertificate will check if we should destroy a tunnel if pki.disconnect_invalid is true and
 // the certificate is no longer valid. Block listed certificates will skip the pki.disconnect_invalid
 // check and return true.
-func (n *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
+func (cm *connectionManager) isInvalidCertificate(now time.Time, hostinfo *HostInfo) bool {
 	remoteCert := hostinfo.GetCert()
 	if remoteCert == nil {
 		return false
 	}
 
-	valid, err := remoteCert.VerifyWithCache(now, n.intf.caPool)
+	valid, err := remoteCert.VerifyWithCache(now, cm.intf.pki.GetCAPool())
 	if valid {
 		return false
 	}
 
-	if !n.intf.disconnectInvalid && err != cert.ErrBlockListed {
+	if !cm.intf.disconnectInvalid.Load() && err != cert.ErrBlockListed {
 		// Block listed certificates should always be disconnected
 		return false
 	}
 
 	fingerprint, _ := remoteCert.Sha256Sum()
-	hostinfo.logger(n.l).WithError(err).
+	hostinfo.logger(cm.l).WithError(err).
 		WithField("fingerprint", fingerprint).
 		Info("Remote certificate is no longer valid, tearing down the tunnel")
 
 	return true
 }
 
-func (n *connectionManager) sendPunch(hostinfo *HostInfo) {
-	if !n.punchy.GetPunch() {
+func (cm *connectionManager) sendPunch(hostinfo *HostInfo) {
+	if !cm.punchy.GetPunch() {
 		// Punching is disabled
 		return
 	}
 
-	if n.punchy.GetTargetEverything() {
-		hostinfo.remotes.ForEach(n.hostMap.preferredRanges, func(addr *udp.Addr, preferred bool) {
-			n.metricsTxPunchy.Inc(1)
-			n.intf.outside.WriteTo([]byte{1}, addr)
-		})
-
-	} else if hostinfo.remote != nil {
-		n.metricsTxPunchy.Inc(1)
-		n.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
-	}
-}
-
-func (n *connectionManager) tryRehandshake(hostinfo *HostInfo) {
-	certState := n.intf.certState.Load()
-	if bytes.Equal(hostinfo.ConnectionState.certState.certificate.Signature, certState.certificate.Signature) {
+	if cm.intf.lightHouse.IsLighthouseIP(hostinfo.vpnIp) {
+		// Do not punch to lighthouses, we assume our lighthouse update interval is good enough.
+		// In the event the update interval is not sufficient to maintain NAT state then a publicly available lighthouse
+		// would lose the ability to notify us and punchy.respond would become unreliable.
+		return
+	}
+
-	n.l.WithField("vpnIp", hostinfo.vpnIp).
+	if cm.punchy.GetTargetEverything() {
+		hostinfo.remotes.ForEach(cm.hostMap.GetPreferredRanges(), func(addr netip.AddrPort, preferred bool) {
+			cm.metricsTxPunchy.Inc(1)
+			cm.intf.outside.WriteTo([]byte{1}, addr)
+		})
+
+	} else if hostinfo.remote.IsValid() {
+		cm.metricsTxPunchy.Inc(1)
+		cm.intf.outside.WriteTo([]byte{1}, hostinfo.remote)
+	}
+}
+
+func (cm *connectionManager) tryRehandshake(hostinfo *HostInfo) {
+	certState := cm.intf.pki.GetCertState()
+	if bytes.Equal(hostinfo.ConnectionState.myCert.Signature, certState.Certificate.Signature) {
 		return
 	}
 
+	cm.l.WithField("vpnIp", hostinfo.vpnIp).
 		WithField("reason", "local certificate is not current").
 		Info("Re-handshaking with remote")
 
-	//TODO: this is copied from getOrHandshake to keep the extra checks out of the hot path, figure it out
-	newHostinfo := n.intf.handshakeManager.AddVpnIp(hostinfo.vpnIp, n.intf.initHostInfo)
-	if !newHostinfo.HandshakeReady {
-		ixHandshakeStage0(n.intf, newHostinfo.vpnIp, newHostinfo)
-	}
-
-	//If this is a static host, we don't need to wait for the HostQueryReply
-	//We can trigger the handshake right now
-	if _, ok := n.intf.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
-		select {
-		case n.intf.handshakeManager.trigger <- hostinfo.vpnIp:
-		default:
-		}
-	}
+	cm.intf.handshakeManager.StartHandshake(hostinfo.vpnIp, nil)
 }
```
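Putting the `reload` hunk together with the tests that follow, driving the two new reloadable settings looks roughly like this fragment. It assumes a logger `l` and a `hostMap` set up as in the tests below; the `"10m"` string is parsed by `c.GetDuration`:

```go
conf := config.NewC(l)
conf.Settings["tunnels"] = map[interface{}]interface{}{
	"drop_inactive":      true,
	"inactivity_timeout": "10m",
}
punchy := NewPunchyFromConfig(l, conf)
cm := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
// cm.dropInactive is now true and cm.getInactivityTimeout() reports 10 minutes;
// both values can change at runtime through the registered reload callback
// when the config file is edited and the process receives a SIGHUP.
```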
```diff
@@ -1,31 +1,29 @@
 package nebula
 
 import (
 	"context"
 	"crypto/ed25519"
 	"crypto/rand"
-	"net"
+	"net/netip"
 	"testing"
 	"time"
 
 	"github.com/flynn/noise"
 	"github.com/slackhq/nebula/cert"
 	"github.com/slackhq/nebula/config"
-	"github.com/slackhq/nebula/iputil"
 	"github.com/slackhq/nebula/test"
 	"github.com/slackhq/nebula/udp"
 	"github.com/stretchr/testify/assert"
 )
 
-var vpnIp iputil.VpnIp
-
 func newTestLighthouse() *LightHouse {
 	lh := &LightHouse{
-		l:       test.NewLogger(),
-		addrMap: map[iputil.VpnIp]*RemoteList{},
+		l:         test.NewLogger(),
+		addrMap:   map[netip.Addr]*RemoteList{},
+		queryChan: make(chan netip.Addr, 10),
 	}
-	lighthouses := map[iputil.VpnIp]struct{}{}
-	staticList := map[iputil.VpnIp]struct{}{}
+	lighthouses := map[netip.Addr]struct{}{}
+	staticList := map[netip.Addr]struct{}{}
 
 	lh.lighthouses.Store(&lighthouses)
 	lh.staticList.Store(&staticList)
```
```diff
@@ -36,37 +34,40 @@ func newTestLighthouse() *LightHouse {
 func Test_NewConnectionManagerTest(t *testing.T) {
 	l := test.NewLogger()
-	//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
-	_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
-	_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
-	vpnIp = iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
-	preferredRanges := []*net.IPNet{localrange}
+	vpncidr := netip.MustParsePrefix("172.1.1.1/24")
+	localrange := netip.MustParsePrefix("10.1.1.1/24")
+	vpnIp := netip.MustParseAddr("172.1.1.2")
+	preferredRanges := []netip.Prefix{localrange}
 
 	// Very incomplete mock objects
-	hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
+	hostMap := newHostMap(l, vpncidr)
+	hostMap.preferredRanges.Store(&preferredRanges)
 
 	cs := &CertState{
-		rawCertificate:      []byte{},
-		privateKey:          []byte{},
-		certificate:         &cert.NebulaCertificate{},
-		rawCertificateNoKey: []byte{},
+		RawCertificate:      []byte{},
+		PrivateKey:          []byte{},
+		Certificate:         &cert.NebulaCertificate{},
+		RawCertificateNoKey: []byte{},
 	}
 
 	lh := newTestLighthouse()
 	ifce := &Interface{
 		hostMap:          hostMap,
 		inside:           &test.NoopTun{},
-		outside:          &udp.Conn{},
+		outside:          &udp.NoopConn{},
 		firewall:         &Firewall{},
 		lightHouse:       lh,
-		handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
+		pki:              &PKI{},
+		handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
 		l:                l,
 	}
-	ifce.certState.Store(cs)
+	ifce.pki.cs.Store(cs)
 
 	// Create manager
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-	punchy := NewPunchyFromConfig(l, config.NewC(l))
-	nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
+	conf := config.NewC(l)
+	punchy := NewPunchyFromConfig(l, conf)
+	nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
+	nc.intf = ifce
 	p := []byte("")
 	nb := make([]byte, 12, 12)
 	out := make([]byte, mtu)
```
```diff
@@ -78,37 +79,38 @@ func Test_NewConnectionManagerTest(t *testing.T) {
 		remoteIndexId: 9901,
 	}
 	hostinfo.ConnectionState = &ConnectionState{
-		certState: cs,
-		H:         &noise.HandshakeState{},
+		myCert: &cert.NebulaCertificate{},
+		H:      &noise.HandshakeState{},
 	}
 	nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
 
 	// We saw traffic out to vpnIp
-	nc.Out(hostinfo.localIndexId)
-	nc.In(hostinfo.localIndexId)
-	assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
+	nc.Out(hostinfo)
+	nc.In(hostinfo)
+	assert.False(t, hostinfo.pendingDeletion.Load())
 	assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
 	assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
-	assert.Contains(t, nc.out, hostinfo.localIndexId)
+	assert.True(t, hostinfo.out.Load())
+	assert.True(t, hostinfo.in.Load())
 
 	// Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
 	nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-	assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
-	assert.NotContains(t, nc.out, hostinfo.localIndexId)
-	assert.NotContains(t, nc.in, hostinfo.localIndexId)
+	assert.False(t, hostinfo.pendingDeletion.Load())
+	assert.False(t, hostinfo.out.Load())
+	assert.False(t, hostinfo.in.Load())
 
 	// Do another traffic check tick, this host should be pending deletion now
-	nc.Out(hostinfo.localIndexId)
+	nc.Out(hostinfo)
+	assert.True(t, hostinfo.out.Load())
 	nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-	assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
-	assert.NotContains(t, nc.out, hostinfo.localIndexId)
-	assert.NotContains(t, nc.in, hostinfo.localIndexId)
+	assert.True(t, hostinfo.pendingDeletion.Load())
+	assert.False(t, hostinfo.out.Load())
+	assert.False(t, hostinfo.in.Load())
 	assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
 	assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
 
 	// Do a final traffic check tick, the host should now be removed
 	nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
-	assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
 	assert.NotContains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
 	assert.NotContains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
 }
```
@@ -116,36 +118,40 @@ func Test_NewConnectionManagerTest(t *testing.T) {
|
||||
func Test_NewConnectionManagerTest2(t *testing.T) {
|
||||
l := test.NewLogger()
|
||||
//_, tuncidr, _ := net.ParseCIDR("1.1.1.1/24")
|
||||
_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
|
||||
_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
|
||||
preferredRanges := []*net.IPNet{localrange}
|
||||
vpncidr := netip.MustParsePrefix("172.1.1.1/24")
|
||||
localrange := netip.MustParsePrefix("10.1.1.1/24")
|
||||
vpnIp := netip.MustParseAddr("172.1.1.2")
|
||||
preferredRanges := []netip.Prefix{localrange}
|
||||
|
||||
// Very incomplete mock objects
|
||||
hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
|
||||
hostMap := newHostMap(l, vpncidr)
|
||||
hostMap.preferredRanges.Store(&preferredRanges)
|
||||
|
||||
cs := &CertState{
|
||||
rawCertificate: []byte{},
|
||||
privateKey: []byte{},
|
||||
certificate: &cert.NebulaCertificate{},
|
||||
rawCertificateNoKey: []byte{},
|
||||
RawCertificate: []byte{},
|
||||
PrivateKey: []byte{},
|
||||
Certificate: &cert.NebulaCertificate{},
|
||||
RawCertificateNoKey: []byte{},
|
||||
}
|
||||
|
||||
lh := newTestLighthouse()
|
||||
ifce := &Interface{
|
||||
hostMap: hostMap,
|
||||
inside: &test.NoopTun{},
|
||||
outside: &udp.Conn{},
|
||||
outside: &udp.NoopConn{},
|
||||
firewall: &Firewall{},
|
||||
lightHouse: lh,
|
||||
handshakeManager: NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
|
||||
pki: &PKI{},
|
||||
handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
|
||||
l: l,
|
||||
}
|
||||
ifce.certState.Store(cs)
|
||||
ifce.pki.cs.Store(cs)
|
||||
|
||||
// Create manager
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
punchy := NewPunchyFromConfig(l, config.NewC(l))
|
||||
nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
|
||||
conf := config.NewC(l)
|
||||
punchy := NewPunchyFromConfig(l, conf)
|
||||
nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
|
||||
nc.intf = ifce
|
||||
p := []byte("")
|
||||
nb := make([]byte, 12, 12)
|
||||
out := make([]byte, mtu)
|
||||
@@ -157,39 +163,136 @@ func Test_NewConnectionManagerTest2(t *testing.T) {
|
||||
remoteIndexId: 9901,
|
||||
}
|
||||
hostinfo.ConnectionState = &ConnectionState{
|
||||
certState: cs,
|
||||
H: &noise.HandshakeState{},
|
||||
myCert: &cert.NebulaCertificate{},
|
||||
H: &noise.HandshakeState{},
|
||||
}
|
||||
nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)
|
||||
|
||||
// We saw traffic out to vpnIp
|
||||
nc.Out(hostinfo.localIndexId)
|
||||
nc.In(hostinfo.localIndexId)
|
||||
assert.NotContains(t, nc.pendingDeletion, hostinfo.vpnIp)
|
||||
nc.Out(hostinfo)
|
||||
nc.In(hostinfo)
|
||||
assert.True(t, hostinfo.in.Load())
|
||||
assert.True(t, hostinfo.out.Load())
|
||||
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)

    // Do a traffic check tick, should not be pending deletion but should not have any in/out packets recorded
    nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
    assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
    assert.NotContains(t, nc.out, hostinfo.localIndexId)
    assert.NotContains(t, nc.in, hostinfo.localIndexId)
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())

    // Do another traffic check tick, this host should be pending deletion now
    nc.Out(hostinfo.localIndexId)
    nc.Out(hostinfo)
    nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
    assert.Contains(t, nc.pendingDeletion, hostinfo.localIndexId)
    assert.NotContains(t, nc.out, hostinfo.localIndexId)
    assert.NotContains(t, nc.in, hostinfo.localIndexId)
    assert.True(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())
    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)

    // We saw traffic, should no longer be pending deletion
    nc.In(hostinfo.localIndexId)
    nc.In(hostinfo)
    nc.doTrafficCheck(hostinfo.localIndexId, p, nb, out, time.Now())
    assert.NotContains(t, nc.pendingDeletion, hostinfo.localIndexId)
    assert.NotContains(t, nc.out, hostinfo.localIndexId)
    assert.NotContains(t, nc.in, hostinfo.localIndexId)
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())
    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
}

func Test_NewConnectionManager_DisconnectInactive(t *testing.T) {
    l := test.NewLogger()
    vpncidr := netip.MustParsePrefix("172.1.1.1/24")
    localrange := netip.MustParsePrefix("10.1.1.1/24")
    vpnIp := netip.MustParseAddr("172.1.1.2")
    preferredRanges := []netip.Prefix{localrange}

    // Very incomplete mock objects
    hostMap := newHostMap(l, vpncidr)
    hostMap.preferredRanges.Store(&preferredRanges)

    cs := &CertState{
        RawCertificate:      []byte{},
        PrivateKey:          []byte{},
        Certificate:         &cert.NebulaCertificate{},
        RawCertificateNoKey: []byte{},
    }

    lh := newTestLighthouse()
    ifce := &Interface{
        hostMap:          hostMap,
        inside:           &test.NoopTun{},
        outside:          &udp.NoopConn{},
        firewall:         &Firewall{},
        lightHouse:       lh,
        pki:              &PKI{},
        handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
        l:                l,
    }
    ifce.pki.cs.Store(cs)

    // Create manager
    conf := config.NewC(l)
    conf.Settings["tunnels"] = map[interface{}]interface{}{
        "drop_inactive": true,
    }
    punchy := NewPunchyFromConfig(l, conf)
    nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
    assert.True(t, nc.dropInactive.Load())
    nc.intf = ifce

    // Add an ip we have established a connection w/ to hostmap
    hostinfo := &HostInfo{
        vpnIp:         vpnIp,
        localIndexId:  1099,
        remoteIndexId: 9901,
    }
    hostinfo.ConnectionState = &ConnectionState{
        myCert: &cert.NebulaCertificate{},
        H:      &noise.HandshakeState{},
    }
    nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

    // Do a traffic check tick, in and out should be cleared but should not be pending deletion
    nc.Out(hostinfo)
    nc.In(hostinfo)
    assert.True(t, hostinfo.out.Load())
    assert.True(t, hostinfo.in.Load())

    now := time.Now()
    decision, _, _ := nc.makeTrafficDecision(hostinfo.localIndexId, now)
    assert.Equal(t, tryRehandshake, decision)
    assert.Equal(t, now, hostinfo.lastUsed)
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())

    decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*5))
    assert.Equal(t, doNothing, decision)
    assert.Equal(t, now, hostinfo.lastUsed)
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())

    // Do another traffic check tick, should still not be pending deletion
    decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Second*10))
    assert.Equal(t, doNothing, decision)
    assert.Equal(t, now, hostinfo.lastUsed)
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())
    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)

    // Finally advance beyond the inactivity timeout
    decision, _, _ = nc.makeTrafficDecision(hostinfo.localIndexId, now.Add(time.Minute*10))
    assert.Equal(t, closeTunnel, decision)
    assert.Equal(t, now, hostinfo.lastUsed)
    assert.False(t, hostinfo.pendingDeletion.Load())
    assert.False(t, hostinfo.out.Load())
    assert.False(t, hostinfo.in.Load())
    assert.Contains(t, nc.hostMap.Indexes, hostinfo.localIndexId)
    assert.Contains(t, nc.hostMap.Hosts, hostinfo.vpnIp)
}
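Before the diff moves on, it may help to see the shape of the logic these assertions exercise. Below is a minimal, illustrative sketch of an inactivity-timeout decision; the outcome names mirror the test's `tryRehandshake`/`doNothing`/`closeTunnel`, but this is not nebula's actual `makeTrafficDecision`.

```go
package main

import (
	"fmt"
	"time"
)

type trafficDecision int

const (
	doNothing trafficDecision = iota
	tryRehandshake
	closeTunnel
)

// decide reduces the traffic check to its core: traffic refreshes the tunnel,
// short silence is tolerated, and silence past the window closes the tunnel.
func decide(lastUsed, now time.Time, sawTraffic bool, inactivity time.Duration) trafficDecision {
	if sawTraffic {
		return tryRehandshake
	}
	if now.Sub(lastUsed) < inactivity {
		return doNothing
	}
	return closeTunnel
}

func main() {
	last := time.Now()
	fmt.Println(decide(last, last.Add(5*time.Second), false, 10*time.Minute))  // 0 (doNothing)
	fmt.Println(decide(last, last.Add(10*time.Minute), false, 10*time.Minute)) // 2 (closeTunnel)
}
```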
@@ -204,10 +307,12 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
        IP:   net.IPv4(172, 1, 1, 2),
        Mask: net.IPMask{255, 255, 255, 0},
    }
    _, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
    _, localrange, _ := net.ParseCIDR("10.1.1.1/24")
    preferredRanges := []*net.IPNet{localrange}
    hostMap := NewHostMap(l, "test", vpncidr, preferredRanges)
    vpncidr := netip.MustParsePrefix("172.1.1.1/24")
    localrange := netip.MustParsePrefix("10.1.1.1/24")
    vpnIp := netip.MustParseAddr("172.1.1.2")
    preferredRanges := []netip.Prefix{localrange}
    hostMap := newHostMap(l, vpncidr)
    hostMap.preferredRanges.Store(&preferredRanges)

    // Generate keys for CA and peer's cert.
    pubCA, privCA, _ := ed25519.GenerateKey(rand.Reader)
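This hunk is typical of the changeset's wider move from net to net/netip. A small stdlib-only comparison of the two styles, using the same values as the test above:

```go
package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	// Old style: net.ParseCIDR returns pointers and an error that tests often discarded.
	_, vpncidrOld, _ := net.ParseCIDR("172.1.1.1/24")
	fmt.Println(vpncidrOld) // 172.1.1.0/24

	// New style: netip values are comparable, and MustParse* panics loudly on bad test input.
	vpncidr := netip.MustParsePrefix("172.1.1.1/24")
	vpnIp := netip.MustParseAddr("172.1.1.2")
	fmt.Println(vpncidr.Contains(vpnIp)) // true
}
```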
@@ -220,7 +325,8 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
            PublicKey: pubCA,
        },
    }
    caCert.Sign(cert.Curve_CURVE25519, privCA)

    assert.NoError(t, caCert.Sign(cert.Curve_CURVE25519, privCA))
    ncp := &cert.NebulaCAPool{
        CAs: cert.NewCAPool().CAs,
    }
@@ -239,41 +345,46 @@ func Test_NewConnectionManagerTest_DisconnectInvalid(t *testing.T) {
            Issuer: "ca",
        },
    }
    peerCert.Sign(cert.Curve_CURVE25519, privCA)
    assert.NoError(t, peerCert.Sign(cert.Curve_CURVE25519, privCA))

    cs := &CertState{
        rawCertificate:      []byte{},
        privateKey:          []byte{},
        certificate:         &cert.NebulaCertificate{},
        rawCertificateNoKey: []byte{},
        RawCertificate:      []byte{},
        PrivateKey:          []byte{},
        Certificate:         &cert.NebulaCertificate{},
        RawCertificateNoKey: []byte{},
    }

    lh := newTestLighthouse()
    ifce := &Interface{
        hostMap:           hostMap,
        inside:            &test.NoopTun{},
        outside:           &udp.Conn{},
        firewall:          &Firewall{},
        lightHouse:        lh,
        handshakeManager:  NewHandshakeManager(l, vpncidr, preferredRanges, hostMap, lh, &udp.Conn{}, defaultHandshakeConfig),
        l:                 l,
        disconnectInvalid: true,
        caPool:            ncp,
        hostMap:          hostMap,
        inside:           &test.NoopTun{},
        outside:          &udp.NoopConn{},
        firewall:         &Firewall{},
        lightHouse:       lh,
        handshakeManager: NewHandshakeManager(l, hostMap, lh, &udp.NoopConn{}, defaultHandshakeConfig),
        l:                l,
        pki:              &PKI{},
    }
    ifce.certState.Store(cs)
    ifce.pki.cs.Store(cs)
    ifce.pki.caPool.Store(ncp)
    ifce.disconnectInvalid.Store(true)

    // Create manager
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    punchy := NewPunchyFromConfig(l, config.NewC(l))
    nc := newConnectionManager(ctx, l, ifce, 5, 10, punchy)
    conf := config.NewC(l)
    punchy := NewPunchyFromConfig(l, conf)
    nc := newConnectionManagerFromConfig(l, conf, hostMap, punchy)
    nc.intf = ifce
    ifce.connectionManager = nc
    hostinfo, _ := nc.hostMap.AddVpnIp(vpnIp, nil)
    hostinfo.ConnectionState = &ConnectionState{
        certState: cs,
        peerCert:  &peerCert,
        H:         &noise.HandshakeState{},

    hostinfo := &HostInfo{
        vpnIp: vpnIp,
        ConnectionState: &ConnectionState{
            myCert:   &cert.NebulaCertificate{},
            peerCert: &peerCert,
            H:        &noise.HandshakeState{},
        },
    }
    nc.hostMap.unlockedAddHostInfo(hostinfo, ifce)

    // Move ahead 45s.
    // Check whether we disconnect on an invalid certificate.

@@ -18,35 +18,34 @@ type ConnectionState struct {
    eKey *NebulaCipherState
    dKey *NebulaCipherState
    H *noise.HandshakeState
    certState *CertState
    myCert *cert.NebulaCertificate
    peerCert *cert.NebulaCertificate
    initiator bool
    messageCounter atomic.Uint64
    window *Bits
    queueLock sync.Mutex
    writeLock sync.Mutex
    ready bool
}

func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
func NewConnectionState(l *logrus.Logger, cipher string, certState *CertState, initiator bool, pattern noise.HandshakePattern, psk []byte, pskStage int) *ConnectionState {
    var dhFunc noise.DHFunc
    curCertState := f.certState.Load()

    switch curCertState.certificate.Details.Curve {
    switch certState.Certificate.Details.Curve {
    case cert.Curve_CURVE25519:
        dhFunc = noise.DH25519
    case cert.Curve_P256:
        dhFunc = noiseutil.DHP256
    default:
        l.Errorf("invalid curve: %s", curCertState.certificate.Details.Curve)
        l.Errorf("invalid curve: %s", certState.Certificate.Details.Curve)
        return nil
    }
    cs := noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
    if f.cipher == "chachapoly" {

    var cs noise.CipherSuite
    if cipher == "chachapoly" {
        cs = noise.NewCipherSuite(dhFunc, noise.CipherChaChaPoly, noise.HashSHA256)
    } else {
        cs = noise.NewCipherSuite(dhFunc, noiseutil.CipherAESGCM, noise.HashSHA256)
    }

    static := noise.DHKey{Private: curCertState.privateKey, Public: curCertState.publicKey}
    static := noise.DHKey{Private: certState.PrivateKey, Public: certState.PublicKey}

    b := NewBits(ReplayWindow)
    // Clear out bit 0, we never transmit it and we don't want it showing as packet loss
@@ -71,9 +70,10 @@ func (f *Interface) newConnectionState(l *logrus.Logger, initiator bool, pattern
        H:         hs,
        initiator: initiator,
        window:    b,
        ready:     false,
        certState: curCertState,
        myCert:    certState.Certificate,
    }
    // always start the counter from 2, as packet 1 and packet 2 are handshake packets.
    ci.messageCounter.Add(2)

    return ci
}
@@ -83,6 +83,5 @@ func (cs *ConnectionState) MarshalJSON() ([]byte, error) {
        "certificate":     cs.peerCert,
        "initiator":       cs.initiator,
        "message_counter": cs.messageCounter.Load(),
        "ready":           cs.ready,
    })
}
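A side note on the cipher-suite selection above: it maps one config string onto one of two Noise suites. The sketch below makes that standalone, using only the stock github.com/flynn/noise ciphers; nebula substitutes its own noiseutil AES-GCM and P-256 DH, which are not used here.

```go
package main

import (
	"fmt"

	"github.com/flynn/noise"
)

// suiteFor mirrors the selection in NewConnectionState above, with the
// stock flynn/noise AES-GCM standing in for nebula's noiseutil variant.
func suiteFor(cipher string) noise.CipherSuite {
	if cipher == "chachapoly" {
		return noise.NewCipherSuite(noise.DH25519, noise.CipherChaChaPoly, noise.HashSHA256)
	}
	return noise.NewCipherSuite(noise.DH25519, noise.CipherAESGCM, noise.HashSHA256)
}

func main() {
	cs := suiteFor("chachapoly")
	// Name() yields the suite portion of the Noise protocol name.
	fmt.Printf("%s\n", cs.Name())
}
```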
control.go
@@ -2,7 +2,7 @@ package nebula

import (
    "context"
    "net"
    "net/netip"
    "os"
    "os/signal"
    "syscall"
@@ -10,33 +10,43 @@ import (
    "github.com/sirupsen/logrus"
    "github.com/slackhq/nebula/cert"
    "github.com/slackhq/nebula/header"
    "github.com/slackhq/nebula/iputil"
    "github.com/slackhq/nebula/udp"
    "github.com/slackhq/nebula/overlay"
)

// Every interaction here needs to take extra care to copy memory and not return or use arguments "as is" when touching
// core. This means copying IP objects, slices, de-referencing pointers and taking the actual value, etc

type controlEach func(h *HostInfo)

type controlHostLister interface {
    QueryVpnIp(vpnIp netip.Addr) *HostInfo
    ForEachIndex(each controlEach)
    ForEachVpnIp(each controlEach)
    GetPreferredRanges() []netip.Prefix
}

type Control struct {
    f *Interface
    l *logrus.Logger
    cancel context.CancelFunc
    sshStart func()
    statsStart func()
    dnsStart func()
    f *Interface
    l *logrus.Logger
    ctx context.Context
    cancel context.CancelFunc
    sshStart func()
    statsStart func()
    dnsStart func()
    lighthouseStart func()
    connectionManagerStart func(context.Context)
}

type ControlHostInfo struct {
    VpnIp net.IP `json:"vpnIp"`
    VpnIp netip.Addr `json:"vpnIp"`
    LocalIndex uint32 `json:"localIndex"`
    RemoteIndex uint32 `json:"remoteIndex"`
    RemoteAddrs []*udp.Addr `json:"remoteAddrs"`
    CachedPackets int `json:"cachedPackets"`
    RemoteAddrs []netip.AddrPort `json:"remoteAddrs"`
    Cert *cert.NebulaCertificate `json:"cert"`
    MessageCounter uint64 `json:"messageCounter"`
    CurrentRemote *udp.Addr `json:"currentRemote"`
    CurrentRelaysToMe []iputil.VpnIp `json:"currentRelaysToMe"`
    CurrentRelaysThroughMe []iputil.VpnIp `json:"currentRelaysThroughMe"`
    CurrentRemote netip.AddrPort `json:"currentRemote"`
    CurrentRelaysToMe []netip.Addr `json:"currentRelaysToMe"`
    CurrentRelaysThroughMe []netip.Addr `json:"currentRelaysThroughMe"`
}
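Worth noting about the new ControlHostInfo field types: netip.Addr and netip.AddrPort implement encoding.TextMarshaler, so the existing json tags keep producing plain strings with no custom marshaling. A quick stdlib check with a hypothetical two-field struct:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/netip"
)

type hostInfoSketch struct {
	VpnIp         netip.Addr     `json:"vpnIp"`
	CurrentRemote netip.AddrPort `json:"currentRemote"`
}

func main() {
	out, _ := json.Marshal(hostInfoSketch{
		VpnIp:         netip.MustParseAddr("172.1.1.2"),
		CurrentRemote: netip.MustParseAddrPort("10.1.1.1:4242"),
	})
	fmt.Println(string(out)) // {"vpnIp":"172.1.1.2","currentRemote":"10.1.1.1:4242"}
}
```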
// Start actually runs nebula, this is a nonblocking call. To block use Control.ShutdownBlock()
@@ -54,12 +64,22 @@ func (c *Control) Start() {
    if c.dnsStart != nil {
        go c.dnsStart()
    }
    if c.connectionManagerStart != nil {
        go c.connectionManagerStart(c.ctx)
    }
    if c.lighthouseStart != nil {
        c.lighthouseStart()
    }

    // Start reading packets.
    c.f.run()
}

// Stop signals nebula to shutdown, returns after the shutdown is complete
func (c *Control) Context() context.Context {
    return c.ctx
}

// Stop signals nebula to shutdown and close all tunnels, returns after the shutdown is complete
func (c *Control) Stop() {
    // Stop the handshakeManager (and other services), to prevent new tunnels from
    // being created while we're shutting them all down.
@@ -89,7 +109,7 @@ func (c *Control) RebindUDPServer() {
    _ = c.f.outside.Rebind()

    // Trigger a lighthouse update, useful for mobile clients that should have an update interval of 0
    c.f.lightHouse.SendUpdate(c.f)
    c.f.lightHouse.SendUpdate()

    // Let the main interface know that we rebound so that underlying tunnels know to trigger punches from their remotes
    c.f.rebindCount++
@@ -98,7 +118,7 @@ func (c *Control) RebindUDPServer() {
// ListHostmapHosts returns details about the actual or pending (handshaking) hostmap by vpn ip
func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
    if pendingMap {
        return listHostMapHosts(c.f.handshakeManager.pendingHostMap)
        return listHostMapHosts(c.f.handshakeManager)
    } else {
        return listHostMapHosts(c.f.hostMap)
    }
@@ -107,46 +127,85 @@ func (c *Control) ListHostmapHosts(pendingMap bool) []ControlHostInfo {
// ListHostmapIndexes returns details about the actual or pending (handshaking) hostmap by local index id
func (c *Control) ListHostmapIndexes(pendingMap bool) []ControlHostInfo {
    if pendingMap {
        return listHostMapIndexes(c.f.handshakeManager.pendingHostMap)
        return listHostMapIndexes(c.f.handshakeManager)
    } else {
        return listHostMapIndexes(c.f.hostMap)
    }
}

// GetCertByVpnIp returns the authenticated certificate of the given vpn IP, or nil if not found
func (c *Control) GetCertByVpnIp(vpnIp netip.Addr) *cert.NebulaCertificate {
    if c.f.myVpnNet.Addr() == vpnIp {
        return c.f.pki.GetCertState().Certificate
    }
    hi := c.f.hostMap.QueryVpnIp(vpnIp)
    if hi == nil {
        return nil
    }
    return hi.GetCert()
}

// CreateTunnel creates a new tunnel to the given vpn ip.
func (c *Control) CreateTunnel(vpnIp netip.Addr) {
    c.f.handshakeManager.StartHandshake(vpnIp, nil)
}

// PrintTunnel returns details about the existing tunnel to the given vpn ip, if any.
func (c *Control) PrintTunnel(vpnIp netip.Addr) *ControlHostInfo {
    hi := c.f.hostMap.QueryVpnIp(vpnIp)
    if hi == nil {
        return nil
    }
    chi := copyHostInfo(hi, c.f.hostMap.GetPreferredRanges())
    return &chi
}

// QueryLighthouse queries the lighthouse.
func (c *Control) QueryLighthouse(vpnIp netip.Addr) *CacheMap {
    hi := c.f.lightHouse.Query(vpnIp)
    if hi == nil {
        return nil
    }
    return hi.CopyCache()
}
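Taken together, these methods form the netip-based embedding API for tunnels. Below is a hedged sketch of how a caller might drive them; the package name, helper, and vpn ip are hypothetical, and ctrl is assumed to come from nebula.Main in a real embedding program.

```go
package embedsketch

import (
	"fmt"
	"net/netip"

	"github.com/slackhq/nebula"
)

// inspectPeer drives the Control methods shown in the diff above.
func inspectPeer(ctrl *nebula.Control) {
	peer := netip.MustParseAddr("192.168.100.7") // hypothetical vpn ip; Unmap() 4in6 addresses first

	ctrl.CreateTunnel(peer) // start a handshake if no tunnel exists yet
	if hi := ctrl.GetHostInfoByVpnIp(peer, false); hi != nil {
		fmt.Println(hi.CurrentRemote) // current underlay address
	}
	if crt := ctrl.GetCertByVpnIp(peer); crt != nil {
		fmt.Println(crt.Details.Name) // peer's authenticated certificate name
	}
}
```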
// GetHostInfoByVpnIp returns a single tunnel's hostInfo, or nil if not found
func (c *Control) GetHostInfoByVpnIp(vpnIp iputil.VpnIp, pending bool) *ControlHostInfo {
    var hm *HostMap
// Caller should take care to Unmap() any 4in6 addresses prior to calling.
func (c *Control) GetHostInfoByVpnIp(vpnIp netip.Addr, pending bool) *ControlHostInfo {
    var hl controlHostLister
    if pending {
        hm = c.f.handshakeManager.pendingHostMap
        hl = c.f.handshakeManager
    } else {
        hm = c.f.hostMap
        hl = c.f.hostMap
    }

    h, err := hm.QueryVpnIp(vpnIp)
    if err != nil {
    h := hl.QueryVpnIp(vpnIp)
    if h == nil {
        return nil
    }

    ch := copyHostInfo(h, c.f.hostMap.preferredRanges)
    ch := copyHostInfo(h, c.f.hostMap.GetPreferredRanges())
    return &ch
}

// SetRemoteForTunnel forces a tunnel to use a specific remote
func (c *Control) SetRemoteForTunnel(vpnIp iputil.VpnIp, addr udp.Addr) *ControlHostInfo {
    hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
    if err != nil {
// Caller should take care to Unmap() any 4in6 addresses prior to calling.
func (c *Control) SetRemoteForTunnel(vpnIp netip.Addr, addr netip.AddrPort) *ControlHostInfo {
    hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
    if hostInfo == nil {
        return nil
    }

    hostInfo.SetRemote(addr.Copy())
    ch := copyHostInfo(hostInfo, c.f.hostMap.preferredRanges)
    hostInfo.SetRemote(addr)
    ch := copyHostInfo(hostInfo, c.f.hostMap.GetPreferredRanges())
    return &ch
}

// CloseTunnel closes a fully established tunnel. If localOnly is false it will notify the remote end as well.
func (c *Control) CloseTunnel(vpnIp iputil.VpnIp, localOnly bool) bool {
    hostInfo, err := c.f.hostMap.QueryVpnIp(vpnIp)
    if err != nil {
// Caller should take care to Unmap() any 4in6 addresses prior to calling.
func (c *Control) CloseTunnel(vpnIp netip.Addr, localOnly bool) bool {
    hostInfo := c.f.hostMap.QueryVpnIp(vpnIp)
    if hostInfo == nil {
        return false
    }

@@ -187,7 +246,7 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
    }

    // Learn which hosts are being used as relays, so we can shut them down last.
    relayingHosts := map[iputil.VpnIp]*HostInfo{}
    relayingHosts := map[netip.Addr]*HostInfo{}
    // Grab the hostMap lock to access the Relays map
    c.f.hostMap.Lock()
    for _, relayingHost := range c.f.hostMap.Relays {
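The switch to map[netip.Addr]*HostInfo above works because netip.Addr is a comparable value type; net.IP is a byte slice and cannot be used as a map key at all. A minimal demonstration:

```go
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	relaying := map[netip.Addr]bool{}
	relaying[netip.MustParseAddr("10.128.0.128")] = true

	// Equal addresses are equal keys, however they were produced.
	again, _ := netip.ParseAddr("10.128.0.128")
	fmt.Println(relaying[again]) // true

	// map[net.IP]bool would not even compile: slices are not comparable.
}
```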
@@ -214,16 +273,20 @@ func (c *Control) CloseAllTunnels(excludeLighthouses bool) (closed int) {
        return
    }

func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
func (c *Control) Device() overlay.Device {
    return c.f.inside
}

func copyHostInfo(h *HostInfo, preferredRanges []netip.Prefix) ControlHostInfo {

    chi := ControlHostInfo{
        VpnIp: h.vpnIp.ToIP(),
        VpnIp: h.vpnIp,
        LocalIndex: h.localIndexId,
        RemoteIndex: h.remoteIndexId,
        RemoteAddrs: h.remotes.CopyAddrs(preferredRanges),
        CachedPackets: len(h.packetStore),
        CurrentRelaysToMe: h.relayState.CopyRelayIps(),
        CurrentRelaysThroughMe: h.relayState.CopyRelayForIps(),
        CurrentRemote: h.remote,
    }

    if h.ConnectionState != nil {
@@ -234,35 +297,23 @@ func copyHostInfo(h *HostInfo, preferredRanges []*net.IPNet) ControlHostInfo {
        chi.Cert = c.Copy()
    }

    if h.remote != nil {
        chi.CurrentRemote = h.remote.Copy()
    }

    return chi
}

func listHostMapHosts(hm *HostMap) []ControlHostInfo {
    hm.RLock()
    hosts := make([]ControlHostInfo, len(hm.Hosts))
    i := 0
    for _, v := range hm.Hosts {
        hosts[i] = copyHostInfo(v, hm.preferredRanges)
        i++
    }
    hm.RUnlock()

func listHostMapHosts(hl controlHostLister) []ControlHostInfo {
    hosts := make([]ControlHostInfo, 0)
    pr := hl.GetPreferredRanges()
    hl.ForEachVpnIp(func(hostinfo *HostInfo) {
        hosts = append(hosts, copyHostInfo(hostinfo, pr))
    })
    return hosts
}

func listHostMapIndexes(hm *HostMap) []ControlHostInfo {
    hm.RLock()
    hosts := make([]ControlHostInfo, len(hm.Indexes))
    i := 0
    for _, v := range hm.Indexes {
        hosts[i] = copyHostInfo(v, hm.preferredRanges)
        i++
    }
    hm.RUnlock()

func listHostMapIndexes(hl controlHostLister) []ControlHostInfo {
    hosts := make([]ControlHostInfo, 0)
    pr := hl.GetPreferredRanges()
    hl.ForEachIndex(func(hostinfo *HostInfo) {
        hosts = append(hosts, copyHostInfo(hostinfo, pr))
    })
    return hosts
}
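The rewrite above replaces "caller takes the lock, then walks the map" with a ForEach callback that keeps the locking inside the lister. An illustrative standalone version of that pattern (types are invented for the sketch):

```go
package main

import (
	"fmt"
	"sync"
)

type lister struct {
	mu    sync.RWMutex
	hosts map[uint32]string
}

// ForEachIndex holds the read lock for the duration of the iteration, so
// callers never touch the map or the lock directly.
func (l *lister) ForEachIndex(each func(host string)) {
	l.mu.RLock()
	defer l.mu.RUnlock()
	for _, h := range l.hosts {
		each(h)
	}
}

func main() {
	l := &lister{hosts: map[uint32]string{201: "10.128.0.1"}}
	out := make([]string, 0)
	l.ForEachIndex(func(h string) { out = append(out, h) })
	fmt.Println(out)
}
```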
@@ -2,15 +2,14 @@ package nebula

import (
    "net"
    "net/netip"
    "reflect"
    "testing"
    "time"

    "github.com/sirupsen/logrus"
    "github.com/slackhq/nebula/cert"
    "github.com/slackhq/nebula/iputil"
    "github.com/slackhq/nebula/test"
    "github.com/slackhq/nebula/udp"
    "github.com/stretchr/testify/assert"
)

@@ -18,16 +17,19 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
    l := test.NewLogger()
    // Special care must be taken to re-use all objects provided to the hostmap and certificate in the expectedInfo object
    // To properly ensure we are not exposing core memory to the caller
    hm := NewHostMap(l, "test", &net.IPNet{}, make([]*net.IPNet, 0))
    remote1 := udp.NewAddr(net.ParseIP("0.0.0.100"), 4444)
    remote2 := udp.NewAddr(net.ParseIP("1:2:3:4:5:6:7:8"), 4444)
    hm := newHostMap(l, netip.Prefix{})
    hm.preferredRanges.Store(&[]netip.Prefix{})

    remote1 := netip.MustParseAddrPort("0.0.0.100:4444")
    remote2 := netip.MustParseAddrPort("[1:2:3:4:5:6:7:8]:4444")

    ipNet := net.IPNet{
        IP: net.IPv4(1, 2, 3, 4),
        IP: remote1.Addr().AsSlice(),
        Mask: net.IPMask{255, 255, 255, 0},
    }

    ipNet2 := net.IPNet{
        IP: net.ParseIP("1:2:3:4:5:6:7:8"),
        IP: remote2.Addr().AsSlice(),
        Mask: net.IPMask{255, 255, 255, 0},
    }

@@ -48,9 +50,13 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
    }

    remotes := NewRemoteList(nil)
    remotes.unlockedPrependV4(0, NewIp4AndPort(remote1.IP, uint32(remote1.Port)))
    remotes.unlockedPrependV6(0, NewIp6AndPort(remote2.IP, uint32(remote2.Port)))
    hm.Add(iputil.Ip2VpnIp(ipNet.IP), &HostInfo{
    remotes.unlockedPrependV4(netip.IPv4Unspecified(), NewIp4AndPortFromNetIP(remote1.Addr(), remote1.Port()))
    remotes.unlockedPrependV6(netip.IPv4Unspecified(), NewIp6AndPortFromNetIP(remote2.Addr(), remote2.Port()))

    vpnIp, ok := netip.AddrFromSlice(ipNet.IP)
    assert.True(t, ok)

    hm.unlockedAddHostInfo(&HostInfo{
        remote: remote1,
        remotes: remotes,
        ConnectionState: &ConnectionState{
@@ -58,15 +64,18 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
        },
        remoteIndexId: 200,
        localIndexId: 201,
        vpnIp: iputil.Ip2VpnIp(ipNet.IP),
        vpnIp: vpnIp,
        relayState: RelayState{
            relays: map[iputil.VpnIp]struct{}{},
            relayForByIp: map[iputil.VpnIp]*Relay{},
            relays: nil,
            relayForByIp: map[netip.Addr]*Relay{},
            relayForByIdx: map[uint32]*Relay{},
        },
    })
    }, &Interface{})

    hm.Add(iputil.Ip2VpnIp(ipNet2.IP), &HostInfo{
    vpnIp2, ok := netip.AddrFromSlice(ipNet2.IP)
    assert.True(t, ok)

    hm.unlockedAddHostInfo(&HostInfo{
        remote: remote1,
        remotes: remotes,
        ConnectionState: &ConnectionState{
@@ -74,13 +83,13 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
        },
        remoteIndexId: 200,
        localIndexId: 201,
        vpnIp: iputil.Ip2VpnIp(ipNet2.IP),
        vpnIp: vpnIp2,
        relayState: RelayState{
            relays: map[iputil.VpnIp]struct{}{},
            relayForByIp: map[iputil.VpnIp]*Relay{},
            relays: nil,
            relayForByIp: map[netip.Addr]*Relay{},
            relayForByIdx: map[uint32]*Relay{},
        },
    })
    }, &Interface{})

    c := Control{
        f: &Interface{
@@ -89,28 +98,29 @@ func TestControl_GetHostInfoByVpnIp(t *testing.T) {
        l: logrus.New(),
    }

    thi := c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet.IP), false)
    thi := c.GetHostInfoByVpnIp(vpnIp, false)

    expectedInfo := ControlHostInfo{
        VpnIp: net.IPv4(1, 2, 3, 4).To4(),
        VpnIp: vpnIp,
        LocalIndex: 201,
        RemoteIndex: 200,
        RemoteAddrs: []*udp.Addr{remote2, remote1},
        CachedPackets: 0,
        RemoteAddrs: []netip.AddrPort{remote2, remote1},
        Cert: crt.Copy(),
        MessageCounter: 0,
        CurrentRemote: udp.NewAddr(net.ParseIP("0.0.0.100"), 4444),
        CurrentRelaysToMe: []iputil.VpnIp{},
        CurrentRelaysThroughMe: []iputil.VpnIp{},
        CurrentRemote: remote1,
        CurrentRelaysToMe: []netip.Addr{},
        CurrentRelaysThroughMe: []netip.Addr{},
    }

    // Make sure we don't have any unexpected fields
    assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "CachedPackets", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
    test.AssertDeepCopyEqual(t, &expectedInfo, thi)
    assertFields(t, []string{"VpnIp", "LocalIndex", "RemoteIndex", "RemoteAddrs", "Cert", "MessageCounter", "CurrentRemote", "CurrentRelaysToMe", "CurrentRelaysThroughMe"}, thi)
    assert.EqualValues(t, &expectedInfo, thi)
    //TODO: netip.Addr reuses global memory for zone identifiers which breaks our "no reused memory check" here
    //test.AssertDeepCopyEqual(t, &expectedInfo, thi)

    // Make sure we don't panic if the host info doesn't have a cert yet
    assert.NotPanics(t, func() {
        thi = c.GetHostInfoByVpnIp(iputil.Ip2VpnIp(ipNet2.IP), false)
        thi = c.GetHostInfoByVpnIp(vpnIp2, false)
    })
}

@@ -4,14 +4,13 @@
package nebula

import (
    "net"
    "net/netip"

    "github.com/slackhq/nebula/cert"

    "github.com/google/gopacket"
    "github.com/google/gopacket/layers"
    "github.com/slackhq/nebula/header"
    "github.com/slackhq/nebula/iputil"
    "github.com/slackhq/nebula/overlay"
    "github.com/slackhq/nebula/udp"
)
@@ -21,7 +20,7 @@ import (
func (c *Control) WaitForType(msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
    h := &header.H{}
    for {
        p := c.f.outside.Get(true)
        p := c.f.outside.(*udp.TesterConn).Get(true)
        if err := h.Parse(p.Data); err != nil {
            panic(err)
        }
@@ -37,7 +36,7 @@ func (c *Control) WaitForType(msgType header.MessageType, subType header.Message
func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType, subType header.MessageSubType, pipeTo *Control) {
    h := &header.H{}
    for {
        p := c.f.outside.Get(true)
        p := c.f.outside.(*udp.TesterConn).Get(true)
        if err := h.Parse(p.Data); err != nil {
            panic(err)
        }
@@ -50,37 +49,30 @@ func (c *Control) WaitForTypeByIndex(toIndex uint32, msgType header.MessageType,

// InjectLightHouseAddr will push toAddr into the local lighthouse cache for the vpnIp
// This is necessary if you did not configure static hosts or are not running a lighthouse
func (c *Control) InjectLightHouseAddr(vpnIp net.IP, toAddr *net.UDPAddr) {
func (c *Control) InjectLightHouseAddr(vpnIp netip.Addr, toAddr netip.AddrPort) {
    c.f.lightHouse.Lock()
    remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp))
    remoteList := c.f.lightHouse.unlockedGetRemoteList(vpnIp)
    remoteList.Lock()
    defer remoteList.Unlock()
    c.f.lightHouse.Unlock()

    iVpnIp := iputil.Ip2VpnIp(vpnIp)
    if v4 := toAddr.IP.To4(); v4 != nil {
        remoteList.unlockedPrependV4(iVpnIp, NewIp4AndPort(v4, uint32(toAddr.Port)))
    if toAddr.Addr().Is4() {
        remoteList.unlockedPrependV4(vpnIp, NewIp4AndPortFromNetIP(toAddr.Addr(), toAddr.Port()))
    } else {
        remoteList.unlockedPrependV6(iVpnIp, NewIp6AndPort(toAddr.IP, uint32(toAddr.Port)))
        remoteList.unlockedPrependV6(vpnIp, NewIp6AndPortFromNetIP(toAddr.Addr(), toAddr.Port()))
    }
}

// InjectRelays will push relayVpnIps into the local lighthouse cache for the vpnIp
// This is necessary to inform an initiator of possible relays for communicating with a responder
func (c *Control) InjectRelays(vpnIp net.IP, relayVpnIps []net.IP) {
func (c *Control) InjectRelays(vpnIp netip.Addr, relayVpnIps []netip.Addr) {
    c.f.lightHouse.Lock()
    remoteList := c.f.lightHouse.unlockedGetRemoteList(iputil.Ip2VpnIp(vpnIp))
    remoteList := c.f.lightHouse.unlockedGetRemoteList(vpnIp)
    remoteList.Lock()
    defer remoteList.Unlock()
    c.f.lightHouse.Unlock()

    iVpnIp := iputil.Ip2VpnIp(vpnIp)
    uVpnIp := []uint32{}
    for _, rVPnIp := range relayVpnIps {
        uVpnIp = append(uVpnIp, uint32(iputil.Ip2VpnIp(rVPnIp)))
    }

    remoteList.unlockedSetRelay(iVpnIp, iVpnIp, uVpnIp)
    remoteList.unlockedSetRelay(vpnIp, vpnIp, relayVpnIps)
}

// GetFromTun will pull a packet off the tun side of nebula
@@ -90,11 +82,11 @@ func (c *Control) GetFromTun(block bool) []byte {

// GetFromUDP will pull a udp packet off the udp side of nebula
func (c *Control) GetFromUDP(block bool) *udp.Packet {
    return c.f.outside.Get(block)
    return c.f.outside.(*udp.TesterConn).Get(block)
}

func (c *Control) GetUDPTxChan() <-chan *udp.Packet {
    return c.f.outside.TxPackets
    return c.f.outside.(*udp.TesterConn).TxPackets
}

func (c *Control) GetTunTxChan() <-chan []byte {
@@ -103,17 +95,18 @@ func (c *Control) GetTunTxChan() <-chan []byte {

// InjectUDPPacket will inject a packet into the udp side of nebula
func (c *Control) InjectUDPPacket(p *udp.Packet) {
    c.f.outside.Send(p)
    c.f.outside.(*udp.TesterConn).Send(p)
}

// InjectTunUDPPacket puts a udp packet on the tun interface. Using UDP here because it's a simpler protocol
func (c *Control) InjectTunUDPPacket(toIp net.IP, toPort uint16, fromPort uint16, data []byte) {
func (c *Control) InjectTunUDPPacket(toIp netip.Addr, toPort uint16, fromPort uint16, data []byte) {
    //TODO: IPV6-WORK
    ip := layers.IPv4{
        Version: 4,
        TTL: 64,
        Protocol: layers.IPProtocolUDP,
        SrcIP: c.f.inside.Cidr().IP,
        DstIP: toIp,
        SrcIP: c.f.inside.Cidr().Addr().Unmap().AsSlice(),
        DstIP: toIp.Unmap().AsSlice(),
    }

    udp := layers.UDP{
@@ -138,21 +131,21 @@ func (c *Control) InjectTunUDPPacket(toIp net.IP, toPort uint16, fromPort uint16
    c.f.inside.(*overlay.TestTun).Send(buffer.Bytes())
}
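The tun injection above leans on gopacket serialization. Here is a self-contained sketch of the same recipe, including the netip-to-slice conversions the diff introduces; the helper name and addresses are illustrative:

```go
package main

import (
	"fmt"
	"net/netip"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// buildUDP assembles an IPv4+UDP packet the way InjectTunUDPPacket does:
// netip addresses become 4-byte slices for layers.IPv4, and the UDP
// checksum is computed against the IP pseudo-header.
func buildUDP(src, dst netip.Addr, sport, dport uint16, payload []byte) ([]byte, error) {
	ip := layers.IPv4{
		Version:  4,
		TTL:      64,
		Protocol: layers.IPProtocolUDP,
		SrcIP:    src.Unmap().AsSlice(),
		DstIP:    dst.Unmap().AsSlice(),
	}
	udp := layers.UDP{SrcPort: layers.UDPPort(sport), DstPort: layers.UDPPort(dport)}
	if err := udp.SetNetworkLayerForChecksum(&ip); err != nil {
		return nil, err
	}

	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{ComputeChecksums: true, FixLengths: true}
	if err := gopacket.SerializeLayers(buf, opts, &ip, &udp, gopacket.Payload(payload)); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func main() {
	b, err := buildUDP(netip.MustParseAddr("10.128.0.1"), netip.MustParseAddr("10.128.0.2"), 80, 80, []byte("Hi from me"))
	fmt.Println(len(b), err)
}
```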
func (c *Control) GetVpnIp() iputil.VpnIp {
    return c.f.myVpnIp
func (c *Control) GetVpnIp() netip.Addr {
    return c.f.myVpnNet.Addr()
}

func (c *Control) GetUDPAddr() string {
    return c.f.outside.Addr.String()
func (c *Control) GetUDPAddr() netip.AddrPort {
    return c.f.outside.(*udp.TesterConn).Addr
}

func (c *Control) KillPendingTunnel(vpnIp net.IP) bool {
    hostinfo, ok := c.f.handshakeManager.pendingHostMap.Hosts[iputil.Ip2VpnIp(vpnIp)]
    if !ok {
func (c *Control) KillPendingTunnel(vpnIp netip.Addr) bool {
    hostinfo := c.f.handshakeManager.QueryVpnIp(vpnIp)
    if hostinfo == nil {
        return false
    }

    c.f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
    c.f.handshakeManager.DeleteHostInfo(hostinfo)
    return true
}

@@ -161,19 +154,9 @@ func (c *Control) GetHostmap() *HostMap {
}

func (c *Control) GetCert() *cert.NebulaCertificate {
    return c.f.certState.Load().certificate
    return c.f.pki.GetCertState().Certificate
}

func (c *Control) ReHandshake(vpnIp iputil.VpnIp) {
    hostinfo := c.f.handshakeManager.AddVpnIp(vpnIp, c.f.initHostInfo)
    ixHandshakeStage0(c.f, vpnIp, hostinfo)

    // If this is a static host, we don't need to wait for the HostQueryReply
    // We can trigger the handshake right now
    if _, ok := c.f.lightHouse.GetStaticHostList()[hostinfo.vpnIp]; ok {
        select {
        case c.f.handshakeManager.trigger <- hostinfo.vpnIp:
        default:
        }
    }
func (c *Control) ReHandshake(vpnIp netip.Addr) {
    c.f.handshakeManager.StartHandshake(vpnIp, nil)
}
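The repeated c.f.outside.(*udp.TesterConn) expressions in the tester above are ordinary Go type assertions: the field holds an interface value, and the test helpers assert it back to the concrete type. A small standalone illustration (types invented for the sketch):

```go
package main

import "fmt"

type conn interface{ Rebind() error }

type testerConn struct{}

func (t *testerConn) Rebind() error { return nil }
func (t *testerConn) Get() string   { return "packet" } // test-only method not on the interface

func main() {
	var outside conn = &testerConn{}

	// One-value form panics if outside is not a *testerConn; the test
	// helpers use it because anything else is a programming error there.
	fmt.Println(outside.(*testerConn).Get())

	// Comma-ok form for code that must tolerate other implementations.
	if tc, ok := outside.(*testerConn); ok {
		fmt.Println(tc.Get())
	}
}
```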
dist/arch/nebula.service (vendored)
@@ -1,13 +0,0 @@
[Unit]
Description=Nebula overlay networking tool
Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target

[Service]
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
Restart=always

[Install]
WantedBy=multi-user.target
dist/fedora/nebula.service (vendored)
@@ -1,14 +0,0 @@
[Unit]
Description=Nebula overlay networking tool
Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target
Before=sshd.service

[Service]
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/bin/nebula -config /etc/nebula/config.yml
Restart=always

[Install]
WantedBy=multi-user.target
@@ -3,6 +3,7 @@ package nebula
import (
    "fmt"
    "net"
    "net/netip"
    "strconv"
    "strings"
    "sync"
@@ -10,7 +11,6 @@ import (
    "github.com/miekg/dns"
    "github.com/sirupsen/logrus"
    "github.com/slackhq/nebula/config"
    "github.com/slackhq/nebula/iputil"
)

// This whole thing should be rewritten to use context
@@ -42,21 +42,23 @@ func (d *dnsRecords) Query(data string) string {
}

func (d *dnsRecords) QueryCert(data string) string {
    ip := net.ParseIP(data[:len(data)-1])
    if ip == nil {
        return ""
    }
    iip := iputil.Ip2VpnIp(ip)
    hostinfo, err := d.hostMap.QueryVpnIp(iip)
    ip, err := netip.ParseAddr(data[:len(data)-1])
    if err != nil {
        return ""
    }

    hostinfo := d.hostMap.QueryVpnIp(ip)
    if hostinfo == nil {
        return ""
    }

    q := hostinfo.GetCert()
    if q == nil {
        return ""
    }

    cert := q.Details
    c := fmt.Sprintf("\"Name: %s\" \"Ips: %s\" \"Subnets %s\" \"Groups %s\" \"NotBefore %s\" \"NotAFter %s\" \"PublicKey %x\" \"IsCA %t\" \"Issuer %s\"", cert.Name, cert.Ips, cert.Subnets, cert.Groups, cert.NotBefore, cert.NotAfter, cert.PublicKey, cert.IsCA, cert.Issuer)
    c := fmt.Sprintf("\"Name: %s\" \"Ips: %s\" \"Subnets %s\" \"Groups %s\" \"NotBefore %s\" \"NotAfter %s\" \"PublicKey %x\" \"IsCA %t\" \"Issuer %s\"", cert.Name, cert.Ips, cert.Subnets, cert.Groups, cert.NotBefore, cert.NotAfter, cert.PublicKey, cert.IsCA, cert.Issuer)
    return c
}

@@ -80,7 +82,11 @@ func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
        }
    case dns.TypeTXT:
        a, _, _ := net.SplitHostPort(w.RemoteAddr().String())
        b := net.ParseIP(a)
        b, err := netip.ParseAddr(a)
        if err != nil {
            return
        }

        // We don't answer these queries from non nebula nodes or localhost
        //l.Debugf("Does %s contain %s", b, dnsR.hostMap.vpnCIDR)
        if !dnsR.hostMap.vpnCIDR.Contains(b) && a != "127.0.0.1" {
@@ -96,6 +102,10 @@ func parseQuery(l *logrus.Logger, m *dns.Msg, w dns.ResponseWriter) {
            }
        }
    }

    if len(m.Answer) == 0 {
        m.Rcode = dns.RcodeNameError
    }
}

func handleDnsRequest(l *logrus.Logger, w dns.ResponseWriter, r *dns.Msg) {
@@ -129,7 +139,12 @@ func dnsMain(l *logrus.Logger, hostMap *HostMap, c *config.C) func() {
}

func getDnsServerAddr(c *config.C) string {
    return c.GetString("lighthouse.dns.host", "") + ":" + strconv.Itoa(c.GetInt("lighthouse.dns.port", 53))
    dnsHost := strings.TrimSpace(c.GetString("lighthouse.dns.host", ""))
    // Old guidance was to provide the literal `[::]` in `lighthouse.dns.host` but that won't resolve.
    if dnsHost == "[::]" {
        dnsHost = "::"
    }
    return net.JoinHostPort(dnsHost, strconv.Itoa(c.GetInt("lighthouse.dns.port", 53)))
}

func startDns(l *logrus.Logger, c *config.C) {

@@ -4,6 +4,8 @@ import (
    "testing"

    "github.com/miekg/dns"
    "github.com/slackhq/nebula/config"
    "github.com/stretchr/testify/assert"
)

func TestParsequery(t *testing.T) {
@@ -17,3 +19,40 @@ func TestParsequery(t *testing.T) {

    //parseQuery(m)
}

func Test_getDnsServerAddr(t *testing.T) {
    c := config.NewC(nil)

    c.Settings["lighthouse"] = map[interface{}]interface{}{
        "dns": map[interface{}]interface{}{
            "host": "0.0.0.0",
            "port": "1",
        },
    }
    assert.Equal(t, "0.0.0.0:1", getDnsServerAddr(c))

    c.Settings["lighthouse"] = map[interface{}]interface{}{
        "dns": map[interface{}]interface{}{
            "host": "::",
            "port": "1",
        },
    }
    assert.Equal(t, "[::]:1", getDnsServerAddr(c))

    c.Settings["lighthouse"] = map[interface{}]interface{}{
        "dns": map[interface{}]interface{}{
            "host": "[::]",
            "port": "1",
        },
    }
    assert.Equal(t, "[::]:1", getDnsServerAddr(c))

    // Make sure whitespace doesn't mess us up
    c.Settings["lighthouse"] = map[interface{}]interface{}{
        "dns": map[interface{}]interface{}{
            "host": "[::] ",
            "port": "1",
        },
    }
    assert.Equal(t, "[::]:1", getDnsServerAddr(c))
}
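The getDnsServerAddr change tested above exists because naive string concatenation mishandles IPv6 hosts, while net.JoinHostPort inserts the brackets a listener address needs. A quick stdlib demonstration:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// Old behavior: "::" + ":" + "53" yields ":::53", which nothing can parse.
	fmt.Println("::" + ":" + "53")

	// net.JoinHostPort brackets IPv6 hosts so the result parses cleanly.
	addr := net.JoinHostPort("::", "53")
	fmt.Println(addr) // [::]:53

	host, port, err := net.SplitHostPort(addr)
	fmt.Println(host, port, err) // :: 53 <nil>
}
```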
docker/Dockerfile (new file)
@@ -0,0 +1,11 @@
FROM gcr.io/distroless/static:latest

ARG TARGETOS TARGETARCH
COPY build/$TARGETOS-$TARGETARCH/nebula /nebula
COPY build/$TARGETOS-$TARGETARCH/nebula-cert /nebula-cert

VOLUME ["/config"]

ENTRYPOINT ["/nebula"]
# Allow users to override the args passed to nebula
CMD ["-config", "/config/config.yml"]
docker/README.md (new file)
@@ -0,0 +1,24 @@
# NebulaOSS/nebula Docker Image

## Building

From the root of the repository, run `make docker`.

## Running

To run the built image, use the following command:

```
docker run \
  --name nebula \
  --network host \
  --cap-add NET_ADMIN \
  --volume ./config:/config \
  --rm \
  nebulaoss/nebula
```

A few notes:

- The `NET_ADMIN` capability is necessary to create the tun adapter on the host (this is unnecessary if the tun device is disabled.)
- `--volume ./config:/config` should point to a directory that contains your `config.yml` and any other necessary files.
@@ -4,28 +4,29 @@
package e2e

import (
    "fmt"
    "net"
    "net/netip"
    "slices"
    "testing"
    "time"

    "github.com/google/gopacket"
    "github.com/google/gopacket/layers"
    "github.com/sirupsen/logrus"
    "github.com/slackhq/nebula"
    "github.com/slackhq/nebula/e2e/router"
    "github.com/slackhq/nebula/header"
    "github.com/slackhq/nebula/iputil"
    "github.com/slackhq/nebula/udp"
    "github.com/stretchr/testify/assert"
    "gopkg.in/yaml.v2"
)

func BenchmarkHotPath(b *testing.B) {
    ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, _, _, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
    ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
    myControl, _, _, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)

    // Put their info in our lighthouse
    myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
    myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)

    // Start the servers
    myControl.Start()
@@ -35,7 +36,7 @@ func BenchmarkHotPath(b *testing.B) {
    r.CancelFlowLogs()

    for n := 0; n < b.N; n++ {
        myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
        myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
        _ = r.RouteForAllUntilTxTun(theirControl)
    }

@@ -44,19 +45,19 @@ func BenchmarkHotPath(b *testing.B) {
}

func TestGoodHandshake(t *testing.T) {
    ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
    ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)

    // Put their info in our lighthouse
    myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
    myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)

    // Start the servers
    myControl.Start()
    theirControl.Start()

    t.Log("Send a udp packet through to begin standing up the tunnel, this should come out the other side")
    myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
    myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))

    t.Log("Have them consume my stage 0 packet. They have a tunnel now")
    theirControl.InjectUDPPacket(myControl.GetFromUDP(true))
@@ -77,16 +78,16 @@ func TestGoodHandshake(t *testing.T) {
    myControl.WaitForType(1, 0, theirControl)

    t.Log("Make sure our host infos are correct")
    assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl)
    assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl)

    t.Log("Get that cached packet and make sure it looks right")
    myCachedPacket := theirControl.GetFromTun(true)
    assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
    assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)

    t.Log("Do a bidirectional tunnel test")
    r := router.NewR(t, myControl, theirControl)
    defer r.RenderFlow()
    assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
    assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)

    r.RenderHostmaps("Final hostmaps", myControl, theirControl)
    myControl.Stop()
@@ -95,20 +96,20 @@ func TestGoodHandshake(t *testing.T) {
}

func TestWrongResponderHandshake(t *testing.T) {
    ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})

    // The IPs here are chosen on purpose:
    // The current remote handling will sort by preference, public, and then lexically.
    // So we need them to have a higher address than evil (we could apply a preference though)
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 100}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 99}, nil)
    evilControl, evilVpnIp, evilUdpAddr, _ := newSimpleServer(ca, caKey, "evil", net.IP{10, 0, 0, 2}, nil)
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.100/24", nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.99/24", nil)
    evilControl, evilVpnIp, evilUdpAddr, _ := newSimpleServer(ca, caKey, "evil", "10.128.0.2/24", nil)

    // Add their real udp addr, which should be tried after evil.
    myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
    myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)

    // Put the evil udp addr in for their vpn Ip, this is a case of being lied to by the lighthouse.
    myControl.InjectLightHouseAddr(theirVpnIpNet.IP, evilUdpAddr)
    myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), evilUdpAddr)

    // Build a router so we don't have to reason about who gets which packet
    r := router.NewR(t, myControl, theirControl, evilControl)
@@ -120,7 +121,7 @@ func TestWrongResponderHandshake(t *testing.T) {
    evilControl.Start()

    t.Log("Start the handshake process, we will route until we see our cached packet get sent to them")
    myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
    myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
    r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
        h := &header.H{}
        err := h.Parse(p.Data)
@@ -128,7 +129,7 @@ func TestWrongResponderHandshake(t *testing.T) {
            panic(err)
        }

        if p.ToIp.Equal(theirUdpAddr.IP) && p.ToPort == uint16(theirUdpAddr.Port) && h.Type == 1 {
        if p.To == theirUdpAddr && h.Type == 1 {
            return router.RouteAndExit
        }

@@ -139,18 +140,18 @@ func TestWrongResponderHandshake(t *testing.T) {

    t.Log("My cached packet should be received by them")
    myCachedPacket := theirControl.GetFromTun(true)
    assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
    assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)

    t.Log("Test the tunnel with them")
    assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl)
    assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
    assertHostInfoPair(t, myUdpAddr, theirUdpAddr, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl)
    assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)

    t.Log("Flush all packets from all controllers")
    r.FlushAll()

    t.Log("Ensure I don't have any hostinfo artifacts from evil")
    assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), true), "My pending hostmap should not contain evil")
    assert.Nil(t, myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(evilVpnIp.IP), false), "My main hostmap should not contain evil")
    assert.Nil(t, myControl.GetHostInfoByVpnIp(evilVpnIp.Addr(), true), "My pending hostmap should not contain evil")
    assert.Nil(t, myControl.GetHostInfoByVpnIp(evilVpnIp.Addr(), false), "My main hostmap should not contain evil")
    //NOTE: if evil lost the handshake race it may still have a tunnel since me would reject the handshake since the tunnel is complete

    //TODO: assert hostmaps for everyone
@@ -164,13 +165,13 @@ func TestStage1Race(t *testing.T) {
    // This test ensures that two hosts handshaking with each other at the same time will allow traffic to flow
    // But will eventually collapse down to a single tunnel

    ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me  ", net.IP{10, 0, 0, 1}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
    ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me  ", "10.128.0.1/24", nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)

    // Put their info in our lighthouse and vice versa
    myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
    theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
    myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
    theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)

    // Build a router so we don't have to reason about who gets which packet
    r := router.NewR(t, myControl, theirControl)
@@ -181,8 +182,8 @@ func TestStage1Race(t *testing.T) {
    theirControl.Start()

    t.Log("Trigger a handshake to start on both me and them")
    myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
    theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
    myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
    theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))

    t.Log("Get both stage 1 handshake packets")
    myHsForThem := myControl.GetFromUDP(true)
@@ -194,14 +195,14 @@ func TestStage1Race(t *testing.T) {

    r.Log("Route until they receive a message packet")
    myCachedPacket := r.RouteForAllUntilTxTun(theirControl)
    assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
    assertUdpPacket(t, []byte("Hi from me"), myCachedPacket, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)

    r.Log("Their cached packet should be received by me")
    theirCachedPacket := r.RouteForAllUntilTxTun(myControl)
    assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80)
    assertUdpPacket(t, []byte("Hi from them"), theirCachedPacket, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), 80, 80)

    r.Log("Do a bidirectional tunnel test")
    assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
    assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)

    myHostmapHosts := myControl.ListHostmapHosts(false)
    myHostmapIndexes := myControl.ListHostmapIndexes(false)
@@ -219,7 +220,7 @@ func TestStage1Race(t *testing.T) {
    r.Log("Spin until connection manager tears down a tunnel")

    for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
        assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
        assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
        t.Log("Connection manager hasn't ticked yet")
        time.Sleep(time.Second)
    }
@@ -241,13 +242,13 @@ func TestStage1Race(t *testing.T) {
}

func TestUncleanShutdownRaceLoser(t *testing.T) {
    ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me  ", net.IP{10, 0, 0, 1}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
    ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me  ", "10.128.0.1/24", nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)

    // Teach me how to get to the relay and that they can be reached via the relay
    myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
    theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
    myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
    theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)

    // Build a router so we don't have to reason about who gets which packet
    r := router.NewR(t, myControl, theirControl)
@@ -258,28 +259,28 @@ func TestUncleanShutdownRaceLoser(t *testing.T) {
    theirControl.Start()

    r.Log("Trigger a handshake from me to them")
    myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
    myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))

    p := r.RouteForAllUntilTxTun(theirControl)
    assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
    assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)

    r.Log("Nuke my hostmap")
    myHostmap := myControl.GetHostmap()
    myHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{}
    myHostmap.Hosts = map[netip.Addr]*nebula.HostInfo{}
    myHostmap.Indexes = map[uint32]*nebula.HostInfo{}
    myHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}

    myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me again"))
    myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me again"))
    p = r.RouteForAllUntilTxTun(theirControl)
    assertUdpPacket(t, []byte("Hi from me again"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
    assertUdpPacket(t, []byte("Hi from me again"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)

    r.Log("Assert the tunnel works")
    assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
    assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)

    r.Log("Wait for the dead index to go away")
    start := len(theirControl.GetHostmap().Indexes)
    for {
        assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
        assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
        if len(theirControl.GetHostmap().Indexes) < start {
            break
        }
@@ -290,13 +291,13 @@ func TestUncleanShutdownRaceLoser(t *testing.T) {
}

func TestUncleanShutdownRaceWinner(t *testing.T) {
    ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me  ", net.IP{10, 0, 0, 1}, nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
    ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
    myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me  ", "10.128.0.1/24", nil)
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)

    // Teach me how to get to the relay and that they can be reached via the relay
    myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
    theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
    myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
    theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)

    // Build a router so we don't have to reason about who gets which packet
    r := router.NewR(t, myControl, theirControl)
@@ -307,30 +308,30 @@ func TestUncleanShutdownRaceWinner(t *testing.T) {
    theirControl.Start()

    r.Log("Trigger a handshake from me to them")
    myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
    myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))

    p := r.RouteForAllUntilTxTun(theirControl)
    assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
    assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
    r.RenderHostmaps("Final hostmaps", myControl, theirControl)

    r.Log("Nuke their hostmap")
    theirHostmap := theirControl.GetHostmap()
    theirHostmap.Hosts = map[iputil.VpnIp]*nebula.HostInfo{}
    theirHostmap.Hosts = map[netip.Addr]*nebula.HostInfo{}
    theirHostmap.Indexes = map[uint32]*nebula.HostInfo{}
    theirHostmap.RemoteIndexes = map[uint32]*nebula.HostInfo{}

    theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them again"))
    theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them again"))
    p = r.RouteForAllUntilTxTun(myControl)
    assertUdpPacket(t, []byte("Hi from them again"), p, theirVpnIpNet.IP, myVpnIpNet.IP, 80, 80)
    assertUdpPacket(t, []byte("Hi from them again"), p, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), 80, 80)
    r.RenderHostmaps("Derp hostmaps", myControl, theirControl)

    r.Log("Assert the tunnel works")
    assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
    assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)

    r.Log("Wait for the dead index to go away")
    start := len(myControl.GetHostmap().Indexes)
    for {
        assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
        assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
        if len(myControl.GetHostmap().Indexes) < start {
            break
        }
@@ -341,15 +342,15 @@ func TestUncleanShutdownRaceWinner(t *testing.T) {
}

func TestRelays(t *testing.T) {
    ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
    myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me     ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
    relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay  ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them   ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
    ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
    myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me     ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
    relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay  ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
    theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them   ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})

    // Teach me how to get to the relay and that they can be reached via the relay
    myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
    myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
    relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
    myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
    myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
    relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)

    // Build a router so we don't have to reason about who gets which packet
    r := router.NewR(t, myControl, relayControl, theirControl)
@@ -361,31 +362,162 @@ func TestRelays(t *testing.T) {
    theirControl.Start()

    t.Log("Trigger a handshake from me to them via the relay")
    myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
    myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))

    p := r.RouteForAllUntilTxTun(theirControl)
    r.Log("Assert the tunnel works")
    assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
|
||||
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
|
||||
r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)
|
||||
//TODO: assert we actually used the relay even though it should be impossible for a tunnel to have occurred without it
|
||||
}
|
||||
|
||||
func TestStage1RaceRelays(t *testing.T) {
|
||||
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
|
||||
ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
|
||||
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
|
||||
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
|
||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
|
||||
func TestReestablishRelays(t *testing.T) {
|
||||
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
|
||||
myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
|
||||
relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
|
||||
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
|
||||
|
||||
// Teach my how to get to the relay and that their can be reached via the relay
|
||||
myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||
theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
|
||||
myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
|
||||
myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
|
||||
relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
|
||||
|
||||
myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||
theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
|
||||
// Build a router so we don't have to reason who gets which packet
|
||||
r := router.NewR(t, myControl, relayControl, theirControl)
|
||||
defer r.RenderFlow()
|
||||
|
||||
relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
|
||||
relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
|
||||
// Start the servers
|
||||
myControl.Start()
|
||||
relayControl.Start()
|
||||
theirControl.Start()
|
||||
|
||||
t.Log("Trigger a handshake from me to them via the relay")
|
||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
|
||||
|
||||
p := r.RouteForAllUntilTxTun(theirControl)
|
||||
r.Log("Assert the tunnel works")
|
||||
assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
|
||||
|
||||
t.Log("Ensure packet traversal from them to me via the relay")
|
||||
theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))
|
||||
|
||||
p = r.RouteForAllUntilTxTun(myControl)
|
||||
r.Log("Assert the tunnel works")
|
||||
assertUdpPacket(t, []byte("Hi from them"), p, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), 80, 80)
|
||||
|
||||
// If we break the relay's connection to 'them', 'me' needs to detect and recover the connection
|
||||
r.Log("Close the tunnel")
|
||||
relayControl.CloseTunnel(theirVpnIpNet.Addr(), true)
|
||||
|
||||
start := len(myControl.GetHostmap().Indexes)
|
||||
curIndexes := len(myControl.GetHostmap().Indexes)
|
||||
for curIndexes >= start {
|
||||
curIndexes = len(myControl.GetHostmap().Indexes)
|
||||
r.Logf("Wait for the dead index to go away:start=%v indexes, current=%v indexes", start, curIndexes)
|
||||
myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me should fail"))
|
||||
|
||||
r.RouteForAllExitFunc(func(p *udp.Packet, c *nebula.Control) router.ExitType {
|
||||
return router.RouteAndExit
|
||||
})
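// RouteAndExit appears to route the current packet and then return from
// RouteForAllExitFunc, so each loop pass drains one packet before sleeping.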
+ time.Sleep(2 * time.Second)
+ }
+ r.Log("Dead index went away. Woot!")
+ r.RenderHostmaps("Me removed hostinfo", myControl, relayControl, theirControl)
+ // Next packet should re-establish a relayed connection and work just great.

+ t.Logf("Assert the tunnel...")
+ for {
+ t.Log("RouteForAllUntilTxTun")
+ myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
+ myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
+ relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
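// Lighthouse and relay info is re-injected on every attempt since the
// recovery path may have dropped it along with the dead hostinfo.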
+ myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))

+ p = r.RouteForAllUntilTxTun(theirControl)
+ r.Log("Assert the tunnel works")
+ packet := gopacket.NewPacket(p, layers.LayerTypeIPv4, gopacket.Lazy)
+ v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
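// gopacket's v4.SrcIP and v4.DstIP are net.IP values, i.e. raw byte slices,
// so they can be compared byte-wise against netip.Addr.AsSlice().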
+ if slices.Compare(v4.SrcIP, myVpnIpNet.Addr().AsSlice()) != 0 {
+ t.Logf("SrcIP is unexpected...this is not the packet I'm looking for. Keep looking")
+ continue
+ }
+ if slices.Compare(v4.DstIP, theirVpnIpNet.Addr().AsSlice()) != 0 {
+ t.Logf("DstIP is unexpected...this is not the packet I'm looking for. Keep looking")
+ continue
+ }

+ udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
+ if udp == nil {
+ t.Log("Not a UDP packet. This is not the packet I'm looking for. Keep looking")
+ continue
+ }
+ data := packet.ApplicationLayer()
+ if data == nil {
+ t.Log("No data found in packet. This is not the packet I'm looking for. Keep looking.")
+ continue
+ }
+ if string(data.Payload()) != "Hi from me" {
+ t.Logf("Unexpected payload: '%v', keep looking", string(data.Payload()))
+ continue
+ }
+ t.Log("I found my lost packet. I am so happy.")
+ break
+ }
+ t.Log("Assert the tunnel works the other way, too")
+ for {
+ t.Log("RouteForAllUntilTxTun")
+ theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))

+ p = r.RouteForAllUntilTxTun(myControl)
+ r.Log("Assert the tunnel works")
+ packet := gopacket.NewPacket(p, layers.LayerTypeIPv4, gopacket.Lazy)
+ v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
+ if slices.Compare(v4.DstIP, myVpnIpNet.Addr().AsSlice()) != 0 {
+ t.Logf("Dst is unexpected...this is not the packet I'm looking for. Keep looking")
+ continue
+ }
+ if slices.Compare(v4.SrcIP, theirVpnIpNet.Addr().AsSlice()) != 0 {
+ t.Logf("SrcIP is unexpected...this is not the packet I'm looking for. Keep looking")
+ continue
+ }

+ udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
+ if udp == nil {
+ t.Log("Not a UDP packet. This is not the packet I'm looking for. Keep looking")
+ continue
+ }
+ data := packet.ApplicationLayer()
+ if data == nil {
+ t.Log("No data found in packet. This is not the packet I'm looking for. Keep looking.")
+ continue
+ }
+ if string(data.Payload()) != "Hi from them" {
+ t.Logf("Unexpected payload: '%v', keep looking", string(data.Payload()))
+ continue
+ }
+ t.Log("I found my lost packet. I am so happy.")
+ break
+ }
+ r.RenderHostmaps("Final hostmaps", myControl, relayControl, theirControl)

+ }

+ func TestStage1RaceRelays(t *testing.T) {
+ //NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
+ ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
+ myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
+ relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
+ theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})

+ // Teach me how to get to the relay and that they can be reached via the relay
+ myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
+ theirControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)

+ myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
+ theirControl.InjectRelays(myVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})

+ relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+ relayControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)

// Build a router so we don't have to reason about who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl)
@@ -397,19 +529,21 @@ func TestStage1RaceRelays(t *testing.T) {
theirControl.Start()

r.Log("Get a tunnel between me and relay")
- assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
+ assertTunnel(t, myVpnIpNet.Addr(), relayVpnIpNet.Addr(), myControl, relayControl, r)

r.Log("Get a tunnel between them and relay")
- assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
+ assertTunnel(t, theirVpnIpNet.Addr(), relayVpnIpNet.Addr(), theirControl, relayControl, r)

r.Log("Trigger a handshake from both them and me via relay to them and me")
- myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
- theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
+ myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
+ theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))

r.Log("Wait for a packet from them to me")
p := r.RouteForAllUntilTxTun(myControl)
+ _ = p

+ r.FlushAll()

myControl.Stop()
theirControl.Stop()
relayControl.Stop()
@@ -419,21 +553,21 @@ func TestStage1RaceRelays(t *testing.T) {

func TestStage1RaceRelays2(t *testing.T) {
//NOTE: this is a race between me and relay resulting in a full tunnel from me to them via relay
- ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
- myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
- relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
- theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
+ ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
+ myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
+ relayControl, relayVpnIpNet, relayUdpAddr, _ := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
+ theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})
l := NewTestLogger()

// Teach me how to get to the relay and that they can be reached via the relay
- myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
- theirControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
+ myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
+ theirControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)

- myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
- theirControl.InjectRelays(myVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
+ myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
+ theirControl.InjectRelays(myVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})

- relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
- relayControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
+ relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+ relayControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)

// Build a router so we don't have to reason about who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl)
@@ -446,16 +580,16 @@ func TestStage1RaceRelays2(t *testing.T) {

- r.Log("Get a tunnel between me and relay")
+ l.Info("Get a tunnel between me and relay")
- assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
+ assertTunnel(t, myVpnIpNet.Addr(), relayVpnIpNet.Addr(), myControl, relayControl, r)

- r.Log("Get a tunnel between them and relay")
+ l.Info("Get a tunnel between them and relay")
- assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
+ assertTunnel(t, theirVpnIpNet.Addr(), relayVpnIpNet.Addr(), theirControl, relayControl, r)

- r.Log("Trigger a handshake from both them and me via relay to them and me")
+ l.Info("Trigger a handshake from both them and me via relay to them and me")
- myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
- theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
+ myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
+ theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))

//r.RouteUntilAfterMsgType(myControl, header.Control, header.MessageNone)
//r.RouteUntilAfterMsgType(theirControl, header.Control, header.MessageNone)
@@ -468,7 +602,7 @@ func TestStage1RaceRelays2(t *testing.T) {

- r.Log("Assert the tunnel works")
+ l.Info("Assert the tunnel works")
- assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
+ assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)

- t.Log("Wait until we remove extra tunnels")
+ l.Info("Wait until we remove extra tunnels")
@@ -488,7 +622,7 @@ func TestStage1RaceRelays2(t *testing.T) {
"theirControl": len(theirControl.GetHostmap().Indexes),
"relayControl": len(relayControl.GetHostmap().Indexes),
}).Info("Waiting for hostinfos to be removed...")
- assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
+ assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second)
retries--
@@ -496,7 +630,7 @@ func TestStage1RaceRelays2(t *testing.T) {

- r.Log("Assert the tunnel works")
+ l.Info("Assert the tunnel works")
- assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
+ assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)

myControl.Stop()
theirControl.Stop()
@@ -505,16 +639,17 @@ func TestStage1RaceRelays2(t *testing.T) {
//
////TODO: assert hostmaps
}

func TestRehandshakingRelays(t *testing.T) {
- ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
- myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 1}, m{"relay": m{"use_relays": true}})
- relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", net.IP{10, 0, 0, 128}, m{"relay": m{"am_relay": true}})
- theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", net.IP{10, 0, 0, 2}, m{"relay": m{"use_relays": true}})
+ ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
+ myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.1/24", m{"relay": m{"use_relays": true}})
+ relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", "10.128.0.128/24", m{"relay": m{"am_relay": true}})
+ theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})

// Teach me how to get to the relay and that they can be reached via the relay
- myControl.InjectLightHouseAddr(relayVpnIpNet.IP, relayUdpAddr)
- myControl.InjectRelays(theirVpnIpNet.IP, []net.IP{relayVpnIpNet.IP})
- relayControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
+ myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
+ myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
+ relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)

// Build a router so we don't have to reason about who gets which packet
r := router.NewR(t, myControl, relayControl, theirControl)
@@ -526,17 +661,17 @@ func TestRehandshakingRelays(t *testing.T) {
theirControl.Start()

t.Log("Trigger a handshake from me to them via the relay")
- myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
+ myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))

p := r.RouteForAllUntilTxTun(theirControl)
r.Log("Assert the tunnel works")
- assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.IP, theirVpnIpNet.IP, 80, 80)
+ assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)

// When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
// and the main host infos will not have any relay state to handle the me<->relay<->them tunnel.
r.Log("Renew relay certificate and spin until me and them see it")
- _, _, myNextPrivKey, myNextPEM := newTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})
+ _, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})

caB, err := ca.MarshalToPEM()
if err != nil {
@@ -554,8 +689,8 @@ func TestRehandshakingRelays(t *testing.T) {

for {
r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet")
- assertTunnel(t, myVpnIpNet.IP, relayVpnIpNet.IP, myControl, relayControl, r)
- c := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
+ assertTunnel(t, myVpnIpNet.Addr(), relayVpnIpNet.Addr(), myControl, relayControl, r)
+ c := myControl.GetHostInfoByVpnIp(relayVpnIpNet.Addr(), false)
if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now
r.Log("Certificate between me and relay is updated!")
@@ -567,8 +702,8 @@ func TestRehandshakingRelays(t *testing.T) {

for {
r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet")
- assertTunnel(t, theirVpnIpNet.IP, relayVpnIpNet.IP, theirControl, relayControl, r)
- c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(relayVpnIpNet.IP), false)
+ assertTunnel(t, theirVpnIpNet.Addr(), relayVpnIpNet.Addr(), theirControl, relayControl, r)
+ c := theirControl.GetHostInfoByVpnIp(relayVpnIpNet.Addr(), false)
if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now
r.Log("Certificate between them and relay is updated!")
@@ -579,13 +714,13 @@ func TestRehandshakingRelays(t *testing.T) {
}

r.Log("Assert the relay tunnel still works")
- assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
+ assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
// We should have two hostinfos on all sides
for len(myControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
- assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
+ assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
@@ -593,7 +728,7 @@ func TestRehandshakingRelays(t *testing.T) {
for len(theirControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
- assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
+ assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
@@ -601,7 +736,111 @@ func TestRehandshakingRelays(t *testing.T) {
for len(relayControl.GetHostmap().Indexes) != 2 {
t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes))
r.Log("Assert the relay tunnel still works")
- assertTunnel(t, theirVpnIpNet.IP, myVpnIpNet.IP, theirControl, myControl, r)
+ assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
r.Log("yupitdoes")
time.Sleep(time.Second)
}
t.Logf("relayControl hostinfos got cleaned up!")
}

+ func TestRehandshakingRelaysPrimary(t *testing.T) {
+ // This test is the same as TestRehandshakingRelays but one of the terminal types is a primary swap winner
+ ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
+ myControl, myVpnIpNet, _, _ := newSimpleServer(ca, caKey, "me ", "10.128.0.128/24", m{"relay": m{"use_relays": true}})
+ relayControl, relayVpnIpNet, relayUdpAddr, relayConfig := newSimpleServer(ca, caKey, "relay ", "10.128.0.1/24", m{"relay": m{"am_relay": true}})
+ theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them ", "10.128.0.2/24", m{"relay": m{"use_relays": true}})

+ // Teach me how to get to the relay and that they can be reached via the relay
+ myControl.InjectLightHouseAddr(relayVpnIpNet.Addr(), relayUdpAddr)
+ myControl.InjectRelays(theirVpnIpNet.Addr(), []netip.Addr{relayVpnIpNet.Addr()})
+ relayControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)

+ // Build a router so we don't have to reason about who gets which packet
+ r := router.NewR(t, myControl, relayControl, theirControl)
+ defer r.RenderFlow()

+ // Start the servers
+ myControl.Start()
+ relayControl.Start()
+ theirControl.Start()

+ t.Log("Trigger a handshake from me to them via the relay")
+ myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))

+ p := r.RouteForAllUntilTxTun(theirControl)
+ r.Log("Assert the tunnel works")
+ assertUdpPacket(t, []byte("Hi from me"), p, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), 80, 80)
+ r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)

+ // When I update the certificate for the relay, both me and them will have 2 host infos for the relay,
+ // and the main host infos will not have any relay state to handle the me<->relay<->them tunnel.
+ r.Log("Renew relay certificate and spin until me and them see it")
+ _, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "relay", time.Now(), time.Now().Add(5*time.Minute), relayVpnIpNet, nil, []string{"new group"})

+ caB, err := ca.MarshalToPEM()
+ if err != nil {
+ panic(err)
+ }

+ relayConfig.Settings["pki"] = m{
+ "ca": string(caB),
+ "cert": string(myNextPEM),
+ "key": string(myNextPrivKey),
+ }
+ rc, err := yaml.Marshal(relayConfig.Settings)
+ assert.NoError(t, err)
+ relayConfig.ReloadConfigString(string(rc))

+ for {
+ r.Log("Assert the tunnel works between myVpnIpNet and relayVpnIpNet")
+ assertTunnel(t, myVpnIpNet.Addr(), relayVpnIpNet.Addr(), myControl, relayControl, r)
+ c := myControl.GetHostInfoByVpnIp(relayVpnIpNet.Addr(), false)
+ if len(c.Cert.Details.Groups) != 0 {
+ // We have a new certificate now
+ r.Log("Certificate between me and relay is updated!")
+ break
+ }

+ time.Sleep(time.Second)
+ }

+ for {
+ r.Log("Assert the tunnel works between theirVpnIpNet and relayVpnIpNet")
+ assertTunnel(t, theirVpnIpNet.Addr(), relayVpnIpNet.Addr(), theirControl, relayControl, r)
+ c := theirControl.GetHostInfoByVpnIp(relayVpnIpNet.Addr(), false)
+ if len(c.Cert.Details.Groups) != 0 {
+ // We have a new certificate now
+ r.Log("Certificate between them and relay is updated!")
+ break
+ }

+ time.Sleep(time.Second)
+ }

+ r.Log("Assert the relay tunnel still works")
+ assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
+ r.RenderHostmaps("working hostmaps", myControl, relayControl, theirControl)
+ // We should have two hostinfos on all sides
+ for len(myControl.GetHostmap().Indexes) != 2 {
+ t.Logf("Waiting for myControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(myControl.GetHostmap().Indexes))
+ r.Log("Assert the relay tunnel still works")
+ assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
+ r.Log("yupitdoes")
+ time.Sleep(time.Second)
+ }
+ t.Logf("myControl hostinfos got cleaned up!")
+ for len(theirControl.GetHostmap().Indexes) != 2 {
+ t.Logf("Waiting for theirControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(theirControl.GetHostmap().Indexes))
+ r.Log("Assert the relay tunnel still works")
+ assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
+ r.Log("yupitdoes")
+ time.Sleep(time.Second)
+ }
+ t.Logf("theirControl hostinfos got cleaned up!")
+ for len(relayControl.GetHostmap().Indexes) != 2 {
+ t.Logf("Waiting for relayControl hostinfos (%v != 2) to get cleaned up from lack of use...", len(relayControl.GetHostmap().Indexes))
+ r.Log("Assert the relay tunnel still works")
+ assertTunnel(t, theirVpnIpNet.Addr(), myVpnIpNet.Addr(), theirControl, myControl, r)
+ r.Log("yupitdoes")
+ time.Sleep(time.Second)
+ }
@@ -609,13 +848,13 @@ func TestRehandshakingRelays(t *testing.T) {
}

func TestRehandshaking(t *testing.T) {
- ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
- myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
- theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)
+ ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
+ myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", "10.128.0.2/24", nil)
+ theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", "10.128.0.1/24", nil)

// Put their info in our lighthouse and vice versa
- myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
- theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
+ myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+ theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)

// Build a router so we don't have to reason about who gets which packet
r := router.NewR(t, myControl, theirControl)
@@ -626,12 +865,12 @@ func TestRehandshaking(t *testing.T) {
theirControl.Start()

t.Log("Stand up a tunnel between me and them")
- assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
+ assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)

r.RenderHostmaps("Starting hostmaps", myControl, theirControl)

r.Log("Renew my certificate and spin until they see it")
- _, _, myNextPrivKey, myNextPEM := newTestCert(ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), myVpnIpNet, nil, []string{"new group"})
+ _, _, myNextPrivKey, myNextPEM := NewTestCert(ca, caKey, "me", time.Now(), time.Now().Add(5*time.Minute), myVpnIpNet, nil, []string{"new group"})

caB, err := ca.MarshalToPEM()
if err != nil {
@@ -648,8 +887,8 @@ func TestRehandshaking(t *testing.T) {
myConfig.ReloadConfigString(string(rc))

for {
- assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
- c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
+ assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
+ c := theirControl.GetHostInfoByVpnIp(myVpnIpNet.Addr(), false)
if len(c.Cert.Details.Groups) != 0 {
// We have a new certificate now
break
@@ -675,19 +914,19 @@ func TestRehandshaking(t *testing.T) {

r.Log("Spin until there is only 1 tunnel")
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
- assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
+ assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second)
}

- assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
+ assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)

// Make sure the correct tunnel won
- c := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
+ c := theirControl.GetHostInfoByVpnIp(myVpnIpNet.Addr(), false)
assert.Contains(t, c.Cert.Details.Groups, "new group")

// We should only have a single tunnel now on both sides
@@ -705,13 +944,13 @@ func TestRehandshaking(t *testing.T) {
func TestRehandshakingLoser(t *testing.T) {
// The purpose of this test is that the race loser renews their certificate and rehandshakes. The final tunnel
// should be the one with the new certificate
- ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
- myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", net.IP{10, 0, 0, 2}, nil)
- theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 1}, nil)
+ ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
+ myControl, myVpnIpNet, myUdpAddr, myConfig := newSimpleServer(ca, caKey, "me ", "10.128.0.2/24", nil)
+ theirControl, theirVpnIpNet, theirUdpAddr, theirConfig := newSimpleServer(ca, caKey, "them", "10.128.0.1/24", nil)

// Put their info in our lighthouse and vice versa
- myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
- theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
+ myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+ theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)

// Build a router so we don't have to reason about who gets which packet
r := router.NewR(t, myControl, theirControl)
@@ -722,16 +961,15 @@ func TestRehandshakingLoser(t *testing.T) {
theirControl.Start()

t.Log("Stand up a tunnel between me and them")
- assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
+ assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)

- tt1 := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
- tt2 := theirControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(myVpnIpNet.IP), false)
- fmt.Println(tt1.LocalIndex, tt2.LocalIndex)
+ myControl.GetHostInfoByVpnIp(theirVpnIpNet.Addr(), false)
+ theirControl.GetHostInfoByVpnIp(myVpnIpNet.Addr(), false)

r.RenderHostmaps("Starting hostmaps", myControl, theirControl)

r.Log("Renew their certificate and spin until mine sees it")
- _, _, theirNextPrivKey, theirNextPEM := newTestCert(ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), theirVpnIpNet, nil, []string{"their new group"})
+ _, _, theirNextPrivKey, theirNextPEM := NewTestCert(ca, caKey, "them", time.Now(), time.Now().Add(5*time.Minute), theirVpnIpNet, nil, []string{"their new group"})

caB, err := ca.MarshalToPEM()
if err != nil {
@@ -748,8 +986,8 @@ func TestRehandshakingLoser(t *testing.T) {
theirConfig.ReloadConfigString(string(rc))

for {
- assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
- theirCertInMe := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
+ assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
+ theirCertInMe := myControl.GetHostInfoByVpnIp(theirVpnIpNet.Addr(), false)

_, theirNewGroup := theirCertInMe.Cert.Details.InvertedGroups["their new group"]
if theirNewGroup {
@@ -776,19 +1014,19 @@ func TestRehandshakingLoser(t *testing.T) {

r.Log("Spin until there is only 1 tunnel")
for len(myControl.GetHostmap().Indexes)+len(theirControl.GetHostmap().Indexes) > 2 {
- assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
+ assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
t.Log("Connection manager hasn't ticked yet")
time.Sleep(time.Second)
}

- assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
+ assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)
myFinalHostmapHosts := myControl.ListHostmapHosts(false)
myFinalHostmapIndexes := myControl.ListHostmapIndexes(false)
theirFinalHostmapHosts := theirControl.ListHostmapHosts(false)
theirFinalHostmapIndexes := theirControl.ListHostmapIndexes(false)

// Make sure the correct tunnel won
- theirCertInMe := myControl.GetHostInfoByVpnIp(iputil.Ip2VpnIp(theirVpnIpNet.IP), false)
+ theirCertInMe := myControl.GetHostInfoByVpnIp(theirVpnIpNet.Addr(), false)
assert.Contains(t, theirCertInMe.Cert.Details.Groups, "their new group")

// We should only have a single tunnel now on both sides
@@ -806,13 +1044,13 @@ func TestRaceRegression(t *testing.T) {
// This test forces stage 1, stage 2, stage 1 to be received by me from them
// We had a bug where we were not finding the duplicate handshake and responding to the final stage 1, which
// caused a cross-linked hostinfo
- ca, _, caKey, _ := newTestCaCert(time.Now(), time.Now().Add(10*time.Minute), []*net.IPNet{}, []*net.IPNet{}, []string{})
- myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", net.IP{10, 0, 0, 1}, nil)
- theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", net.IP{10, 0, 0, 2}, nil)
+ ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
+ myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", nil)
+ theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", nil)

// Put their info in our lighthouse
- myControl.InjectLightHouseAddr(theirVpnIpNet.IP, theirUdpAddr)
- theirControl.InjectLightHouseAddr(myVpnIpNet.IP, myUdpAddr)
+ myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
+ theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)

// Start the servers
myControl.Start()
@@ -826,8 +1064,8 @@ func TestRaceRegression(t *testing.T) {
//them rx stage:2 initiatorIndex=120607833 responderIndex=4209862089

t.Log("Start both handshakes")
- myControl.InjectTunUDPPacket(theirVpnIpNet.IP, 80, 80, []byte("Hi from me"))
- theirControl.InjectTunUDPPacket(myVpnIpNet.IP, 80, 80, []byte("Hi from them"))
+ myControl.InjectTunUDPPacket(theirVpnIpNet.Addr(), 80, 80, []byte("Hi from me"))
+ theirControl.InjectTunUDPPacket(myVpnIpNet.Addr(), 80, 80, []byte("Hi from them"))

t.Log("Get both stage 1")
myStage1ForThem := myControl.GetFromUDP(true)
@@ -857,7 +1095,7 @@ func TestRaceRegression(t *testing.T) {
r.RenderHostmaps("Starting hostmaps", myControl, theirControl)

t.Log("Make sure the tunnel still works")
- assertTunnel(t, myVpnIpNet.IP, theirVpnIpNet.IP, myControl, theirControl, r)
+ assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)

myControl.Stop()
theirControl.Stop()
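The hunks above and below repeat a small set of net -> net/netip conversions. As a reference, a minimal self-contained sketch of those idioms (the package name and values here are illustrative, not taken from the tests):

package example

import (
	"net"
	"net/netip"
)

func netipIdioms() (*net.IPNet, netip.AddrPort) {
	// Parsing replaces hand-built structs like &net.IPNet{...} and net.IP{10, 0, 0, 1}.
	prefix := netip.MustParsePrefix("10.128.0.1/24")
	addr := prefix.Addr() // netip.Addr is a comparable value type

	// netip.AddrPort replaces net.UDPAddr and formatted "ip:port" strings.
	ap := netip.AddrPortFrom(addr, 4242)

	// Bridging back to the legacy types where an API still wants them.
	ipNet := &net.IPNet{
		IP:   addr.AsSlice(), // net.IP is a plain []byte
		Mask: net.CIDRMask(prefix.Bits(), addr.BitLen()),
	}
	return ipNet, ap
}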
e2e/helpers.go (new file, 125 lines)
@@ -0,0 +1,125 @@
+ package e2e

+ import (
+ "crypto/rand"
+ "io"
+ "net"
+ "net/netip"
+ "time"

+ "github.com/slackhq/nebula/cert"
+ "golang.org/x/crypto/curve25519"
+ "golang.org/x/crypto/ed25519"
+ )

+ // NewTestCaCert will generate a CA cert
+ func NewTestCaCert(before, after time.Time, ips, subnets []netip.Prefix, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
+ pub, priv, err := ed25519.GenerateKey(rand.Reader)
+ if before.IsZero() {
+ before = time.Now().Add(time.Second * -60).Round(time.Second)
+ }
+ if after.IsZero() {
+ after = time.Now().Add(time.Second * 60).Round(time.Second)
+ }

+ nc := &cert.NebulaCertificate{
+ Details: cert.NebulaCertificateDetails{
+ Name: "test ca",
+ NotBefore: time.Unix(before.Unix(), 0),
+ NotAfter: time.Unix(after.Unix(), 0),
+ PublicKey: pub,
+ IsCA: true,
+ InvertedGroups: make(map[string]struct{}),
+ },
+ }

+ if len(ips) > 0 {
+ nc.Details.Ips = make([]*net.IPNet, len(ips))
+ for i, ip := range ips {
+ nc.Details.Ips[i] = &net.IPNet{IP: ip.Addr().AsSlice(), Mask: net.CIDRMask(ip.Bits(), ip.Addr().BitLen())}
+ }
+ }
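// netip.Prefix has no direct *net.IPNet equivalent, so the bridge is built by
// hand: AsSlice() for the address bytes, CIDRMask(Bits(), BitLen()) for the mask.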

+ if len(subnets) > 0 {
+ nc.Details.Subnets = make([]*net.IPNet, len(subnets))
+ for i, ip := range subnets {
+ nc.Details.Subnets[i] = &net.IPNet{IP: ip.Addr().AsSlice(), Mask: net.CIDRMask(ip.Bits(), ip.Addr().BitLen())}
+ }
+ }

+ if len(groups) > 0 {
+ nc.Details.Groups = groups
+ }

+ err = nc.Sign(cert.Curve_CURVE25519, priv)
+ if err != nil {
+ panic(err)
+ }

+ pem, err := nc.MarshalToPEM()
+ if err != nil {
+ panic(err)
+ }

+ return nc, pub, priv, pem
+ }

+ // NewTestCert will generate a signed certificate with the provided details.
+ // Expiry times are defaulted if you do not pass them in
+ func NewTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip netip.Prefix, subnets []netip.Prefix, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
+ issuer, err := ca.Sha256Sum()
+ if err != nil {
+ panic(err)
+ }

+ if before.IsZero() {
+ before = time.Now().Add(time.Second * -60).Round(time.Second)
+ }

+ if after.IsZero() {
+ after = time.Now().Add(time.Second * 60).Round(time.Second)
+ }

+ pub, rawPriv := x25519Keypair()
+ ipb := ip.Addr().AsSlice()
+ nc := &cert.NebulaCertificate{
+ Details: cert.NebulaCertificateDetails{
+ Name: name,
+ Ips: []*net.IPNet{{IP: ipb[:], Mask: net.CIDRMask(ip.Bits(), ip.Addr().BitLen())}},
+ //Subnets: subnets,
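// Note that the subnets parameter is accepted but not applied while the
// Subnets field above stays commented out.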
+ Groups: groups,
+ NotBefore: time.Unix(before.Unix(), 0),
+ NotAfter: time.Unix(after.Unix(), 0),
+ PublicKey: pub,
+ IsCA: false,
+ Issuer: issuer,
+ InvertedGroups: make(map[string]struct{}),
+ },
+ }

+ err = nc.Sign(ca.Details.Curve, key)
+ if err != nil {
+ panic(err)
+ }

+ pem, err := nc.MarshalToPEM()
+ if err != nil {
+ panic(err)
+ }

+ return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
+ }

+ func x25519Keypair() ([]byte, []byte) {
+ privkey := make([]byte, 32)
+ if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
+ panic(err)
+ }
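// curve25519.X25519 applies the RFC 7748 clamping to the scalar internally,
// so the raw random bytes above are usable as a private key as-is.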

+ pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
+ if err != nil {
+ panic(err)
+ }

+ return pubkey, privkey
+ }
@@ -4,43 +4,47 @@
package e2e

import (
- "crypto/rand"
"fmt"
- "io"
- "net"
+ "net/netip"
"os"
"testing"
"time"

+ "dario.cat/mergo"
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
- "github.com/imdario/mergo"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/e2e/router"
- "github.com/slackhq/nebula/iputil"
"github.com/stretchr/testify/assert"
- "golang.org/x/crypto/curve25519"
- "golang.org/x/crypto/ed25519"
"gopkg.in/yaml.v2"
)

type m map[string]interface{}

// newSimpleServer creates a nebula instance with many assumptions
- func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, udpIp net.IP, overrides m) (*nebula.Control, *net.IPNet, *net.UDPAddr, *config.C) {
+ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, sVpnIpNet string, overrides m) (*nebula.Control, netip.Prefix, netip.AddrPort, *config.C) {
l := NewTestLogger()

- vpnIpNet := &net.IPNet{IP: make([]byte, len(udpIp)), Mask: net.IPMask{255, 255, 255, 0}}
- copy(vpnIpNet.IP, udpIp)
- vpnIpNet.IP[1] += 128
- udpAddr := net.UDPAddr{
- IP: udpIp,
- Port: 4242,
+ vpnIpNet, err := netip.ParsePrefix(sVpnIpNet)
+ if err != nil {
+ panic(err)
}
- _, _, myPrivKey, myPEM := newTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})

+ var udpAddr netip.AddrPort
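// The UDP listen address is derived from the VPN address by undoing the +128
// offset on the second byte (byte 13 for IPv6): "10.128.0.1/24" listens on
// 10.0.0.1:4242.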
+ if vpnIpNet.Addr().Is4() {
+ budpIp := vpnIpNet.Addr().As4()
+ budpIp[1] -= 128
+ udpAddr = netip.AddrPortFrom(netip.AddrFrom4(budpIp), 4242)
+ } else {
+ budpIp := vpnIpNet.Addr().As16()
+ budpIp[13] -= 128
+ udpAddr = netip.AddrPortFrom(netip.AddrFrom16(budpIp), 4242)
+ }
+ _, _, myPrivKey, myPEM := NewTestCert(caCrt, caKey, name, time.Now(), time.Now().Add(5*time.Minute), vpnIpNet, nil, []string{})

caB, err := caCrt.MarshalToPEM()
if err != nil {
@@ -70,8 +74,8 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
// "try_interval": "1s",
//},
"listen": m{
- "host": udpAddr.IP.String(),
- "port": udpAddr.Port,
+ "host": udpAddr.Addr().String(),
+ "port": udpAddr.Port(),
},
"logging": m{
"timestamp_format": fmt.Sprintf("%v 15:04:05.000000", name),
@@ -105,113 +109,7 @@ func newSimpleServer(caCrt *cert.NebulaCertificate, caKey []byte, name string, u
panic(err)
}

- return control, vpnIpNet, &udpAddr, c
- }
-
- // newTestCaCert will generate a CA cert
- func newTestCaCert(before, after time.Time, ips, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
- pub, priv, err := ed25519.GenerateKey(rand.Reader)
- if before.IsZero() {
- before = time.Now().Add(time.Second * -60).Round(time.Second)
- }
- if after.IsZero() {
- after = time.Now().Add(time.Second * 60).Round(time.Second)
- }
-
- nc := &cert.NebulaCertificate{
- Details: cert.NebulaCertificateDetails{
- Name: "test ca",
- NotBefore: time.Unix(before.Unix(), 0),
- NotAfter: time.Unix(after.Unix(), 0),
- PublicKey: pub,
- IsCA: true,
- InvertedGroups: make(map[string]struct{}),
- },
- }
-
- if len(ips) > 0 {
- nc.Details.Ips = ips
- }
-
- if len(subnets) > 0 {
- nc.Details.Subnets = subnets
- }
-
- if len(groups) > 0 {
- nc.Details.Groups = groups
- }
-
- err = nc.Sign(cert.Curve_CURVE25519, priv)
- if err != nil {
- panic(err)
- }
-
- pem, err := nc.MarshalToPEM()
- if err != nil {
- panic(err)
- }
-
- return nc, pub, priv, pem
- }
-
- // newTestCert will generate a signed certificate with the provided details.
- // Expiry times are defaulted if you do not pass them in
- func newTestCert(ca *cert.NebulaCertificate, key []byte, name string, before, after time.Time, ip *net.IPNet, subnets []*net.IPNet, groups []string) (*cert.NebulaCertificate, []byte, []byte, []byte) {
- issuer, err := ca.Sha256Sum()
- if err != nil {
- panic(err)
- }
-
- if before.IsZero() {
- before = time.Now().Add(time.Second * -60).Round(time.Second)
- }
-
- if after.IsZero() {
- after = time.Now().Add(time.Second * 60).Round(time.Second)
- }
-
- pub, rawPriv := x25519Keypair()
-
- nc := &cert.NebulaCertificate{
- Details: cert.NebulaCertificateDetails{
- Name: name,
- Ips: []*net.IPNet{ip},
- Subnets: subnets,
- Groups: groups,
- NotBefore: time.Unix(before.Unix(), 0),
- NotAfter: time.Unix(after.Unix(), 0),
- PublicKey: pub,
- IsCA: false,
- Issuer: issuer,
- InvertedGroups: make(map[string]struct{}),
- },
- }
-
- err = nc.Sign(ca.Details.Curve, key)
- if err != nil {
- panic(err)
- }
-
- pem, err := nc.MarshalToPEM()
- if err != nil {
- panic(err)
- }
-
- return nc, pub, cert.MarshalX25519PrivateKey(rawPriv), pem
- }
-
- func x25519Keypair() ([]byte, []byte) {
- privkey := make([]byte, 32)
- if _, err := io.ReadFull(rand.Reader, privkey); err != nil {
- panic(err)
- }
-
- pubkey, err := curve25519.X25519(privkey, curve25519.Basepoint)
- if err != nil {
- panic(err)
- }
-
- return pubkey, privkey
+ return control, vpnIpNet, udpAddr, c
}

type doneCb func()
@@ -232,7 +130,7 @@ func deadline(t *testing.T, seconds time.Duration) doneCb {
}
}

- func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control, r *router.R) {
+ func assertTunnel(t *testing.T, vpnIpA, vpnIpB netip.Addr, controlA, controlB *nebula.Control, r *router.R) {
// Send a packet from them to me
controlB.InjectTunUDPPacket(vpnIpA, 80, 90, []byte("Hi from B"))
bPacket := r.RouteForAllUntilTxTun(controlA)
@@ -244,23 +142,20 @@ func assertTunnel(t *testing.T, vpnIpA, vpnIpB net.IP, controlA, controlB *nebul
assertUdpPacket(t, []byte("Hello from A"), aPacket, vpnIpA, vpnIpB, 90, 80)
}

- func assertHostInfoPair(t *testing.T, addrA, addrB *net.UDPAddr, vpnIpA, vpnIpB net.IP, controlA, controlB *nebula.Control) {
+ func assertHostInfoPair(t *testing.T, addrA, addrB netip.AddrPort, vpnIpA, vpnIpB netip.Addr, controlA, controlB *nebula.Control) {
// Get both host infos
- hBinA := controlA.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpB), false)
+ hBinA := controlA.GetHostInfoByVpnIp(vpnIpB, false)
assert.NotNil(t, hBinA, "Host B was not found by vpnIp in controlA")

- hAinB := controlB.GetHostInfoByVpnIp(iputil.Ip2VpnIp(vpnIpA), false)
+ hAinB := controlB.GetHostInfoByVpnIp(vpnIpA, false)
assert.NotNil(t, hAinB, "Host A was not found by vpnIp in controlB")

// Check that both vpn and real addr are correct
assert.Equal(t, vpnIpB, hBinA.VpnIp, "Host B VpnIp is wrong in control A")
assert.Equal(t, vpnIpA, hAinB.VpnIp, "Host A VpnIp is wrong in control B")

- assert.Equal(t, addrB.IP.To16(), hBinA.CurrentRemote.IP.To16(), "Host B remote ip is wrong in control A")
- assert.Equal(t, addrA.IP.To16(), hAinB.CurrentRemote.IP.To16(), "Host A remote ip is wrong in control B")
-
- assert.Equal(t, addrB.Port, int(hBinA.CurrentRemote.Port), "Host B remote port is wrong in control A")
- assert.Equal(t, addrA.Port, int(hAinB.CurrentRemote.Port), "Host A remote port is wrong in control B")
+ assert.Equal(t, addrB, hBinA.CurrentRemote, "Host B remote is wrong in control A")
+ assert.Equal(t, addrA, hAinB.CurrentRemote, "Host A remote is wrong in control B")

// Check that our indexes match
assert.Equal(t, hBinA.LocalIndex, hAinB.RemoteIndex, "Host B local index does not match host A remote index")
@@ -283,13 +178,13 @@ func assertHostInfoPair(t *testing.T, addrA, addrB *net.UDPAddr, vpnIpA, vpnIpB
//checkIndexes("hmB", hmB, hAinB)
}

- func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp net.IP, fromPort, toPort uint16) {
+ func assertUdpPacket(t *testing.T, expected, b []byte, fromIp, toIp netip.Addr, fromPort, toPort uint16) {
packet := gopacket.NewPacket(b, layers.LayerTypeIPv4, gopacket.Lazy)
v4 := packet.Layer(layers.LayerTypeIPv4).(*layers.IPv4)
assert.NotNil(t, v4, "No ipv4 data found")

- assert.Equal(t, fromIp, v4.SrcIP, "Source ip was incorrect")
- assert.Equal(t, toIp, v4.DstIP, "Dest ip was incorrect")
+ assert.Equal(t, fromIp.AsSlice(), []byte(v4.SrcIP), "Source ip was incorrect")
+ assert.Equal(t, toIp.AsSlice(), []byte(v4.DstIP), "Dest ip was incorrect")
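// A netip.Addr never compares equal to a net.IP, so both sides are reduced to
// plain byte slices before asserting.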

udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
assert.NotNil(t, udp, "No udp data found")

@@ -5,11 +5,11 @@ package router

import (
"fmt"
+ "net/netip"
"sort"
"strings"

"github.com/slackhq/nebula"
- "github.com/slackhq/nebula/iputil"
)

type edge struct {
@@ -118,14 +118,14 @@ func renderHostmap(c *nebula.Control) (string, []*edge) {
return r, globalLines
}

- func sortedHosts(hosts map[iputil.VpnIp]*nebula.HostInfo) []iputil.VpnIp {
- keys := make([]iputil.VpnIp, 0, len(hosts))
+ func sortedHosts(hosts map[netip.Addr]*nebula.HostInfo) []netip.Addr {
+ keys := make([]netip.Addr, 0, len(hosts))
for key := range hosts {
keys = append(keys, key)
}

sort.SliceStable(keys, func(i, j int) bool {
- return keys[i] > keys[j]
+ return keys[i].Compare(keys[j]) > 0
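// netip.Addr is an opaque struct, so the old ">" comparison no longer
// compiles; Compare returns -1/0/1 and preserves the descending sort order.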
})

return keys

@@ -6,12 +6,11 @@ package router
import (
"context"
"fmt"
- "net"
+ "net/netip"
"os"
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"testing"
@@ -21,7 +20,6 @@ import (
"github.com/google/gopacket/layers"
"github.com/slackhq/nebula"
"github.com/slackhq/nebula/header"
- "github.com/slackhq/nebula/iputil"
"github.com/slackhq/nebula/udp"
"golang.org/x/exp/maps"
)
@@ -29,18 +27,18 @@ import (
type R struct {
// Simple map of the ip:port registered on a control to the control
// Basically a router, right?
- controls map[string]*nebula.Control
+ controls map[netip.AddrPort]*nebula.Control

// A map for inbound packets for a control that doesn't know about this address
- inNat map[string]*nebula.Control
+ inNat map[netip.AddrPort]*nebula.Control

// A last-used map: if an inbound packet hit the inNat map, then
// all return packets should use the same last-used inbound address for the outbound sender.
// map[from address + ":" + to address] => ip:port to rewrite in the udp packet to the receiver
- outNat map[string]net.UDPAddr
+ outNat map[string]netip.AddrPort

// A map of vpn ip to the nebula control it belongs to
- vpnControls map[iputil.VpnIp]*nebula.Control
+ vpnControls map[netip.Addr]*nebula.Control
|
||||
|
||||
ignoreFlows []ignoreFlow
|
||||
flow []flowEntry
|
||||
@@ -118,10 +116,10 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
|
||||
}
|
||||
|
||||
r := &R{
|
||||
controls: make(map[string]*nebula.Control),
|
||||
vpnControls: make(map[iputil.VpnIp]*nebula.Control),
|
||||
inNat: make(map[string]*nebula.Control),
|
||||
outNat: make(map[string]net.UDPAddr),
|
||||
controls: make(map[netip.AddrPort]*nebula.Control),
|
||||
vpnControls: make(map[netip.Addr]*nebula.Control),
|
||||
inNat: make(map[netip.AddrPort]*nebula.Control),
|
||||
outNat: make(map[string]netip.AddrPort),
|
||||
flow: []flowEntry{},
|
||||
ignoreFlows: []ignoreFlow{},
|
||||
fn: filepath.Join("mermaid", fmt.Sprintf("%s.md", t.Name())),
|
||||
@@ -135,7 +133,7 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
|
||||
for _, c := range controls {
|
||||
addr := c.GetUDPAddr()
|
||||
if _, ok := r.controls[addr]; ok {
|
||||
panic("Duplicate listen address: " + addr)
|
||||
panic("Duplicate listen address: " + addr.String())
|
||||
}
|
||||
|
||||
r.vpnControls[c.GetVpnIp()] = c
|
||||
@@ -165,13 +163,13 @@ func NewR(t testing.TB, controls ...*nebula.Control) *R {
|
||||
// It does not look at the addr attached to the instance.
|
||||
// If a route is used, this will behave like a NAT for the return path.
|
||||
// Rewriting the source ip:port to what was last sent to from the origin
|
||||
func (r *R) AddRoute(ip net.IP, port uint16, c *nebula.Control) {
|
||||
func (r *R) AddRoute(ip netip.Addr, port uint16, c *nebula.Control) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
inAddr := net.JoinHostPort(ip.String(), fmt.Sprintf("%v", port))
|
||||
inAddr := netip.AddrPortFrom(ip, port)
|
||||
if _, ok := r.inNat[inAddr]; ok {
|
||||
panic("Duplicate listen address inNat: " + inAddr)
|
||||
panic("Duplicate listen address inNat: " + inAddr.String())
|
||||
}
|
||||
r.inNat[inAddr] = c
|
||||
}
|
||||
@@ -198,7 +196,7 @@ func (r *R) renderFlow() {
panic(err)
}

var participants = map[string]struct{}{}
var participants = map[netip.AddrPort]struct{}{}
var participantsVals []string

fmt.Fprintln(f, "```mermaid")
@@ -215,7 +213,7 @@ func (r *R) renderFlow() {
continue
}
participants[addr] = struct{}{}
sanAddr := strings.Replace(addr, ":", "-", 1)
sanAddr := strings.Replace(addr.String(), ":", "-", 1)
participantsVals = append(participantsVals, sanAddr)
fmt.Fprintf(
f, " participant %s as Nebula: %s<br/>UDP: %s\n",
@@ -252,9 +250,9 @@ func (r *R) renderFlow() {

fmt.Fprintf(f,
" %s%s%s: %s(%s), index %v, counter: %v\n",
strings.Replace(p.from.GetUDPAddr(), ":", "-", 1),
strings.Replace(p.from.GetUDPAddr().String(), ":", "-", 1),
line,
strings.Replace(p.to.GetUDPAddr(), ":", "-", 1),
strings.Replace(p.to.GetUDPAddr().String(), ":", "-", 1),
h.TypeName(), h.SubTypeName(), h.RemoteIndex, h.MessageCounter,
)
}
@@ -305,7 +303,7 @@ func (r *R) RenderHostmaps(title string, controls ...*nebula.Control) {
func (r *R) renderHostmaps(title string) {
c := maps.Values(r.controls)
sort.SliceStable(c, func(i, j int) bool {
return c[i].GetVpnIp() > c[j].GetVpnIp()
return c[i].GetVpnIp().Compare(c[j].GetVpnIp()) > 0
})

s := renderHostmaps(c...)
@@ -420,10 +418,8 @@ func (r *R) RouteUntilTxTun(sender *nebula.Control, receiver *nebula.Control) []

// Nope, lets push the sender along
case p := <-udpTx:
outAddr := sender.GetUDPAddr()
r.Lock()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
c := r.getControl(outAddr, inAddr, p)
c := r.getControl(sender.GetUDPAddr(), p.To, p)
if c == nil {
r.Unlock()
panic("No control for udp tx")
@@ -479,10 +475,7 @@ func (r *R) RouteForAllUntilTxTun(receiver *nebula.Control) []byte {
} else {
// we are a udp tx, route and continue
p := rx.Interface().(*udp.Packet)
outAddr := cm[x].GetUDPAddr()

inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
c := r.getControl(outAddr, inAddr, p)
c := r.getControl(cm[x].GetUDPAddr(), p.To, p)
if c == nil {
r.Unlock()
panic("No control for udp tx")
@@ -509,12 +502,10 @@ func (r *R) RouteExitFunc(sender *nebula.Control, whatDo ExitFunc) {
panic(err)
}

outAddr := sender.GetUDPAddr()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
receiver := r.getControl(outAddr, inAddr, p)
receiver := r.getControl(sender.GetUDPAddr(), p.To, p)
if receiver == nil {
r.Unlock()
panic("Can't route for host: " + inAddr)
panic("Can't RouteExitFunc for host: " + p.To.String())
}

e := whatDo(p, receiver)
@@ -590,13 +581,13 @@ func (r *R) InjectUDPPacket(sender, receiver *nebula.Control, packet *udp.Packet
// RouteForUntilAfterToAddr will route for sender and return only after it sees and sends a packet destined for toAddr
// finish can be any of the exitType values except `keepRouting`, the default value is `routeAndExit`
// If the router doesn't have the nebula controller for that address, we panic
func (r *R) RouteForUntilAfterToAddr(sender *nebula.Control, toAddr *net.UDPAddr, finish ExitType) {
func (r *R) RouteForUntilAfterToAddr(sender *nebula.Control, toAddr netip.AddrPort, finish ExitType) {
if finish == KeepRouting {
finish = RouteAndExit
}

r.RouteExitFunc(sender, func(p *udp.Packet, r *nebula.Control) ExitType {
if p.ToIp.Equal(toAddr.IP) && p.ToPort == uint16(toAddr.Port) {
if p.To == toAddr {
return finish
}

@@ -630,13 +621,10 @@ func (r *R) RouteForAllExitFunc(whatDo ExitFunc) {
r.Lock()

p := rx.Interface().(*udp.Packet)

outAddr := cm[x].GetUDPAddr()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
receiver := r.getControl(outAddr, inAddr, p)
receiver := r.getControl(cm[x].GetUDPAddr(), p.To, p)
if receiver == nil {
r.Unlock()
panic("Can't route for host: " + inAddr)
panic("Can't RouteForAllExitFunc for host: " + p.To.String())
}

e := whatDo(p, receiver)
@@ -697,41 +685,26 @@ func (r *R) FlushAll() {

p := rx.Interface().(*udp.Packet)

outAddr := cm[x].GetUDPAddr()
inAddr := net.JoinHostPort(p.ToIp.String(), fmt.Sprintf("%v", p.ToPort))
receiver := r.getControl(outAddr, inAddr, p)
receiver := r.getControl(cm[x].GetUDPAddr(), p.To, p)
if receiver == nil {
r.Unlock()
panic("Can't route for host: " + inAddr)
panic("Can't FlushAll for host: " + p.To.String())
}
receiver.InjectUDPPacket(p)
r.Unlock()
}
}

// getControl performs or seeds NAT translation and returns the control for toAddr, p from fields may change
// This is an internal router function, the caller must hold the lock
func (r *R) getControl(fromAddr, toAddr string, p *udp.Packet) *nebula.Control {
if newAddr, ok := r.outNat[fromAddr+":"+toAddr]; ok {
p.FromIp = newAddr.IP
p.FromPort = uint16(newAddr.Port)
func (r *R) getControl(fromAddr, toAddr netip.AddrPort, p *udp.Packet) *nebula.Control {
if newAddr, ok := r.outNat[fromAddr.String()+":"+toAddr.String()]; ok {
p.From = newAddr
}

c, ok := r.inNat[toAddr]
if ok {
sHost, sPort, err := net.SplitHostPort(toAddr)
if err != nil {
panic(err)
}

port, err := strconv.Atoi(sPort)
if err != nil {
panic(err)
}

r.outNat[c.GetUDPAddr()+":"+fromAddr] = net.UDPAddr{
IP: net.ParseIP(sHost),
Port: port,
}
r.outNat[c.GetUDPAddr().String()+":"+fromAddr.String()] = toAddr
return c
}

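The rewritten `getControl` keeps the reverse-NAT key as the string `fromAddr + ":" + toAddr` but now stores the `netip.AddrPort` directly instead of re-parsing it into a `net.UDPAddr`. A simplified standalone sketch of the same seed-then-rewrite flow (types and names here are illustrative, not the router's API):

```go
package main

import (
	"fmt"
	"net/netip"
)

// A reduced model of the router's NAT bookkeeping: inNat maps a NAT'd
// inbound address to a backend, outNat remembers how replies from the
// backend's real address should be masqueraded.
type nat struct {
	inNat  map[netip.AddrPort]string
	outNat map[string]netip.AddrPort
}

// route seeds the reverse mapping the first time a sender hits a NAT'd
// address, keyed the same way as the router: "backendReal:origSender".
func (n *nat) route(sender, dest, backendReal netip.AddrPort) (string, bool) {
	backend, ok := n.inNat[dest]
	if ok {
		n.outNat[backendReal.String()+":"+sender.String()] = dest
	}
	return backend, ok
}

func main() {
	n := &nat{
		inNat:  map[netip.AddrPort]string{netip.MustParseAddrPort("203.0.113.9:4242"): "hostB"},
		outNat: map[string]netip.AddrPort{},
	}
	sender := netip.MustParseAddrPort("198.51.100.1:4242")
	backendReal := netip.MustParseAddrPort("10.1.0.2:4242")
	b, ok := n.route(sender, netip.MustParseAddrPort("203.0.113.9:4242"), backendReal)
	fmt.Println(b, ok, n.outNat[backendReal.String()+":"+sender.String()])
}
```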
@@ -746,8 +719,9 @@ func (r *R) formatUdpPacket(p *packet) string {
}

from := "unknown"
if c, ok := r.vpnControls[iputil.Ip2VpnIp(v4.SrcIP)]; ok {
from = c.GetUDPAddr()
srcAddr, _ := netip.AddrFromSlice(v4.SrcIP)
if c, ok := r.vpnControls[srcAddr]; ok {
from = c.GetUDPAddr().String()
}

udp := packet.Layer(layers.LayerTypeUDP).(*layers.UDP)
@@ -759,7 +733,7 @@ func (r *R) formatUdpPacket(p *packet) string {
return fmt.Sprintf(
" %s-->>%s: src port: %v<br/>dest port: %v<br/>data: \"%v\"\n",
strings.Replace(from, ":", "-", 1),
strings.Replace(p.to.GetUDPAddr(), ":", "-", 1),
strings.Replace(p.to.GetUDPAddr().String(), ":", "-", 1),
udp.SrcPort,
udp.DstPort,
string(data.Payload()),

e2e/tunnels_test.go (new file, 55 lines)
@@ -0,0 +1,55 @@
//go:build e2e_testing
// +build e2e_testing

package e2e

import (
"testing"
"time"

"github.com/slackhq/nebula/e2e/router"
)

func TestDropInactiveTunnels(t *testing.T) {
// The goal of this test is to ensure the shortest inactivity timeout will close the tunnel on both sides
// under ideal conditions
ca, _, caKey, _ := NewTestCaCert(time.Now(), time.Now().Add(10*time.Minute), nil, nil, []string{})
myControl, myVpnIpNet, myUdpAddr, _ := newSimpleServer(ca, caKey, "me", "10.128.0.1/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "5s"}})
theirControl, theirVpnIpNet, theirUdpAddr, _ := newSimpleServer(ca, caKey, "them", "10.128.0.2/24", m{"tunnels": m{"drop_inactive": true, "inactivity_timeout": "10m"}})

// Share our underlay information
myControl.InjectLightHouseAddr(theirVpnIpNet.Addr(), theirUdpAddr)
theirControl.InjectLightHouseAddr(myVpnIpNet.Addr(), myUdpAddr)

// Start the servers
myControl.Start()
theirControl.Start()

r := router.NewR(t, myControl, theirControl)

r.Log("Assert the tunnel between me and them works")
assertTunnel(t, myVpnIpNet.Addr(), theirVpnIpNet.Addr(), myControl, theirControl, r)

r.Log("Go inactive and wait for the tunnels to get dropped")
waitStart := time.Now()
for {
myIndexes := len(myControl.GetHostmap().Indexes)
theirIndexes := len(theirControl.GetHostmap().Indexes)
if myIndexes == 0 && theirIndexes == 0 {
break
}

since := time.Since(waitStart)
r.Logf("my tunnels: %v; their tunnels: %v; duration: %v", myIndexes, theirIndexes, since)
if since > time.Second*30 {
t.Fatal("Tunnel should have been declared inactive after 5 seconds and before 30 seconds")
}

time.Sleep(1 * time.Second)
r.FlushAll()
}

r.Logf("Inactive tunnels were dropped within %v", time.Since(waitStart))
myControl.Stop()
theirControl.Stop()
}
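The wait loop in this test is a common poll-with-deadline pattern; a generic version might look like the following (a hypothetical helper, not part of the test suite):

```go
package main

import (
	"fmt"
	"time"
)

// waitFor polls cond every tick until it returns true or the deadline
// passes, mirroring the loop in TestDropInactiveTunnels.
func waitFor(cond func() bool, tick, deadline time.Duration) error {
	start := time.Now()
	for !cond() {
		if time.Since(start) > deadline {
			return fmt.Errorf("condition not met within %v", deadline)
		}
		time.Sleep(tick)
	}
	return nil
}

func main() {
	done := time.Now().Add(2 * time.Second)
	err := waitFor(func() bool { return time.Now().After(done) }, 100*time.Millisecond, 5*time.Second)
	fmt.Println(err) // <nil>
}
```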
@@ -11,7 +11,7 @@ pki:
#blocklist:
# - c99d4e650533b92061b09918e838a5a0a6aaee21eed1d12fd937682865936c72
# disconnect_invalid is a toggle to force a client to be disconnected if the certificate is expired or invalid.
#disconnect_invalid: false
#disconnect_invalid: true

# The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
# A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
@@ -21,6 +21,19 @@ pki:
static_host_map:
"192.168.100.1": ["100.64.22.11:4242"]

# The static_map config stanza can be used to configure how the static_host_map behaves.
#static_map:
# cadence determines how frequently DNS is re-queried for updated IP addresses when a static_host_map entry contains
# a DNS name.
#cadence: 30s

# network determines the type of IP addresses to ask the DNS server for. The default is "ip4" because nodes typically
# do not know their public IPv4 address. Connecting to the Lighthouse via IPv4 allows the Lighthouse to detect the
# public address. Other valid options are "ip6" and "ip" (returns both.)
#network: ip4

# lookup_timeout is the DNS query timeout.
#lookup_timeout: 250ms

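For reference, the cadence/lookup_timeout pair described above amounts to re-resolving each DNS-based static_host_map entry on a timer with a bounded query. A rough standalone sketch of that behavior (this is not nebula's resolver code):

```go
package main

import (
	"context"
	"fmt"
	"net"
	"time"
)

// reResolve re-queries DNS for host on a fixed cadence, asking only for
// the configured address family and bounding each query by timeout.
func reResolve(host, network string, cadence, timeout time.Duration, rounds int) {
	var last string
	for i := 0; i < rounds; i++ {
		ctx, cancel := context.WithTimeout(context.Background(), timeout)
		ips, err := net.DefaultResolver.LookupIP(ctx, network, host)
		cancel()
		if err == nil && len(ips) > 0 && ips[0].String() != last {
			last = ips[0].String()
			fmt.Printf("%s now resolves to %s\n", host, last)
		}
		time.Sleep(cadence)
	}
}

func main() {
	// Mirrors cadence: 30s, network: ip4, lookup_timeout: 250ms from the config.
	reResolve("lighthouse.example.com", "ip4", 30*time.Second, 250*time.Millisecond, 3)
}
```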
lighthouse:
# am_lighthouse is used to enable lighthouse functionality for a node. This should ONLY be true on nodes
@@ -154,11 +167,11 @@ punchy:

# Preferred ranges is used to define a hint about the local network ranges, which speeds up discovering the fastest
# path to a network adjacent nebula node.
# NOTE: the previous option "local_range" only allowed definition of a single range
# and has been deprecated for "preferred_ranges"
# This setting is reloadable.
#preferred_ranges: ["172.16.0.0/24"]

# sshd can expose informational and administrative functions via ssh this is a
# sshd can expose informational and administrative functions via ssh. It allows manual tweaking of various
# network settings when debugging or testing.
#sshd:
# Toggles the feature
#enabled: true
@@ -167,12 +180,15 @@ punchy:
# A file containing the ssh host private key to use
# A decent way to generate one: ssh-keygen -t ed25519 -f ssh_host_ed25519_key -N "" < /dev/null
#host_key: ./ssh_host_ed25519_key
# A file containing a list of authorized public keys
# Authorized users and their public keys
#authorized_users:
#- user: steeeeve
# keys can be an array of strings or single string
#keys:
#- "ssh public key string"
# Trusted SSH CA public keys. These are the public keys of the CAs that are allowed to sign SSH keys for access.
#trusted_cas:
#- "ssh public key string"

# EXPERIMENTAL: relay support for networks that can't establish direct connections.
relay:
@@ -194,7 +210,7 @@ tun:
disabled: false
# Name of the device. If not set, a default will be chosen by the OS.
# For macOS: if set, must be in the form `utun[0-9]+`.
# For FreeBSD: Required to be set, must be in the form `tun[0-9]+`.
# For NetBSD: Required to be set, must be in the form `tun[0-9]+`
dev: nebula1
# Toggles forwarding of local broadcast packets, the address of which depends on the ip/mask encoded in pki.cert
drop_local_broadcast: false
@@ -216,6 +232,7 @@ tun:
# `mtu`: will default to tun mtu if this option is not specified
# `metric`: will default to 0 if this option is not specified
# `install`: will default to true, controls whether this route is installed in the system's routing table.
# This setting is reloadable.
unsafe_routes:
#- route: 172.16.1.0/24
# via: 192.168.100.99
@@ -230,7 +247,10 @@ tun:
# TODO
# Configure logging level
logging:
# panic, fatal, error, warning, info, or debug. Default is info
# panic, fatal, error, warning, info, or debug. Default is info and is reloadable.
#NOTE: Debug mode can log remotely controlled/untrusted data which can quickly fill a disk in some
# scenarios. Debug logging is also CPU intensive and will decrease performance overall.
# Only enable debug logging while actively investigating an issue.
level: info
# json or text formats currently available. Default is text
format: text
@@ -275,10 +295,26 @@ logging:
# A 100ms interval with the default 10 retries will give a handshake 5.5 seconds to resolve before timing out
#try_interval: 100ms
#retries: 20

# query_buffer is the size of the buffer channel for querying lighthouses
#query_buffer: 64

# trigger_buffer is the size of the buffer channel for quickly sending handshakes
# after receiving the response for lighthouse queries
#trigger_buffer: 64

# Tunnel manager settings
#tunnels:
# drop_inactive controls whether inactive tunnels are maintained or dropped after the inactivity_timeout period has
# elapsed.
# In general, it is a good idea to enable this setting. It will be enabled by default in a future release.
# This setting is reloadable
#drop_inactive: false

# inactivity_timeout controls how long a tunnel MUST NOT see any inbound or outbound traffic before being considered
# inactive and eligible to be dropped.
# This setting is reloadable
#inactivity_timeout: 10m

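Conceptually, drop_inactive just compares the time since the last packet in either direction against inactivity_timeout. A minimal illustration (the names here are made up for the sketch):

```go
package main

import (
	"fmt"
	"time"
)

type tunnel struct {
	lastTraffic time.Time // updated on every inbound or outbound packet
}

// inactive reports whether the tunnel has seen no traffic for at least
// timeout and is therefore eligible to be dropped.
func (t *tunnel) inactive(timeout time.Duration) bool {
	return time.Since(t.lastTraffic) >= timeout
}

func main() {
	tun := &tunnel{lastTraffic: time.Now().Add(-11 * time.Minute)}
	fmt.Println(tun.inactive(10 * time.Minute)) // true: would be dropped
}
```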
# Nebula security group configuration
firewall:
@@ -291,6 +327,13 @@ firewall:
outbound_action: drop
inbound_action: drop

# Controls the default value for local_cidr. Default is true, will be deprecated after v1.9 and defaulted to false.
# This setting only affects nebula hosts with subnets encoded in their certificate. A nebula host acting as an
# unsafe router with `default_local_cidr_any: true` will expose its unsafe routes to every inbound rule regardless
# of the actual destination for the packet. Setting this to false requires each inbound rule to contain a `local_cidr`
# if the intention is to allow traffic to flow to an unsafe route.
#default_local_cidr_any: false

conntrack:
tcp_timeout: 12m
udp_timeout: 3m
@@ -298,7 +341,7 @@ firewall:

# The firewall is default deny. There is no way to write a deny rule.
# Rules are comprised of a protocol, port, and one or more of host, group, or CIDR
# Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr)
# Logical evaluation is roughly: port AND proto AND (ca_sha OR ca_name) AND (host OR group OR groups OR cidr) AND (local cidr)
# - port: Takes `0` or `any` as any, a single number `80`, a range `200-901`, or `fragment` to match second and further fragments of fragmented packets (since there is no port available).
# code: same as port but makes more sense when talking about ICMP, TODO: this is not currently implemented in a way that works, use `any`
# proto: `any`, `tcp`, `udp`, or `icmp`
@@ -307,6 +350,8 @@ firewall:
# groups: Same as group but accepts a list of values. Multiple values are AND'd together and a certificate would have to contain all groups to pass
# cidr: a remote CIDR, `0.0.0.0/0` is any.
# local_cidr: a local CIDR, `0.0.0.0/0` is any. This could be used to filter destinations when using unsafe_routes.
# Default is `any` unless the certificate contains subnets and then the default is the ip issued in the certificate
# if `default_local_cidr_any` is false, otherwise it's `any`.
# ca_name: An issuing CA name
# ca_sha: An issuing CA shasum

@@ -328,3 +373,10 @@ firewall:
groups:
- laptop
- home

# Expose a subnet (unsafe route) to hosts with the group remote_client
# This example assumes you have a subnet of 192.168.100.1/24 or larger encoded in the certificate
- port: 8080
proto: tcp
group: remote_client
local_cidr: 192.168.100.1/24

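The rule evaluation described above (port AND proto AND CA AND identity AND local_cidr) can be read as a chain of short-circuit predicates. A toy illustration of that structure, deliberately simplified and not the real matcher:

```go
package main

import (
	"fmt"
	"net/netip"
)

type rule struct {
	port      int
	proto     string
	groups    map[string]bool
	localCIDR netip.Prefix
}

type packet struct {
	port    int
	proto   string
	group   string
	localIP netip.Addr
}

// match mirrors the documented order: port AND proto AND identity AND local_cidr.
func (r rule) match(p packet) bool {
	return r.port == p.port &&
		r.proto == p.proto &&
		r.groups[p.group] &&
		r.localCIDR.Contains(p.localIP)
}

func main() {
	r := rule{
		port:      8080,
		proto:     "tcp",
		groups:    map[string]bool{"remote_client": true},
		localCIDR: netip.MustParsePrefix("192.168.100.0/24"),
	}
	p := packet{8080, "tcp", "remote_client", netip.MustParseAddr("192.168.100.7")}
	fmt.Println(r.match(p)) // true: the rule admits this packet
}
```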
examples/go_service/main.go (new file, 109 lines)
@@ -0,0 +1,109 @@
package main

import (
"bufio"
"fmt"
"log"
"net"

"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/service"
)

func main() {
if err := run(); err != nil {
log.Fatalf("%+v", err)
}
}

func run() error {
configStr := `
tun:
user: true

static_host_map:
'192.168.100.1': ['localhost:4242']

listen:
host: 0.0.0.0
port: 4241

lighthouse:
am_lighthouse: false
interval: 60
hosts:
- '192.168.100.1'

firewall:
outbound:
# Allow all outbound traffic from this node
- port: any
proto: any
host: any

inbound:
# Allow icmp between any nebula hosts
- port: any
proto: icmp
host: any
- port: any
proto: any
host: any

pki:
ca: /home/rice/Developer/nebula-config/ca.crt
cert: /home/rice/Developer/nebula-config/app.crt
key: /home/rice/Developer/nebula-config/app.key
`
var cfg config.C
if err := cfg.LoadString(configStr); err != nil {
return err
}
svc, err := service.New(&cfg)
if err != nil {
return err
}

ln, err := svc.Listen("tcp", ":1234")
if err != nil {
return err
}
for {
conn, err := ln.Accept()
if err != nil {
log.Printf("accept error: %s", err)
break
}
defer func(conn net.Conn) {
_ = conn.Close()
}(conn)

log.Printf("got connection")

_, err = conn.Write([]byte("hello world\n"))
if err != nil {
log.Printf("write error: %s", err)
}

scanner := bufio.NewScanner(conn)
for scanner.Scan() {
message := scanner.Text()
_, err = fmt.Fprintf(conn, "echo: %q\n", message)
if err != nil {
log.Printf("write error: %s", err)
}
log.Printf("got message %q", message)
}

if err := scanner.Err(); err != nil {
log.Printf("scanner error: %s", err)
break
}
}

_ = svc.Close()
if err := svc.Wait(); err != nil {
return err
}
return nil
}
@@ -1,138 +0,0 @@
# Quickstart Guide

This guide is intended to bring up a vagrant environment with 1 lighthouse and 2 generic hosts running nebula.

## Creating the virtualenv for ansible

Within the `quickstart/` directory, do the following

```
# make a virtual environment
virtualenv venv

# get into the virtualenv
source venv/bin/activate

# install ansible
pip install -r requirements.yml
```

## Bringing up the vagrant environment

A plugin that is used for the Vagrant environment is `vagrant-hostmanager`

To install, run

```
vagrant plugin install vagrant-hostmanager
```

All hosts within the Vagrantfile are brought up with

`vagrant up`

Once the boxes are up, go into the `ansible/` directory and deploy the playbook by running

`ansible-playbook playbook.yml -i inventory -u vagrant`

## Testing within the vagrant env

Once the ansible run is done, hop onto a vagrant box

`vagrant ssh generic1.vagrant`

or specifically

`ssh vagrant@<ip-address-in-vagrant-file>` (password for the vagrant user on the boxes is `vagrant`)

Some quick tests once the vagrant boxes are up are to ping from `generic1.vagrant` to `generic2.vagrant` using
their respective nebula ip address.

```
vagrant@generic1:~$ ping 10.168.91.220
PING 10.168.91.220 (10.168.91.220) 56(84) bytes of data.
64 bytes from 10.168.91.220: icmp_seq=1 ttl=64 time=241 ms
64 bytes from 10.168.91.220: icmp_seq=2 ttl=64 time=0.704 ms
```

You can further verify that the allowed nebula firewall rules work by ssh'ing from 1 generic box to the other.

`ssh vagrant@<nebula-ip-address>` (password for the vagrant user on the boxes is `vagrant`)

See `/etc/nebula/config.yml` on a box for firewall rules.

To see full handshakes and hostmaps, change the logging config of `/etc/nebula/config.yml` on the vagrant boxes from
info to debug.

You can watch nebula logs by running

```
sudo journalctl -fu nebula
```

Refer to the nebula src code directory's README for further instructions on configuring nebula.

## Troubleshooting

### Is nebula up and running?

Run and verify that

```
ifconfig
```

shows you an interface with the name `nebula1` being up.

```
vagrant@generic1:~$ ifconfig nebula1
nebula1: flags=4305<UP,POINTOPOINT,RUNNING,NOARP,MULTICAST> mtu 1300
inet 10.168.91.210 netmask 255.128.0.0 destination 10.168.91.210
inet6 fe80::aeaf:b105:e6dc:936c prefixlen 64 scopeid 0x20<link>
unspec 00-00-00-00-00-00-00-00-00-00-00-00-00-00-00-00 txqueuelen 500 (UNSPEC)
RX packets 2 bytes 168 (168.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 11 bytes 600 (600.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
```

### Connectivity

Are you able to ping other boxes on the private nebula network?

The following are the private nebula ip addresses of the vagrant env

```
generic1.vagrant [nebula_ip] 10.168.91.210
generic2.vagrant [nebula_ip] 10.168.91.220
lighthouse1.vagrant [nebula_ip] 10.168.91.230
```

Try pinging generic1.vagrant to and from any other box using its nebula ip above.

Double check the nebula firewall rules under /etc/nebula/config.yml to make sure that connectivity is allowed for your use-case if on a specific port.

```
vagrant@lighthouse1:~$ grep -A21 firewall /etc/nebula/config.yml
firewall:
conntrack:
tcp_timeout: 12m
udp_timeout: 3m
default_timeout: 10m

inbound:
- proto: icmp
port: any
host: any
- proto: any
port: 22
host: any
- proto: any
port: 53
host: any

outbound:
- proto: any
port: any
host: any
```
examples/quickstart-vagrant/Vagrantfile (deleted, 40 lines, vendored)
@@ -1,40 +0,0 @@
Vagrant.require_version ">= 2.2.6"

nodes = [
{ :hostname => 'generic1.vagrant', :ip => '172.11.91.210', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
{ :hostname => 'generic2.vagrant', :ip => '172.11.91.220', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
{ :hostname => 'lighthouse1.vagrant', :ip => '172.11.91.230', :box => 'bento/ubuntu-18.04', :ram => '512', :cpus => 1},
]

Vagrant.configure("2") do |config|

config.ssh.insert_key = false

if Vagrant.has_plugin?('vagrant-cachier')
config.cache.enable :apt
else
printf("** Install vagrant-cachier plugin to speedup deploy: `vagrant plugin install vagrant-cachier`.**\n")
end

if Vagrant.has_plugin?('vagrant-hostmanager')
config.hostmanager.enabled = true
config.hostmanager.manage_host = true
config.hostmanager.include_offline = true
else
config.vagrant.plugins = "vagrant-hostmanager"
end

nodes.each do |node|
config.vm.define node[:hostname] do |node_config|
node_config.vm.box = node[:box]
node_config.vm.hostname = node[:hostname]
node_config.vm.network :private_network, ip: node[:ip]
node_config.vm.provider :virtualbox do |vb|
vb.memory = node[:ram]
vb.cpus = node[:cpus]
vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"]
vb.customize ['guestproperty', 'set', :id, '/VirtualBox/GuestAdd/VBoxService/--timesync-set-threshold', 10000]
end
end
end
end
@@ -1,4 +0,0 @@
[defaults]
host_key_checking = False
private_key_file = ~/.vagrant.d/insecure_private_key
become = yes
@@ -1,21 +0,0 @@
#!/usr/bin/python


class FilterModule(object):
def filters(self):
return {
'to_nebula_ip': self.to_nebula_ip,
'map_to_nebula_ips': self.map_to_nebula_ips,
}

def to_nebula_ip(self, ip_str):
ip_list = list(map(int, ip_str.split(".")))
ip_list[0] = 10
ip_list[1] = 168
ip = '.'.join(map(str, ip_list))
return ip

def map_to_nebula_ips(self, ip_strs):
ip_list = [ self.to_nebula_ip(ip_str) for ip_str in ip_strs ]
ips = ', '.join(ip_list)
return ips
@@ -1,11 +0,0 @@
[all]
generic1.vagrant
generic2.vagrant
lighthouse1.vagrant

[generic]
generic1.vagrant
generic2.vagrant

[lighthouse]
lighthouse1.vagrant
@@ -1,23 +0,0 @@
---
- name: test connection to vagrant boxes
hosts: all
tasks:
- debug: msg=ok

- name: build nebula binaries locally
connection: local
hosts: localhost
tasks:
- command: chdir=../../../ make build/linux-amd64/"{{ item }}"
with_items:
- nebula
- nebula-cert
tags:
- build-nebula

- name: install nebula on all vagrant hosts
hosts: all
become: yes
gather_facts: yes
roles:
- nebula
@@ -1,3 +0,0 @@
---
# defaults file for nebula
nebula_config_directory: "/etc/nebula/"
@@ -1,14 +0,0 @@
[Unit]
Description=Nebula overlay networking tool
Wants=basic.target network-online.target nss-lookup.target time-sync.target
After=basic.target network.target network-online.target
Before=sshd.service

[Service]
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml
Restart=always

[Install]
WantedBy=multi-user.target
@@ -1,5 +0,0 @@
-----BEGIN NEBULA CERTIFICATE-----
CkAKDm5lYnVsYSB0ZXN0IENBKNXC1NYFMNXIhO0GOiCmVYeZ9tkB4WEnawmkrca+
hsAg9otUFhpAowZeJ33KVEABEkAORybHQUUyVFbKYzw0JHfVzAQOHA4kwB1yP9IV
KpiTw9+ADz+wA+R5tn9B+L8+7+Apc+9dem4BQULjA5mRaoYN
-----END NEBULA CERTIFICATE-----
@@ -1,4 +0,0 @@
-----BEGIN NEBULA ED25519 PRIVATE KEY-----
FEXZKMSmg8CgIODR0ymUeNT3nbnVpMi7nD79UgkCRHWmVYeZ9tkB4WEnawmkrca+
hsAg9otUFhpAowZeJ33KVA==
-----END NEBULA ED25519 PRIVATE KEY-----
@@ -1,5 +0,0 @@
---
# handlers file for nebula

- name: restart nebula
service: name=nebula state=restarted
@@ -1,62 +0,0 @@
---
# tasks file for nebula

- name: get the vagrant network interface and set fact
set_fact:
vagrant_ifce: "ansible_{{ ansible_interfaces | difference(['lo',ansible_default_ipv4.alias]) | sort | first }}"
tags:
- nebula-conf

- name: install built nebula binary
copy: src="../../../../../build/linux-amd64/{{ item }}" dest="/usr/local/bin" mode=0755
with_items:
- nebula
- nebula-cert

- name: create nebula config directory
file: path="{{ nebula_config_directory }}" state=directory mode=0755

- name: temporarily copy over root.crt and root.key to sign
copy: src={{ item }} dest=/opt/{{ item }}
with_items:
- vagrant-test-ca.key
- vagrant-test-ca.crt

- name: remove previously signed host certificate
file: dest=/etc/nebula/{{ item }} state=absent
with_items:
- host.crt
- host.key

- name: sign using the root key
command: nebula-cert sign -ca-crt /opt/vagrant-test-ca.crt -ca-key /opt/vagrant-test-ca.key -duration 4320h -groups vagrant -ip {{ hostvars[inventory_hostname][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}/9 -name {{ ansible_hostname }}.nebula -out-crt /etc/nebula/host.crt -out-key /etc/nebula/host.key

- name: remove root.key used to sign
file: dest=/opt/{{ item }} state=absent
with_items:
- vagrant-test-ca.key

- name: write the content of the trusted ca certificate
copy: src="vagrant-test-ca.crt" dest="/etc/nebula/vagrant-test-ca.crt"
notify: restart nebula

- name: Create config directory
file: path="{{ nebula_config_directory }}" owner=root group=root mode=0755 state=directory

- name: nebula config
template: src=config.yml.j2 dest="/etc/nebula/config.yml" mode=0644 owner=root group=root
notify: restart nebula
tags:
- nebula-conf

- name: nebula systemd
copy: src=systemd.nebula.service dest="/etc/systemd/system/nebula.service" mode=0644 owner=root group=root
register: addconf
notify: restart nebula

- name: maybe reload systemd
shell: systemctl daemon-reload
when: addconf.changed

- name: nebula running
service: name="nebula" state=started enabled=yes
@@ -1,85 +0,0 @@
pki:
ca: /etc/nebula/vagrant-test-ca.crt
cert: /etc/nebula/host.crt
key: /etc/nebula/host.key

# Port Nebula will be listening on
listen:
host: 0.0.0.0
port: 4242

# sshd can expose informational and administrative functions via ssh
sshd:
# Toggles the feature
enabled: true
# Host and port to listen on
listen: 127.0.0.1:2222
# A file containing the ssh host private key to use
host_key: /etc/ssh/ssh_host_ed25519_key
# A file containing a list of authorized public keys
authorized_users:
{% for user in nebula_users %}
- user: {{ user.name }}
keys:
{% for key in user.ssh_auth_keys %}
- "{{ key }}"
{% endfor %}
{% endfor %}

local_range: 10.168.0.0/16

static_host_map:
# lighthouse
{{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}: ["{{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address']}}:4242"]

default_route: "0.0.0.0"

lighthouse:
{% if 'lighthouse' in group_names %}
am_lighthouse: true
serve_dns: true
{% else %}
am_lighthouse: false
{% endif %}
interval: 60
{% if 'generic' in group_names %}
hosts:
- {{ hostvars[groups['lighthouse'][0]][vagrant_ifce]['ipv4']['address'] | to_nebula_ip }}
{% endif %}

# Configure the private interface
tun:
dev: nebula1
# Sets MTU of the tun dev.
# MTU of the tun must be smaller than the MTU of the eth0 interface
mtu: 1300

# TODO
# Configure logging level
logging:
level: info
format: json

firewall:
conntrack:
tcp_timeout: 12m
udp_timeout: 3m
default_timeout: 10m

inbound:
- proto: icmp
port: any
host: any
- proto: any
port: 22
host: any
{% if "lighthouse" in groups %}
- proto: any
port: 53
host: any
{% endif %}

outbound:
- proto: any
port: any
host: any
@@ -1,7 +0,0 @@
---
# vars file for nebula

nebula_users:
- name: user1
ssh_auth_keys:
- "ed25519 place-your-ssh-public-key-here"
@@ -1 +0,0 @@
ansible
examples/service_scripts/nebula.open-rc (new file, 35 lines)
@@ -0,0 +1,35 @@
#!/sbin/openrc-run
#
# nebula service for open-rc systems

extra_commands="checkconfig"

: ${NEBULA_CONFDIR:=${RC_PREFIX%/}/etc/nebula}
: ${NEBULA_CONFIG:=${NEBULA_CONFDIR}/config.yml}
: ${NEBULA_BINARY:=${RC_PREFIX%/}/usr/local/sbin/nebula}

command="${NEBULA_BINARY}"
command_args="${NEBULA_OPTS} -config ${NEBULA_CONFIG}"

supervisor="supervise-daemon"

description="A scalable overlay networking tool with a focus on performance, simplicity and security"

required_dirs="${NEBULA_CONFDIR}"
required_files="${NEBULA_CONFIG}"

checkconfig() {
"${command}" -test ${command_args} || return 1
}

start_pre() {
if [ "${RC_CMD}" != "restart" ] ; then
checkconfig || return $?
fi
}

stop_pre() {
if [ "${RC_CMD}" = "restart" ] ; then
checkconfig || return $?
fi
}
@@ -5,6 +5,8 @@ After=basic.target network.target network-online.target
Before=sshd.service

[Service]
Type=notify
NotifyAccess=main
SyslogIdentifier=nebula
ExecReload=/bin/kill -HUP $MAINPID
ExecStart=/usr/local/bin/nebula -config /etc/nebula/config.yml

firewall.go (315 lines changed)
@@ -2,36 +2,31 @@ package nebula

import (
"crypto/sha256"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"net"
"hash/fnv"
"net/netip"
"reflect"
"strconv"
"strings"
"sync"
"time"

"github.com/gaissmai/bart"
"github.com/rcrowley/go-metrics"
"github.com/sirupsen/logrus"
"github.com/slackhq/nebula/cert"
"github.com/slackhq/nebula/cidr"
"github.com/slackhq/nebula/config"
"github.com/slackhq/nebula/firewall"
)

const tcpACK = 0x10
const tcpFIN = 0x01

type FirewallInterface interface {
AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error
AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip, localIp netip.Prefix, caName string, caSha string) error
}

type conn struct {
Expires time.Time // Time when this conntrack entry will expire
Sent time.Time // If tcp rtt tracking is enabled this will be when Seq was last set
Seq uint32 // If tcp rtt tracking is enabled this will be the seq we are looking for an ack

// record why the original connection passed the firewall, so we can re-validate
// after ruleset changes. Note, rulesVersion is a uint16 so that these two
@@ -57,15 +52,16 @@ type Firewall struct {
DefaultTimeout time.Duration //linux: 600s

// Used to ensure we don't emit local packets for ips we don't own
localIps *cidr.Tree4
localIps *bart.Table[struct{}]
assignedCIDR netip.Prefix
hasSubnets bool

rules string
rulesVersion uint16

trackTCPRTT bool
metricTCPRTT metrics.Histogram
incomingMetrics firewallMetrics
outgoingMetrics firewallMetrics
defaultLocalCIDRAny bool
incomingMetrics firewallMetrics
outgoingMetrics firewallMetrics

l *logrus.Logger
}
@@ -83,6 +79,8 @@ type FirewallConntrack struct {
TimerWheel *TimerWheel[firewall.Packet]
}

// FirewallTable is the entry point for a rule, the evaluation order is:
// Proto AND port AND (CA SHA or CA name) AND local CIDR AND (group OR groups OR name OR remote CIDR)
type FirewallTable struct {
TCP firewallPort
UDP firewallPort
@@ -106,18 +104,27 @@ type FirewallCA struct {
}

type FirewallRule struct {
// Any makes Hosts, Groups, CIDR and LocalCIDR irrelevant
Any bool
Hosts map[string]struct{}
Groups [][]string
CIDR *cidr.Tree4
LocalCIDR *cidr.Tree4
// Any makes Hosts, Groups, and CIDR irrelevant
Any *firewallLocalCIDR
Hosts map[string]*firewallLocalCIDR
Groups []*firewallGroups
CIDR *bart.Table[*firewallLocalCIDR]
}

type firewallGroups struct {
Groups []string
LocalCIDR *firewallLocalCIDR
}

// Even though ports are uint16, int32 maps are faster for lookup
// Plus we can use `-1` for fragment rules
type firewallPort map[int32]*FirewallCA

type firewallLocalCIDR struct {
Any bool
LocalCIDR *bart.Table[struct{}]
}

// NewFirewall creates a new Firewall object. A TimerWheel is created for you from the provided timeouts.
func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.Duration, c *cert.NebulaCertificate) *Firewall {
//TODO: error on 0 duration
@@ -137,13 +144,28 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
max = defaultTimeout
}

localIps := cidr.NewTree4()
localIps := new(bart.Table[struct{}])
var assignedCIDR netip.Prefix
var assignedSet bool
for _, ip := range c.Details.Ips {
localIps.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
//TODO: IPV6-WORK the unmap is a bit unfortunate
nip, _ := netip.AddrFromSlice(ip.IP)
nip = nip.Unmap()
nprefix := netip.PrefixFrom(nip, nip.BitLen())
localIps.Insert(nprefix, struct{}{})

if !assignedSet {
// Only grabbing the first one in the cert since any more than that currently has undefined behavior
assignedCIDR = nprefix
assignedSet = true
}
}

for _, n := range c.Details.Subnets {
localIps.AddCIDR(n, struct{}{})
nip, _ := netip.AddrFromSlice(n.IP)
ones, _ := n.Mask.Size()
nip = nip.Unmap()
localIps.Insert(netip.PrefixFrom(nip, ones), struct{}{})
}

return &Firewall{
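Both loops above funnel the certificate's `net.IP`/`net.IPNet` values into `netip.Prefix` keys for the new `bart.Table`. A condensed standalone sketch of that conversion, using the same `AddrFromSlice`/`Unmap` steps:

```go
package main

import (
	"fmt"
	"net"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	localIps := new(bart.Table[struct{}])

	// A /32 host route for an address from the certificate.
	ip := net.ParseIP("10.128.0.1")
	nip, _ := netip.AddrFromSlice(ip)
	nip = nip.Unmap() // drop the IPv4-in-IPv6 mapping so BitLen is 32
	localIps.Insert(netip.PrefixFrom(nip, nip.BitLen()), struct{}{})

	// A subnet from the certificate.
	_, n, _ := net.ParseCIDR("192.168.100.0/24")
	snip, _ := netip.AddrFromSlice(n.IP)
	ones, _ := n.Mask.Size()
	localIps.Insert(netip.PrefixFrom(snip.Unmap(), ones), struct{}{})

	_, ok := localIps.Lookup(netip.MustParseAddr("192.168.100.9"))
	fmt.Println(ok) // true: we own this local address
}
```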
@@ -157,9 +179,10 @@ func NewFirewall(l *logrus.Logger, tcpTimeout, UDPTimeout, defaultTimeout time.D
UDPTimeout: UDPTimeout,
DefaultTimeout: defaultTimeout,
localIps: localIps,
assignedCIDR: assignedCIDR,
hasSubnets: len(c.Details.Subnets) > 0,
l: l,

metricTCPRTT: metrics.GetOrRegisterHistogram("network.tcp.rtt", nil, metrics.NewExpDecaySample(1028, 0.015)),
incomingMetrics: firewallMetrics{
droppedLocalIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.local_ip", nil),
droppedRemoteIP: metrics.GetOrRegisterCounter("firewall.incoming.dropped.remote_ip", nil),
@@ -183,6 +206,9 @@ func NewFirewallFromConfig(l *logrus.Logger, nc *cert.NebulaCertificate, c *conf
//TODO: max_connections
)

//TODO: Flip to false after v1.9 release
fw.defaultLocalCIDRAny = c.GetBool("firewall.default_local_cidr_any", true)

inboundAction := c.GetString("firewall.inbound_action", "drop")
switch inboundAction {
case "reject":
@@ -219,15 +245,15 @@
}

// AddRule properly creates the in memory rule structure for a firewall table.
func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip, localIp netip.Prefix, caName string, caSha string) error {
// Under gomobile, stringing a nil pointer with fmt causes an abort in debug mode for iOS
// https://github.com/golang/go/issues/14131
sIp := ""
if ip != nil {
if ip.IsValid() {
sIp = ip.String()
}
lIp := ""
if localIp != nil {
if localIp.IsValid() {
lIp = localIp.String()
}

@@ -269,7 +295,7 @@ func (f *Firewall) AddRule(incoming bool, proto uint8, startPort int32, endPort
return fmt.Errorf("unknown protocol %v", proto)
}

return fp.addRule(startPort, endPort, groups, host, ip, localIp, caName, caSha)
return fp.addRule(f, startPort, endPort, groups, host, ip, localIp, caName, caSha)
}

// GetRuleHash returns a hash representation of all inbound and outbound rules
@@ -278,6 +304,18 @@ func (f *Firewall) GetRuleHash() string {
return hex.EncodeToString(sum[:])
}

// GetRuleHashFNV returns a uint32 FNV-1 hash representation of the rules, for use as a metric value
func (f *Firewall) GetRuleHashFNV() uint32 {
h := fnv.New32a()
h.Write([]byte(f.rules))
return h.Sum32()
}

// GetRuleHashes returns both the sha256 and FNV-1 hashes, suitable for logging
func (f *Firewall) GetRuleHashes() string {
return "SHA:" + f.GetRuleHash() + ",FNV:" + strconv.FormatUint(uint64(f.GetRuleHashFNV()), 10)
}

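An FNV hash is used here because a metrics gauge needs a small numeric value, unlike the hex sha256 string. A standalone sketch of the same trick:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// ruleHashMetric converts an arbitrary rules string into a uint32 that
// can be exported as a numeric metric, as GetRuleHashFNV does.
func ruleHashMetric(rules string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(rules)) // Write on a hash.Hash never returns an error
	return h.Sum32()
}

func main() {
	fmt.Println(ruleHashMetric("inbound: allow icmp"))
}
```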
func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw FirewallInterface) error {
var table string
if inbound {
@@ -352,17 +390,17 @@ func AddFirewallRulesFromConfig(l *logrus.Logger, inbound bool, c *config.C, fw
return fmt.Errorf("%s rule #%v; proto was not understood; `%s`", table, i, r.Proto)
}

var cidr *net.IPNet
var cidr netip.Prefix
if r.Cidr != "" {
_, cidr, err = net.ParseCIDR(r.Cidr)
cidr, err = netip.ParsePrefix(r.Cidr)
if err != nil {
return fmt.Errorf("%s rule #%v; cidr did not parse; %s", table, i, err)
}
}

var localCidr *net.IPNet
var localCidr netip.Prefix
if r.LocalCidr != "" {
_, localCidr, err = net.ParseCIDR(r.LocalCidr)
localCidr, err = netip.ParsePrefix(r.LocalCidr)
if err != nil {
return fmt.Errorf("%s rule #%v; local_cidr did not parse; %s", table, i, err)
}
@@ -383,15 +421,17 @@ var ErrNoMatchingRule = errors.New("no matching rule in firewall table")

// Drop returns an error if the packet should be dropped, explaining why. It
// returns nil if the packet should not be dropped.
func (f *Firewall) Drop(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) error {
func (f *Firewall) Drop(fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) error {
// Check if we spoke to this tuple, if we did then allow this packet
if f.inConns(packet, fp, incoming, h, caPool, localCache) {
if f.inConns(fp, h, caPool, localCache) {
return nil
}

// Make sure remote address matches nebula certificate
if remoteCidr := h.remoteCidr; remoteCidr != nil {
if remoteCidr.Contains(fp.RemoteIP) == nil {
//TODO: this would be better if we had a least specific match lookup, could waste time here, need to benchmark since the algo is different
_, ok := remoteCidr.Lookup(fp.RemoteIP)
if !ok {
f.metrics(incoming).droppedRemoteIP.Inc(1)
return ErrInvalidRemoteIP
}
@@ -404,7 +444,9 @@
}

// Make sure we are supposed to be handling this local ip address
if f.localIps.Contains(fp.LocalIP) == nil {
//TODO: this would be better if we had a least specific match lookup, could waste time here, need to benchmark since the algo is different
_, ok := f.localIps.Lookup(fp.LocalIP)
if !ok {
f.metrics(incoming).droppedLocalIP.Inc(1)
return ErrInvalidLocalIP
}
@@ -421,7 +463,7 @@
}

// We always want to conntrack since it is a faster operation
f.addConn(packet, fp, incoming)
f.addConn(fp, incoming)

return nil
}

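With the raw packet slice gone, Drop reduces to: check conntrack, validate the remote and local IPs against the cert-derived tables, consult the rule table, then record the conn. A compressed toy sketch of that control flow (not the real signatures):

```go
package main

import "fmt"

// A toy model of the Drop ordering: conntrack hit, then remote/local IP
// validation, then the rule table, then conntrack insertion.
type fwPacket struct{ remoteOK, localOK, ruleOK bool }

type fw struct{ conns map[fwPacket]bool }

func (f *fw) drop(p fwPacket) string {
	if f.conns[p] { // seen this tuple before: allow without re-matching rules
		return ""
	}
	if !p.remoteOK {
		return "invalid remote ip"
	}
	if !p.localOK {
		return "invalid local ip"
	}
	if !p.ruleOK {
		return "no matching rule"
	}
	f.conns[p] = true // conntrack so later packets take the fast path
	return ""
}

func main() {
	f := &fw{conns: map[fwPacket]bool{}}
	fmt.Println(f.drop(fwPacket{true, true, true}) == "") // true: allowed
}
```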
@@ -447,9 +489,10 @@ func (f *Firewall) EmitStats() {
|
||||
conntrack.Unlock()
|
||||
metrics.GetOrRegisterGauge("firewall.conntrack.count", nil).Update(int64(conntrackCount))
|
||||
metrics.GetOrRegisterGauge("firewall.rules.version", nil).Update(int64(f.rulesVersion))
|
||||
metrics.GetOrRegisterGauge("firewall.rules.hash", nil).Update(int64(f.GetRuleHashFNV()))
|
||||
}
|
||||
|
||||
func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool {
|
||||
func (f *Firewall) inConns(fp firewall.Packet, h *HostInfo, caPool *cert.NebulaCAPool, localCache firewall.ConntrackCache) bool {
|
||||
if localCache != nil {
|
||||
if _, ok := localCache[fp]; ok {
|
||||
return true
|
||||
@@ -509,11 +552,6 @@ func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *
|
||||
switch fp.Protocol {
|
||||
case firewall.ProtoTCP:
|
||||
c.Expires = time.Now().Add(f.TCPTimeout)
|
||||
if incoming {
|
||||
f.checkTCPRTT(c, packet)
|
||||
} else {
|
||||
setTCPRTTTracking(c, packet)
|
||||
}
|
||||
case firewall.ProtoUDP:
|
||||
c.Expires = time.Now().Add(f.UDPTimeout)
|
||||
default:
|
||||
@@ -529,16 +567,13 @@ func (f *Firewall) inConns(packet []byte, fp firewall.Packet, incoming bool, h *
|
||||
return true
|
||||
}
|
||||
|
||||
func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) {
|
||||
func (f *Firewall) addConn(fp firewall.Packet, incoming bool) {
|
||||
var timeout time.Duration
|
||||
c := &conn{}
|
||||
|
||||
switch fp.Protocol {
|
||||
case firewall.ProtoTCP:
|
||||
timeout = f.TCPTimeout
|
||||
if !incoming {
|
||||
setTCPRTTTracking(c, packet)
|
||||
}
|
||||
case firewall.ProtoUDP:
|
||||
timeout = f.UDPTimeout
|
||||
default:
|
||||
@@ -564,7 +599,6 @@ func (f *Firewall) addConn(packet []byte, fp firewall.Packet, incoming bool) {
|
||||
// Evict checks if a conntrack entry has expired, if so it is removed, if not it is re-added to the wheel
|
||||
// Caller must own the connMutex lock!
|
||||
func (f *Firewall) evict(p firewall.Packet) {
|
||||
//TODO: report a stat if the tcp rtt tracking was never resolved?
|
||||
// Are we still tracking this conn?
|
||||
conntrack := f.Conntrack
|
||||
t, ok := conntrack.Conns[p]
|
||||
@@ -608,7 +642,7 @@ func (ft *FirewallTable) match(p firewall.Packet, incoming bool, c *cert.NebulaC
|
||||
return false
|
||||
}
|
||||
|
||||
func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
|
||||
func (fp firewallPort) addRule(f *Firewall, startPort int32, endPort int32, groups []string, host string, ip, localIp netip.Prefix, caName string, caSha string) error {
|
||||
if startPort > endPort {
|
||||
return fmt.Errorf("start port was lower than end port")
|
||||
}
|
||||
@@ -621,7 +655,7 @@ func (fp firewallPort) addRule(startPort int32, endPort int32, groups []string,
|
||||
}
|
||||
}
|
||||
|
||||
if err := fp[i].addRule(groups, host, ip, localIp, caName, caSha); err != nil {
|
||||
if err := fp[i].addRule(f, groups, host, ip, localIp, caName, caSha); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -652,13 +686,12 @@ func (fp firewallPort) match(p firewall.Packet, incoming bool, c *cert.NebulaCer
|
||||
return fp[firewall.PortAny].match(p, c, caPool)
|
||||
}
|
||||
|
||||
func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPNet, caName, caSha string) error {
|
||||
func (fc *FirewallCA) addRule(f *Firewall, groups []string, host string, ip, localIp netip.Prefix, caName, caSha string) error {
|
||||
fr := func() *FirewallRule {
|
||||
return &FirewallRule{
|
||||
Hosts: make(map[string]struct{}),
|
||||
Groups: make([][]string, 0),
|
||||
CIDR: cidr.NewTree4(),
|
||||
LocalCIDR: cidr.NewTree4(),
|
||||
Hosts: make(map[string]*firewallLocalCIDR),
|
||||
Groups: make([]*firewallGroups, 0),
|
||||
CIDR: new(bart.Table[*firewallLocalCIDR]),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -667,14 +700,14 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPN
|
||||
fc.Any = fr()
|
||||
}
|
||||
|
||||
return fc.Any.addRule(groups, host, ip, localIp)
|
||||
return fc.Any.addRule(f, groups, host, ip, localIp)
|
||||
}
|
||||
|
||||
if caSha != "" {
|
||||
if _, ok := fc.CAShas[caSha]; !ok {
|
||||
fc.CAShas[caSha] = fr()
|
||||
}
|
||||
err := fc.CAShas[caSha].addRule(groups, host, ip, localIp)
|
||||
err := fc.CAShas[caSha].addRule(f, groups, host, ip, localIp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -684,7 +717,7 @@ func (fc *FirewallCA) addRule(groups []string, host string, ip, localIp *net.IPN
|
||||
if _, ok := fc.CANames[caName]; !ok {
|
||||
fc.CANames[caName] = fr()
|
||||
}
|
||||
err := fc.CANames[caName].addRule(groups, host, ip, localIp)
|
||||
err := fc.CANames[caName].addRule(f, groups, host, ip, localIp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -716,41 +749,63 @@ func (fc *FirewallCA) match(p firewall.Packet, c *cert.NebulaCertificate, caPool
 	return fc.CANames[s.Details.Name].match(p, c)
 }
 
-func (fr *FirewallRule) addRule(groups []string, host string, ip *net.IPNet, localIp *net.IPNet) error {
-	if fr.Any {
-		return nil
+func (fr *FirewallRule) addRule(f *Firewall, groups []string, host string, ip, localCIDR netip.Prefix) error {
+	flc := func() *firewallLocalCIDR {
+		return &firewallLocalCIDR{
+			LocalCIDR: new(bart.Table[struct{}]),
+		}
 	}
 
-	if fr.isAny(groups, host, ip, localIp) {
-		fr.Any = true
-		// If it's any we need to wipe out any pre-existing rules to save on memory
-		fr.Groups = make([][]string, 0)
-		fr.Hosts = make(map[string]struct{})
-		fr.CIDR = cidr.NewTree4()
-		fr.LocalCIDR = cidr.NewTree4()
-	} else {
-		if len(groups) > 0 {
-			fr.Groups = append(fr.Groups, groups)
+	if fr.isAny(groups, host, ip) {
+		if fr.Any == nil {
+			fr.Any = flc()
 		}
 
-		if host != "" {
-			fr.Hosts[host] = struct{}{}
+		return fr.Any.addRule(f, localCIDR)
 	}
 
+	if len(groups) > 0 {
+		nlc := flc()
+		err := nlc.addRule(f, localCIDR)
+		if err != nil {
+			return err
+		}
 
-		if ip != nil {
-			fr.CIDR.AddCIDR(ip, struct{}{})
-		}
+		fr.Groups = append(fr.Groups, &firewallGroups{
+			Groups:    groups,
+			LocalCIDR: nlc,
+		})
+	}
 
-		if localIp != nil {
-			fr.LocalCIDR.AddCIDR(localIp, struct{}{})
+	if host != "" {
+		nlc := fr.Hosts[host]
+		if nlc == nil {
+			nlc = flc()
+		}
+		err := nlc.addRule(f, localCIDR)
+		if err != nil {
+			return err
+		}
+		fr.Hosts[host] = nlc
 	}
 
+	if ip.IsValid() {
+		nlc, _ := fr.CIDR.Get(ip)
+		if nlc == nil {
+			nlc = flc()
+		}
+		err := nlc.addRule(f, localCIDR)
+		if err != nil {
+			return err
+		}
+		fr.CIDR.Insert(ip, nlc)
+	}
 
 	return nil
 }
 
-func (fr *FirewallRule) isAny(groups []string, host string, ip, localIp *net.IPNet) bool {
-	if len(groups) == 0 && host == "" && ip == nil && localIp == nil {
+func (fr *FirewallRule) isAny(groups []string, host string, ip netip.Prefix) bool {
+	if len(groups) == 0 && host == "" && !ip.IsValid() {
 		return true
 	}
 
@@ -764,11 +819,7 @@ func (fr *FirewallRule) isAny(groups []string, host string, ip, localIp *net.IPN
 		return true
 	}
 
-	if ip != nil && ip.Contains(net.IPv4(0, 0, 0, 0)) {
-		return true
-	}
-
-	if localIp != nil && localIp.Contains(net.IPv4(0, 0, 0, 0)) {
+	if ip.IsValid() && ip.Bits() == 0 {
 		return true
 	}
 
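(Reviewer note, not part of the patch: the zero value netip.Prefix{} takes over the role the old nil *net.IPNet played, meaning "no CIDR supplied", while a parsed /0 prefix is a real CIDR that covers everything, which isAny now detects via Bits() == 0. A minimal standalone sketch of that distinction, using only the standard library:

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	var unset netip.Prefix       // zero value: plays the old nil *net.IPNet role
	fmt.Println(unset.IsValid()) // false: no CIDR was supplied

	any := netip.MustParsePrefix("0.0.0.0/0") // a real CIDR covering all of IPv4
	fmt.Println(any.IsValid(), any.Bits())    // true 0: what isAny treats as "any"
})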
@@ -781,7 +832,7 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
 	}
 
 	// Shortcut path for if groups, hosts, or cidr contained an `any`
-	if fr.Any {
+	if fr.Any.match(p, c) {
 		return true
 	}
 
@@ -789,7 +840,7 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
 	for _, sg := range fr.Groups {
 		found := false
 
-		for _, g := range sg {
+		for _, g := range sg.Groups {
 			if _, ok := c.Details.InvertedGroups[g]; !ok {
 				found = false
 				break
@@ -798,27 +849,58 @@ func (fr *FirewallRule) match(p firewall.Packet, c *cert.NebulaCertificate) bool
 			found = true
 		}
 
-		if found {
+		if found && sg.LocalCIDR.match(p, c) {
 			return true
 		}
 	}
 
 	if fr.Hosts != nil {
-		if _, ok := fr.Hosts[c.Details.Name]; ok {
-			return true
+		if flc, ok := fr.Hosts[c.Details.Name]; ok {
+			if flc.match(p, c) {
+				return true
+			}
 		}
 	}
 
-	if fr.CIDR != nil && fr.CIDR.Contains(p.RemoteIP) != nil {
+	matched := false
+	prefix := netip.PrefixFrom(p.RemoteIP, p.RemoteIP.BitLen())
+	fr.CIDR.EachLookupPrefix(prefix, func(prefix netip.Prefix, val *firewallLocalCIDR) bool {
+		if prefix.Contains(p.RemoteIP) && val.match(p, c) {
+			matched = true
+			return false
+		}
+		return true
+	})
+	return matched
 }
 
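(Reviewer note: the EachLookupPrefix walk above visits every stored route that covers the packed host prefix, stopping when the callback returns false. A minimal sketch of that traversal under the assumption that bart v0.11.1, as pinned in go.mod below, exposes the Insert/EachLookupPrefix signatures used in this patch:

package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	tbl := new(bart.Table[string])
	tbl.Insert(netip.MustParsePrefix("10.0.0.0/8"), "wide")
	tbl.Insert(netip.MustParsePrefix("10.1.0.0/16"), "narrow")

	// Walk every stored route that covers 10.1.2.3/32; returning false stops early.
	host := netip.PrefixFrom(netip.MustParseAddr("10.1.2.3"), 32)
	tbl.EachLookupPrefix(host, func(pfx netip.Prefix, val string) bool {
		fmt.Println(pfx, val) // visits both 10.1.0.0/16 and 10.0.0.0/8
		return true
	})
})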
+func (flc *firewallLocalCIDR) addRule(f *Firewall, localIp netip.Prefix) error {
+	if !localIp.IsValid() {
+		if !f.hasSubnets || f.defaultLocalCIDRAny {
+			flc.Any = true
+			return nil
+		}
+
+		localIp = f.assignedCIDR
+	} else if localIp.Bits() == 0 {
+		flc.Any = true
+	}
+
+	flc.LocalCIDR.Insert(localIp, struct{}{})
+	return nil
+}
+
+func (flc *firewallLocalCIDR) match(p firewall.Packet, c *cert.NebulaCertificate) bool {
+	if flc == nil {
+		return false
+	}
+
+	if flc.Any {
+		return true
+	}
+
-	if fr.LocalCIDR != nil && fr.LocalCIDR.Contains(p.LocalIP) != nil {
-		return true
-	}
-
-	// No host, group, or cidr matched, bye bye
-	return false
+	_, ok := flc.LocalCIDR.Lookup(p.LocalIP)
+	return ok
 }
 
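(Reviewer note: the Lookup call above is a longest-prefix match on a bare address, so a packet's local IP matches whenever any stored local CIDR covers it. A tiny sketch of that behavior, assuming the same bart table API exercised by this patch:

package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	lc := new(bart.Table[struct{}])
	lc.Insert(netip.MustParsePrefix("192.168.0.0/24"), struct{}{})

	_, ok := lc.Lookup(netip.MustParseAddr("192.168.0.7")) // covered by the /24
	fmt.Println(ok)                                        // true
	_, ok = lc.Lookup(netip.MustParseAddr("192.168.1.7"))  // outside the /24
	fmt.Println(ok)                                        // false
})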
 type rule struct {
@@ -934,42 +1016,3 @@ func parsePort(s string) (startPort, endPort int32, err error) {
 
 	return
 }
-
-// TODO: write tests for these
-func setTCPRTTTracking(c *conn, p []byte) {
-	if c.Seq != 0 {
-		return
-	}
-
-	ihl := int(p[0]&0x0f) << 2
-
-	// Don't track FIN packets
-	if p[ihl+13]&tcpFIN != 0 {
-		return
-	}
-
-	c.Seq = binary.BigEndian.Uint32(p[ihl+4 : ihl+8])
-	c.Sent = time.Now()
-}
-
-func (f *Firewall) checkTCPRTT(c *conn, p []byte) bool {
-	if c.Seq == 0 {
-		return false
-	}
-
-	ihl := int(p[0]&0x0f) << 2
-	if p[ihl+13]&tcpACK == 0 {
-		return false
-	}
-
-	// Deal with wrap around, signed int cuts the ack window in half
-	// 0 is a bad ack, no data acknowledged
-	// positive number is a bad ack, ack is over half the window away
-	if int32(c.Seq-binary.BigEndian.Uint32(p[ihl+8:ihl+12])) >= 0 {
-		return false
-	}
-
-	f.metricTCPRTT.Update(time.Since(c.Sent).Nanoseconds())
-	c.Seq = 0
-	return true
-}
 
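(Reviewer note: the removed checkTCPRTT relied on the classic serial-number trick: subtracting two uint32 sequence numbers and reinterpreting the difference as int32 keeps "behind" vs "ahead" meaningful across wrap-around, at the cost of halving the usable window. A standalone illustration of that arithmetic, mirroring the cases the deleted test covered:

package main

import "fmt"

func ackAdvanced(seq, ack uint32) bool {
	// int32 reinterpretation: a negative difference means ack is ahead of seq
	// within half the 32-bit window, even when ack has wrapped past zero.
	return int32(seq-ack) < 0
}

func main() {
	fmt.Println(ackAdvanced(100, 101))          // true: ack moved past seq
	fmt.Println(ackAdvanced(100, 100))          // false: nothing new acknowledged
	fmt.Println(ackAdvanced(^uint32(0)-20, 81)) // true: ack wrapped around zero
})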
@@ -3,8 +3,7 @@ package firewall
 import (
 	"encoding/json"
 	"fmt"
-
-	"github.com/slackhq/nebula/iputil"
+	"net/netip"
 )
 
 type m map[string]interface{}
@@ -20,8 +19,8 @@ const (
 )
 
 type Packet struct {
-	LocalIP    iputil.VpnIp
-	RemoteIP   iputil.VpnIp
+	LocalIP    netip.Addr
+	RemoteIP   netip.Addr
 	LocalPort  uint16
 	RemotePort uint16
 	Protocol   uint8
 
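(Reviewer note: call sites migrate from iputil.VpnIp to netip.Addr. A minimal sketch of building the new Packet, mirroring the test fixtures below; the addresses and ports here are illustrative only:

package main

import (
	"fmt"
	"net/netip"

	"github.com/slackhq/nebula/firewall"
)

func main() {
	p := firewall.Packet{
		LocalIP:    netip.MustParseAddr("1.2.3.4"),
		RemoteIP:   netip.MustParseAddr("1.2.3.5"),
		LocalPort:  443,
		RemotePort: 51000,
		Protocol:   firewall.ProtoTCP,
	}
	fmt.Println(p.LocalIP, p.Protocol)
})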
462	firewall_test.go
@@ -2,18 +2,16 @@ package nebula
 
 import (
 	"bytes"
-	"encoding/binary"
 	"errors"
 	"math"
-	"net"
+	"net/netip"
 	"testing"
 	"time"
 
 	"github.com/rcrowley/go-metrics"
 	"github.com/slackhq/nebula/cert"
 	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/firewall"
-	"github.com/slackhq/nebula/iputil"
 	"github.com/slackhq/nebula/test"
 	"github.com/stretchr/testify/assert"
 )
@@ -67,77 +65,62 @@ func TestFirewall_AddRule(t *testing.T) {
 	assert.NotNil(t, fw.InRules)
 	assert.NotNil(t, fw.OutRules)
 
-	_, ti, _ := net.ParseCIDR("1.2.3.4/32")
+	ti, err := netip.ParsePrefix("1.2.3.4/32")
+	assert.NoError(t, err)
 
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", nil, nil, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoTCP, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
 	// An empty rule is any
-	assert.True(t, fw.InRules.TCP[1].Any.Any)
+	assert.True(t, fw.InRules.TCP[1].Any.Any.Any)
 	assert.Empty(t, fw.InRules.TCP[1].Any.Groups)
 	assert.Empty(t, fw.InRules.TCP[1].Any.Hosts)
 
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", ""))
-	assert.False(t, fw.InRules.UDP[1].Any.Any)
-	assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0], "g1")
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.Nil(t, fw.InRules.UDP[1].Any.Any)
+	assert.Contains(t, fw.InRules.UDP[1].Any.Groups[0].Groups, "g1")
 	assert.Empty(t, fw.InRules.UDP[1].Any.Hosts)
 
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", nil, nil, "", ""))
-	assert.False(t, fw.InRules.ICMP[1].Any.Any)
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoICMP, 1, 1, []string{}, "h1", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.Nil(t, fw.InRules.ICMP[1].Any.Any)
 	assert.Empty(t, fw.InRules.ICMP[1].Any.Groups)
 	assert.Contains(t, fw.InRules.ICMP[1].Any.Hosts, "h1")
 
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, nil, "", ""))
-	assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
-	assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
-	assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
-	assert.NotNil(t, fw.OutRules.AnyProto[1].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", ti, netip.Prefix{}, "", ""))
+	assert.Nil(t, fw.OutRules.AnyProto[1].Any.Any)
+	_, ok := fw.OutRules.AnyProto[1].Any.CIDR.Get(ti)
+	assert.True(t, ok)
 
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", nil, ti, "", ""))
-	assert.False(t, fw.OutRules.AnyProto[1].Any.Any)
-	assert.Empty(t, fw.OutRules.AnyProto[1].Any.Groups)
-	assert.Empty(t, fw.OutRules.AnyProto[1].Any.Hosts)
-	assert.NotNil(t, fw.OutRules.AnyProto[1].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, ti, "", ""))
+	assert.NotNil(t, fw.OutRules.AnyProto[1].Any.Any)
+	_, ok = fw.OutRules.AnyProto[1].Any.Any.LocalCIDR.Get(ti)
+	assert.True(t, ok)
 
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "ca-name", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "ca-name", ""))
 	assert.Contains(t, fw.InRules.UDP[1].CANames, "ca-name")
 
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", nil, nil, "", "ca-sha"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoUDP, 1, 1, []string{"g1"}, "", netip.Prefix{}, netip.Prefix{}, "", "ca-sha"))
 	assert.Contains(t, fw.InRules.UDP[1].CAShas, "ca-sha")
 
 	// Set any and clear fields
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
 	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"g1", "g2"}, "h1", ti, ti, "", ""))
-	assert.Equal(t, []string{"g1", "g2"}, fw.OutRules.AnyProto[0].Any.Groups[0])
-	assert.Contains(t, fw.OutRules.AnyProto[0].Any.Hosts, "h1")
-	assert.NotNil(t, fw.OutRules.AnyProto[0].Any.CIDR.Match(iputil.Ip2VpnIp(ti.IP)))
-	assert.NotNil(t, fw.OutRules.AnyProto[0].Any.LocalCIDR.Match(iputil.Ip2VpnIp(ti.IP)))
 
 	// run twice just to make sure
+	//TODO: these ANY rules should clear the CA firewall portion
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", ""))
-	assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
-	assert.Empty(t, fw.OutRules.AnyProto[0].Any.Groups)
-	assert.Empty(t, fw.OutRules.AnyProto[0].Any.Hosts)
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
 
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "any", nil, nil, "", ""))
-	assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
+	anyIp, err := netip.ParsePrefix("0.0.0.0/0")
+	assert.NoError(t, err)
 
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	_, anyIp, _ := net.ParseCIDR("0.0.0.0/0")
-	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, nil, "", ""))
-	assert.True(t, fw.OutRules.AnyProto[0].Any.Any)
+	assert.Nil(t, fw.AddRule(false, firewall.ProtoAny, 0, 0, []string{}, "", anyIp, netip.Prefix{}, "", ""))
+	assert.True(t, fw.OutRules.AnyProto[0].Any.Any.Any)
 
 	// Test error conditions
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, c)
-	assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", nil, nil, "", ""))
-	assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", nil, nil, "", ""))
+	assert.Error(t, fw.AddRule(true, math.MaxUint8, 0, 0, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.Error(t, fw.AddRule(true, firewall.ProtoAny, 10, 0, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
 }
 
 func TestFirewall_Drop(t *testing.T) {
@@ -146,8 +129,8 @@ func TestFirewall_Drop(t *testing.T) {
 	l.SetOutput(ob)
 
 	p := firewall.Packet{
-		LocalIP:    iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		RemoteIP:   iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
+		LocalIP:    netip.MustParseAddr("1.2.3.4"),
+		RemoteIP:   netip.MustParseAddr("1.2.3.4"),
 		LocalPort:  10,
 		RemotePort: 90,
 		Protocol:   firewall.ProtoUDP,
@@ -172,83 +155,91 @@ func TestFirewall_Drop(t *testing.T) {
 		ConnectionState: &ConnectionState{
 			peerCert: &c,
 		},
-		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
+		vpnIp: netip.MustParseAddr("1.2.3.4"),
 	}
 	h.CreateRemoteCIDR(&c)
 
 	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
 	cp := cert.NewCAPool()
 
 	// Drop outbound
-	assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
+	assert.Equal(t, ErrNoMatchingRule, fw.Drop(p, false, &h, cp, nil))
 	// Allow inbound
 	resetConntrack(fw)
-	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
+	assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
 	// Allow outbound because conntrack
-	assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
+	assert.NoError(t, fw.Drop(p, false, &h, cp, nil))
 
 	// test remote mismatch
 	oldRemote := p.RemoteIP
-	p.RemoteIP = iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 10))
-	assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrInvalidRemoteIP)
+	p.RemoteIP = netip.MustParseAddr("1.2.3.10")
+	assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrInvalidRemoteIP)
 	p.RemoteIP = oldRemote
 
 	// ensure signer doesn't get in the way of group checks
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum"))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum-bad"))
-	assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
+	assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
 
 	// test caSha doesn't drop on match
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "", "signer-shasum-bad"))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "", "signer-shasum"))
-	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum-bad"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-shasum"))
+	assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
 
 	// ensure ca name doesn't get in the way of group checks
 	cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good", ""))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good-bad", ""))
-	assert.Equal(t, fw.Drop([]byte{}, p, true, &h, cp, nil), ErrNoMatchingRule)
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
+	assert.Equal(t, fw.Drop(p, true, &h, cp, nil), ErrNoMatchingRule)
 
 	// test caName doesn't drop on match
 	cp.CAs["signer-shasum"] = &cert.NebulaCertificate{Details: cert.NebulaCertificateDetails{Name: "ca-good"}}
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", nil, nil, "ca-good-bad", ""))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", nil, nil, "ca-good", ""))
-	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"nope"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good-bad", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group"}, "", netip.Prefix{}, netip.Prefix{}, "ca-good", ""))
+	assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
 }
 
 func BenchmarkFirewallTable_match(b *testing.B) {
+	f := &Firewall{}
 	ft := FirewallTable{
 		TCP: firewallPort{},
 	}
 
-	_, n, _ := net.ParseCIDR("172.1.1.1/32")
-	_ = ft.TCP.addRule(10, 10, []string{"good-group"}, "good-host", n, n, "", "")
-	_ = ft.TCP.addRule(10, 10, []string{"good-group2"}, "good-host", n, n, "", "")
-	_ = ft.TCP.addRule(10, 10, []string{"good-group3"}, "good-host", n, n, "", "")
-	_ = ft.TCP.addRule(10, 10, []string{"good-group4"}, "good-host", n, n, "", "")
-	_ = ft.TCP.addRule(10, 10, []string{"good-group, good-group1"}, "good-host", n, n, "", "")
+	pfix := netip.MustParsePrefix("172.1.1.1/32")
+	_ = ft.TCP.addRule(f, 10, 10, []string{"good-group"}, "good-host", pfix, netip.Prefix{}, "", "")
+	_ = ft.TCP.addRule(f, 100, 100, []string{"good-group"}, "good-host", netip.Prefix{}, pfix, "", "")
 	cp := cert.NewCAPool()
 
 	b.Run("fail on proto", func(b *testing.B) {
+		// This benchmark is showing us the cost of failing to match the protocol
 		c := &cert.NebulaCertificate{}
 		for n := 0; n < b.N; n++ {
-			ft.match(firewall.Packet{Protocol: firewall.ProtoUDP}, true, c, cp)
+			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoUDP}, true, c, cp))
 		}
 	})
 
-	b.Run("fail on port", func(b *testing.B) {
+	b.Run("pass proto, fail on port", func(b *testing.B) {
+		// This benchmark is showing us the cost of matching a specific protocol but failing to match the port
 		c := &cert.NebulaCertificate{}
 		for n := 0; n < b.N; n++ {
-			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 1}, true, c, cp)
+			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 1}, true, c, cp))
 		}
 	})
 
-	b.Run("fail all group, name, and cidr", func(b *testing.B) {
+	b.Run("pass proto, port, fail on local CIDR", func(b *testing.B) {
+		c := &cert.NebulaCertificate{}
+		ip := netip.MustParsePrefix("9.254.254.254/32")
+		for n := 0; n < b.N; n++ {
+			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip.Addr()}, true, c, cp))
+		}
	})

+	b.Run("pass proto, port, any local CIDR, fail all group, name, and cidr", func(b *testing.B) {
-		_, ip, _ := net.ParseCIDR("9.254.254.254/32")
 		c := &cert.NebulaCertificate{
 			Details: cert.NebulaCertificateDetails{
@@ -258,11 +249,25 @@ func BenchmarkFirewallTable_match(b *testing.B) {
 			},
 		}
 		for n := 0; n < b.N; n++ {
-			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
+			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
 		}
 	})
 
-	b.Run("pass on group", func(b *testing.B) {
+	b.Run("pass proto, port, specific local CIDR, fail all group, name, and cidr", func(b *testing.B) {
-		_, ip, _ := net.ParseCIDR("9.254.254.254/32")
 		c := &cert.NebulaCertificate{
 			Details: cert.NebulaCertificateDetails{
+				InvertedGroups: map[string]struct{}{"nope": {}},
+				Name:           "nope",
-				Ips:            []*net.IPNet{ip},
 			},
 		}
 		for n := 0; n < b.N; n++ {
+			assert.False(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: pfix.Addr()}, true, c, cp))
 		}
 	})
 
+	b.Run("pass on group on any local cidr", func(b *testing.B) {
 		c := &cert.NebulaCertificate{
 			Details: cert.NebulaCertificateDetails{
 				InvertedGroups: map[string]struct{}{"good-group": {}},
@@ -270,7 +275,19 @@ func BenchmarkFirewallTable_match(b *testing.B) {
 			},
 		}
 		for n := 0; n < b.N; n++ {
-			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
+			assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp))
 		}
 	})
 
+	b.Run("pass on group on specific local cidr", func(b *testing.B) {
+		c := &cert.NebulaCertificate{
+			Details: cert.NebulaCertificateDetails{
+				InvertedGroups: map[string]struct{}{"good-group": {}},
+				Name:           "nope",
+			},
+		}
+		for n := 0; n < b.N; n++ {
+			assert.True(b, ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: pfix.Addr()}, true, c, cp))
+		}
+	})
 
@@ -285,60 +302,60 @@ func BenchmarkFirewallTable_match(b *testing.B) {
 			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10}, true, c, cp)
 		}
 	})
 
-	b.Run("pass on ip", func(b *testing.B) {
-		ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
-		c := &cert.NebulaCertificate{
-			Details: cert.NebulaCertificateDetails{
-				InvertedGroups: map[string]struct{}{"nope": {}},
-				Name:           "good-host",
-			},
-		}
-		for n := 0; n < b.N; n++ {
-			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp)
-		}
-	})
-
-	b.Run("pass on local ip", func(b *testing.B) {
-		ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
-		c := &cert.NebulaCertificate{
-			Details: cert.NebulaCertificateDetails{
-				InvertedGroups: map[string]struct{}{"nope": {}},
-				Name:           "good-host",
-			},
-		}
-		for n := 0; n < b.N; n++ {
-			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, LocalIP: ip}, true, c, cp)
-		}
-	})
-
-	_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, n, "", "")
-
-	b.Run("pass on ip with any port", func(b *testing.B) {
-		ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
-		c := &cert.NebulaCertificate{
-			Details: cert.NebulaCertificateDetails{
-				InvertedGroups: map[string]struct{}{"nope": {}},
-				Name:           "good-host",
-			},
-		}
-		for n := 0; n < b.N; n++ {
-			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
-		}
-	})
-
-	b.Run("pass on local ip with any port", func(b *testing.B) {
-		ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
-		c := &cert.NebulaCertificate{
-			Details: cert.NebulaCertificateDetails{
-				InvertedGroups: map[string]struct{}{"nope": {}},
-				Name:           "good-host",
-			},
-		}
-		for n := 0; n < b.N; n++ {
-			ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip}, true, c, cp)
-		}
-	})
+	//
+	//b.Run("pass on ip", func(b *testing.B) {
+	//	ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
+	//	c := &cert.NebulaCertificate{
+	//		Details: cert.NebulaCertificateDetails{
+	//			InvertedGroups: map[string]struct{}{"nope": {}},
+	//			Name:           "good-host",
+	//		},
+	//	}
+	//	for n := 0; n < b.N; n++ {
+	//		ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, RemoteIP: ip}, true, c, cp)
+	//	}
+	//})
+	//
+	//b.Run("pass on local ip", func(b *testing.B) {
+	//	ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
+	//	c := &cert.NebulaCertificate{
+	//		Details: cert.NebulaCertificateDetails{
+	//			InvertedGroups: map[string]struct{}{"nope": {}},
+	//			Name:           "good-host",
+	//		},
+	//	}
+	//	for n := 0; n < b.N; n++ {
+	//		ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 10, LocalIP: ip}, true, c, cp)
+	//	}
+	//})
+	//
+	//_ = ft.TCP.addRule(0, 0, []string{"good-group"}, "good-host", n, n, "", "")
+	//
+	//b.Run("pass on ip with any port", func(b *testing.B) {
+	//	ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
+	//	c := &cert.NebulaCertificate{
+	//		Details: cert.NebulaCertificateDetails{
+	//			InvertedGroups: map[string]struct{}{"nope": {}},
+	//			Name:           "good-host",
+	//		},
+	//	}
+	//	for n := 0; n < b.N; n++ {
+	//		ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, RemoteIP: ip}, true, c, cp)
+	//	}
+	//})
+	//
+	//b.Run("pass on local ip with any port", func(b *testing.B) {
+	//	ip := iputil.Ip2VpnIp(net.IPv4(172, 1, 1, 1))
+	//	c := &cert.NebulaCertificate{
+	//		Details: cert.NebulaCertificateDetails{
+	//			InvertedGroups: map[string]struct{}{"nope": {}},
+	//			Name:           "good-host",
+	//		},
+	//	}
+	//	for n := 0; n < b.N; n++ {
+	//		ft.match(firewall.Packet{Protocol: firewall.ProtoTCP, LocalPort: 100, LocalIP: ip}, true, c, cp)
+	//	}
+	//})
 }
 
 func TestFirewall_Drop2(t *testing.T) {
@@ -347,8 +364,8 @@ func TestFirewall_Drop2(t *testing.T) {
 	l.SetOutput(ob)
 
 	p := firewall.Packet{
-		LocalIP:    iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		RemoteIP:   iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
+		LocalIP:    netip.MustParseAddr("1.2.3.4"),
+		RemoteIP:   netip.MustParseAddr("1.2.3.4"),
 		LocalPort:  10,
 		RemotePort: 90,
 		Protocol:   firewall.ProtoUDP,
@@ -371,7 +388,7 @@ func TestFirewall_Drop2(t *testing.T) {
 		ConnectionState: &ConnectionState{
 			peerCert: &c,
 		},
-		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
+		vpnIp: netip.MustParseAddr(ipNet.IP.String()),
 	}
 	h.CreateRemoteCIDR(&c)
 
@@ -390,14 +407,14 @@ func TestFirewall_Drop2(t *testing.T) {
 	h1.CreateRemoteCIDR(&c1)
 
 	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", nil, nil, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"default-group", "test-group"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
 	cp := cert.NewCAPool()
 
 	// h1/c1 lacks the proper groups
-	assert.Error(t, fw.Drop([]byte{}, p, true, &h1, cp, nil), ErrNoMatchingRule)
+	assert.Error(t, fw.Drop(p, true, &h1, cp, nil), ErrNoMatchingRule)
 	// c has the proper groups
 	resetConntrack(fw)
-	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
+	assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
 }
 
 func TestFirewall_Drop3(t *testing.T) {
@@ -406,8 +423,8 @@ func TestFirewall_Drop3(t *testing.T) {
 	l.SetOutput(ob)
 
 	p := firewall.Packet{
-		LocalIP:    iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		RemoteIP:   iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
+		LocalIP:    netip.MustParseAddr("1.2.3.4"),
+		RemoteIP:   netip.MustParseAddr("1.2.3.4"),
 		LocalPort:  1,
 		RemotePort: 1,
 		Protocol:   firewall.ProtoUDP,
@@ -437,7 +454,7 @@ func TestFirewall_Drop3(t *testing.T) {
 		ConnectionState: &ConnectionState{
 			peerCert: &c1,
 		},
-		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
+		vpnIp: netip.MustParseAddr(ipNet.IP.String()),
 	}
 	h1.CreateRemoteCIDR(&c1)
 
@@ -452,7 +469,7 @@ func TestFirewall_Drop3(t *testing.T) {
 		ConnectionState: &ConnectionState{
 			peerCert: &c2,
 		},
-		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
+		vpnIp: netip.MustParseAddr(ipNet.IP.String()),
 	}
 	h2.CreateRemoteCIDR(&c2)
 
@@ -467,23 +484,23 @@ func TestFirewall_Drop3(t *testing.T) {
 		ConnectionState: &ConnectionState{
 			peerCert: &c3,
 		},
-		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
+		vpnIp: netip.MustParseAddr(ipNet.IP.String()),
 	}
 	h3.CreateRemoteCIDR(&c3)
 
 	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", nil, nil, "", ""))
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", nil, nil, "", "signer-sha"))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "host1", netip.Prefix{}, netip.Prefix{}, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 1, 1, []string{}, "", netip.Prefix{}, netip.Prefix{}, "", "signer-sha"))
 	cp := cert.NewCAPool()
 
 	// c1 should pass because host match
-	assert.NoError(t, fw.Drop([]byte{}, p, true, &h1, cp, nil))
+	assert.NoError(t, fw.Drop(p, true, &h1, cp, nil))
 	// c2 should pass because ca sha match
 	resetConntrack(fw)
-	assert.NoError(t, fw.Drop([]byte{}, p, true, &h2, cp, nil))
+	assert.NoError(t, fw.Drop(p, true, &h2, cp, nil))
 	// c3 should fail because no match
 	resetConntrack(fw)
-	assert.Equal(t, fw.Drop([]byte{}, p, true, &h3, cp, nil), ErrNoMatchingRule)
+	assert.Equal(t, fw.Drop(p, true, &h3, cp, nil), ErrNoMatchingRule)
 }
 
 func TestFirewall_DropConntrackReload(t *testing.T) {
@@ -492,8 +509,8 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
 	l.SetOutput(ob)
 
 	p := firewall.Packet{
-		LocalIP:    iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
-		RemoteIP:   iputil.Ip2VpnIp(net.IPv4(1, 2, 3, 4)),
+		LocalIP:    netip.MustParseAddr("1.2.3.4"),
+		RemoteIP:   netip.MustParseAddr("1.2.3.4"),
 		LocalPort:  10,
 		RemotePort: 90,
 		Protocol:   firewall.ProtoUDP,
@@ -518,39 +535,39 @@ func TestFirewall_DropConntrackReload(t *testing.T) {
 		ConnectionState: &ConnectionState{
 			peerCert: &c,
 		},
-		vpnIp: iputil.Ip2VpnIp(ipNet.IP),
+		vpnIp: netip.MustParseAddr(ipNet.IP.String()),
 	}
 	h.CreateRemoteCIDR(&c)
 
 	fw := NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", nil, nil, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 0, 0, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
 	cp := cert.NewCAPool()
 
 	// Drop outbound
-	assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
+	assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrNoMatchingRule)
 	// Allow inbound
 	resetConntrack(fw)
-	assert.NoError(t, fw.Drop([]byte{}, p, true, &h, cp, nil))
+	assert.NoError(t, fw.Drop(p, true, &h, cp, nil))
 	// Allow outbound because conntrack
-	assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
+	assert.NoError(t, fw.Drop(p, false, &h, cp, nil))
 
 	oldFw := fw
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", nil, nil, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 10, 10, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
 	fw.Conntrack = oldFw.Conntrack
 	fw.rulesVersion = oldFw.rulesVersion + 1
 
 	// Allow outbound because conntrack and new rules allow port 10
-	assert.NoError(t, fw.Drop([]byte{}, p, false, &h, cp, nil))
+	assert.NoError(t, fw.Drop(p, false, &h, cp, nil))
 
 	oldFw = fw
 	fw = NewFirewall(l, time.Second, time.Minute, time.Hour, &c)
-	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", nil, nil, "", ""))
+	assert.Nil(t, fw.AddRule(true, firewall.ProtoAny, 11, 11, []string{"any"}, "", netip.Prefix{}, netip.Prefix{}, "", ""))
 	fw.Conntrack = oldFw.Conntrack
 	fw.rulesVersion = oldFw.rulesVersion + 1
 
 	// Drop outbound because conntrack doesn't match new ruleset
-	assert.Equal(t, fw.Drop([]byte{}, p, false, &h, cp, nil), ErrNoMatchingRule)
+	assert.Equal(t, fw.Drop(p, false, &h, cp, nil), ErrNoMatchingRule)
 }
 
 func BenchmarkLookup(b *testing.B) {
@@ -709,13 +726,13 @@ func TestNewFirewallFromConfig(t *testing.T) {
 	conf = config.NewC(l)
 	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "cidr": "testh", "proto": "any"}}}
 	_, err = NewFirewallFromConfig(l, c, conf)
-	assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; invalid CIDR address: testh")
+	assert.EqualError(t, err, "firewall.outbound rule #0; cidr did not parse; netip.ParsePrefix(\"testh\"): no '/'")
 
 	// Test local_cidr parse error
 	conf = config.NewC(l)
 	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"code": "1", "local_cidr": "testh", "proto": "any"}}}
 	_, err = NewFirewallFromConfig(l, c, conf)
-	assert.EqualError(t, err, "firewall.outbound rule #0; local_cidr did not parse; invalid CIDR address: testh")
+	assert.EqualError(t, err, "firewall.outbound rule #0; local_cidr did not parse; netip.ParsePrefix(\"testh\"): no '/'")
 
 	// Test both group and groups
 	conf = config.NewC(l)
@@ -731,78 +748,78 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
 	mf := &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "tcp", "host": "a"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoTCP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
 
 	// Test adding udp rule
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "udp", "host": "a"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoUDP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
 
 	// Test adding icmp rule
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"outbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "icmp", "host": "a"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, false, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: false, proto: firewall.ProtoICMP, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
 
 	// Test adding any rule
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "host": "a"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: nil, localIp: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, host: "a", ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
 
 	// Test adding rule with cidr
-	cidr := &net.IPNet{IP: net.ParseIP("10.0.0.0").To4(), Mask: net.IPv4Mask(255, 0, 0, 0)}
+	cidr := netip.MustParsePrefix("10.0.0.0/8")
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "cidr": cidr.String()}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr, localIp: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: cidr, localIp: netip.Prefix{}}, mf.lastCall)
 
 	// Test adding rule with local_cidr
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "local_cidr": cidr.String()}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: cidr}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: cidr}, mf.lastCall)
 
 	// Test adding rule with ca_sha
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_sha": "12312313123"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caSha: "12312313123"}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: netip.Prefix{}, caSha: "12312313123"}, mf.lastCall)
 
 	// Test adding rule with ca_name
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "ca_name": "root01"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: nil, localIp: nil, caName: "root01"}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: nil, ip: netip.Prefix{}, localIp: netip.Prefix{}, caName: "root01"}, mf.lastCall)
 
 	// Test single group
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "group": "a"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
 
 	// Test single groups
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": "a"}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: nil, localIp: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
 
 	// Test multiple AND groups
 	conf = config.NewC(l)
 	mf = &mockFirewall{}
 	conf.Settings["firewall"] = map[interface{}]interface{}{"inbound": []interface{}{map[interface{}]interface{}{"port": "1", "proto": "any", "groups": []string{"a", "b"}}}}
 	assert.Nil(t, AddFirewallRulesFromConfig(l, true, conf, mf))
-	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: nil, localIp: nil}, mf.lastCall)
+	assert.Equal(t, addRuleCall{incoming: true, proto: firewall.ProtoAny, startPort: 1, endPort: 1, groups: []string{"a", "b"}, ip: netip.Prefix{}, localIp: netip.Prefix{}}, mf.lastCall)
 
 	// Test Add error
 	conf = config.NewC(l)
@@ -812,97 +829,6 @@ func TestAddFirewallRulesFromConfig(t *testing.T) {
 	assert.EqualError(t, AddFirewallRulesFromConfig(l, true, conf, mf), "firewall.inbound rule #0; `test error`")
 }
 
-func TestTCPRTTTracking(t *testing.T) {
-	b := make([]byte, 200)
-
-	// Max ip IHL (60 bytes) and tcp IHL (60 bytes)
-	b[0] = 15
-	b[60+12] = 15 << 4
-	f := Firewall{
-		metricTCPRTT: metrics.GetOrRegisterHistogram("nope", nil, metrics.NewExpDecaySample(1028, 0.015)),
-	}
-
-	// Set SEQ to 1
-	binary.BigEndian.PutUint32(b[60+4:60+8], 1)
-
-	c := &conn{}
-	setTCPRTTTracking(c, b)
-	assert.Equal(t, uint32(1), c.Seq)
-
-	// Bad ack - no ack flag
-	binary.BigEndian.PutUint32(b[60+8:60+12], 80)
-	assert.False(t, f.checkTCPRTT(c, b))
-
-	// Bad ack, number is too low
-	binary.BigEndian.PutUint32(b[60+8:60+12], 0)
-	b[60+13] = uint8(0x10)
-	assert.False(t, f.checkTCPRTT(c, b))
-
-	// Good ack
-	binary.BigEndian.PutUint32(b[60+8:60+12], 80)
-	assert.True(t, f.checkTCPRTT(c, b))
-	assert.Equal(t, uint32(0), c.Seq)
-
-	// Set SEQ to 1
-	binary.BigEndian.PutUint32(b[60+4:60+8], 1)
-	c = &conn{}
-	setTCPRTTTracking(c, b)
-	assert.Equal(t, uint32(1), c.Seq)
-
-	// Good acks
-	binary.BigEndian.PutUint32(b[60+8:60+12], 81)
-	assert.True(t, f.checkTCPRTT(c, b))
-	assert.Equal(t, uint32(0), c.Seq)
-
-	// Set SEQ to max uint32 - 20
-	binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0)-20)
-	c = &conn{}
-	setTCPRTTTracking(c, b)
-	assert.Equal(t, ^uint32(0)-20, c.Seq)
-
-	// Good acks
-	binary.BigEndian.PutUint32(b[60+8:60+12], 81)
-	assert.True(t, f.checkTCPRTT(c, b))
-	assert.Equal(t, uint32(0), c.Seq)
-
-	// Set SEQ to max uint32 / 2
-	binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0)/2)
-	c = &conn{}
-	setTCPRTTTracking(c, b)
-	assert.Equal(t, ^uint32(0)/2, c.Seq)
-
-	// Below
-	binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2-1)
-	assert.False(t, f.checkTCPRTT(c, b))
-	assert.Equal(t, ^uint32(0)/2, c.Seq)
-
-	// Halfway below
-	binary.BigEndian.PutUint32(b[60+8:60+12], uint32(0))
-	assert.False(t, f.checkTCPRTT(c, b))
-	assert.Equal(t, ^uint32(0)/2, c.Seq)
-
-	// Halfway above is ok
-	binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0))
-	assert.True(t, f.checkTCPRTT(c, b))
-	assert.Equal(t, uint32(0), c.Seq)
-
-	// Set SEQ to max uint32
-	binary.BigEndian.PutUint32(b[60+4:60+8], ^uint32(0))
-	c = &conn{}
-	setTCPRTTTracking(c, b)
-	assert.Equal(t, ^uint32(0), c.Seq)
-
-	// Halfway + 1 above
-	binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2+1)
-	assert.False(t, f.checkTCPRTT(c, b))
-	assert.Equal(t, ^uint32(0), c.Seq)
-
-	// Halfway above
-	binary.BigEndian.PutUint32(b[60+8:60+12], ^uint32(0)/2)
-	assert.True(t, f.checkTCPRTT(c, b))
-	assert.Equal(t, uint32(0), c.Seq)
-}
 
 func TestFirewall_convertRule(t *testing.T) {
 	l := test.NewLogger()
 	ob := &bytes.Buffer{}
@@ -946,8 +872,8 @@ type addRuleCall struct {
 	endPort   int32
 	groups    []string
 	host      string
-	ip        *net.IPNet
-	localIp   *net.IPNet
+	ip        netip.Prefix
+	localIp   netip.Prefix
 	caName    string
 	caSha     string
 }
@@ -957,7 +883,7 @@ type mockFirewall struct {
 	nextCallReturn error
 }
 
-func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip *net.IPNet, localIp *net.IPNet, caName string, caSha string) error {
+func (mf *mockFirewall) AddRule(incoming bool, proto uint8, startPort int32, endPort int32, groups []string, host string, ip netip.Prefix, localIp netip.Prefix, caName string, caSha string) error {
 	mf.lastCall = addRuleCall{
 		incoming: incoming,
 		proto:    proto,
 
50	go.mod
@@ -1,49 +1,55 @@
 module github.com/slackhq/nebula
 
-go 1.20
+go 1.22.0
+
+toolchain go1.22.2
 
 require (
+	dario.cat/mergo v1.0.0
 	github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be
 	github.com/armon/go-radix v1.0.0
 	github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432
-	github.com/flynn/noise v1.0.0
+	github.com/flynn/noise v1.1.0
+	github.com/gaissmai/bart v0.11.1
 	github.com/gogo/protobuf v1.3.2
 	github.com/google/gopacket v1.1.19
-	github.com/imdario/mergo v0.3.15
 	github.com/kardianos/service v1.2.2
-	github.com/miekg/dns v1.1.54
+	github.com/miekg/dns v1.1.61
 	github.com/nbrownus/go-metrics-prometheus v0.0.0-20210712211119-974a6260965f
-	github.com/prometheus/client_golang v1.15.1
+	github.com/prometheus/client_golang v1.19.1
 	github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
-	github.com/sirupsen/logrus v1.9.0
+	github.com/sirupsen/logrus v1.9.3
 	github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e
 	github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8
-	github.com/stretchr/testify v1.8.2
-	github.com/vishvananda/netlink v1.1.0
-	golang.org/x/crypto v0.8.0
-	golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53
-	golang.org/x/net v0.9.0
-	golang.org/x/sys v0.8.0
-	golang.org/x/term v0.8.0
+	github.com/stretchr/testify v1.9.0
+	github.com/vishvananda/netlink v1.2.1-beta.2
+	golang.org/x/crypto v0.26.0
+	golang.org/x/exp v0.0.0-20230725093048-515e97ebf090
+	golang.org/x/net v0.28.0
+	golang.org/x/sync v0.8.0
+	golang.org/x/sys v0.24.0
+	golang.org/x/term v0.23.0
 	golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
 	golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b
 	golang.zx2c4.com/wireguard/windows v0.5.3
-	google.golang.org/protobuf v1.30.0
+	google.golang.org/protobuf v1.34.2
 	gopkg.in/yaml.v2 v2.4.0
+	gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe
 )
 
 require (
 	github.com/beorn7/perks v1.0.1 // indirect
+	github.com/bits-and-blooms/bitset v1.13.0 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
-	github.com/golang/protobuf v1.5.3 // indirect
-	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
+	github.com/google/btree v1.1.2 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
-	github.com/prometheus/client_model v0.4.0 // indirect
-	github.com/prometheus/common v0.42.0 // indirect
-	github.com/prometheus/procfs v0.9.0 // indirect
-	github.com/rogpeppe/go-internal v1.10.0 // indirect
+	github.com/prometheus/client_model v0.5.0 // indirect
+	github.com/prometheus/common v0.48.0 // indirect
+	github.com/prometheus/procfs v0.12.0 // indirect
 	github.com/vishvananda/netns v0.0.4 // indirect
-	golang.org/x/mod v0.10.0 // indirect
-	golang.org/x/tools v0.8.0 // indirect
+	golang.org/x/mod v0.18.0 // indirect
+	golang.org/x/time v0.5.0 // indirect
+	golang.org/x/tools v0.22.0 // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )
 
106	go.sum
@@ -1,4 +1,6 @@
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
 github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
@@ -12,6 +14,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
 github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bits-and-blooms/bitset v1.13.0 h1:bAQ9OPNFYbGHV6Nez0tmNI0RiEu7/hxlYJRUA0wFAVE=
+github.com/bits-and-blooms/bitset v1.13.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
 github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
@@ -20,8 +24,10 @@ github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ=
-github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
+github.com/flynn/noise v1.1.0 h1:KjPQoQCEFdZDiP03phOvGi11+SVVhBG2wOWAorLsstg=
+github.com/flynn/noise v1.1.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag=
+github.com/gaissmai/bart v0.11.1 h1:5Uv5XwsaFBRo4E5VBcb9TzY8B7zxFf+U7isDxqOrRfc=
+github.com/gaissmai/bart v0.11.1/go.mod h1:KHeYECXQiBjTzQz/om2tqn3sZF1J7hw9m6z41ftj3fg=
 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
 github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
@@ -42,20 +48,18 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
 github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
 github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
 github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
-github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
-github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
-github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
+github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
 github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
 github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
 github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
-github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM=
-github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY=
 github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -72,14 +76,13 @@ github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFB
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
 github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
-github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
-github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
-github.com/miekg/dns v1.1.54 h1:5jon9mWcb0sFJGpnI99tOMhCPyJ+RPVz5b63MQG0VWI=
-github.com/miekg/dns v1.1.54/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY=
+github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
+github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -97,24 +100,24 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
-github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI=
-github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk=
+github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE=
+github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
 github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
-github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
-github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
+github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
+github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
-github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
+github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE=
+github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
-github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
+github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
+github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
 github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
 github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@@ -122,27 +125,23 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
|
||||
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e h1:MRM5ITcdelLK2j1vwZ3Je0FKVCfqOLp5zO6trqMLYs0=
|
||||
github.com/skip2/go-qrcode v0.0.0-20200617195104-da1b6568686e/go.mod h1:XV66xRDqSt+GTGFMVlhk3ULuV0y9ZmzeVGR4mloJI3M=
|
||||
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8=
|
||||
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8/go.mod h1:P5HUIBuIWKbyjl083/loAegFkfbFNx5i2qEP4CNbm7E=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
|
||||
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
|
||||
github.com/vishvananda/netlink v1.2.1-beta.2 h1:Llsql0lnQEbHj0I1OuKyp8otXp0r3q0mPkuhwHfStVs=
|
||||
github.com/vishvananda/netlink v1.2.1-beta.2/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
|
||||
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
|
||||
github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8=
|
||||
github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
@@ -152,16 +151,16 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
|
||||
golang.org/x/crypto v0.8.0/go.mod h1:mRqEX+O9/h5TFCrQhkgjo2yKi0yYA+9ecGkdQoHrywE=
|
||||
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o=
|
||||
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
|
||||
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
|
||||
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
|
||||
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090 h1:Di6/M8l0O2lCLc6VVRWhgCiApHV8MnQurBnFSHsQtNY=
|
||||
golang.org/x/exp v0.0.0-20230725093048-515e97ebf090/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
|
||||
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
|
||||
golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
@@ -172,8 +171,8 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
|
||||
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -181,44 +180,50 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
||||
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
|
||||
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
|
||||
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
|
||||
golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
|
||||
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
|
||||
golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
|
||||
golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
|
||||
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
|
||||
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
|
||||
golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
|
||||
golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b h1:J1CaxgLerRR5lgx3wnr6L04cJFbWoceSK9JWBdglINo=
|
||||
golang.zx2c4.com/wireguard v0.0.0-20230325221338-052af4a8072b/go.mod h1:tqur9LnfstdR9ep2LaJT4lFUl0EjlHtge+gAjmsHUG4=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus8eIuExIE=
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
@@ -229,9 +234,8 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
|
||||
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
|
||||
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@@ -247,3 +251,5 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe h1:fre4i6mv4iBuz5lCMOzHD1rH1ljqHWSICFmZRbbgp3g=
|
||||
gvisor.dev/gvisor v0.0.0-20240423190808-9d7a357edefe/go.mod h1:sxc3Uvk/vHcd3tj7/DHVBoR5wvWT/MmRq2pj7HRJnwU=
|
||||
|
||||
31  handshake.go
@@ -1,31 +0,0 @@
package nebula

import (
	"github.com/slackhq/nebula/header"
	"github.com/slackhq/nebula/udp"
)

func HandleIncomingHandshake(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H, hostinfo *HostInfo) {
	// First remote allow list check before we know the vpnIp
	if addr != nil {
		if !f.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.IP) {
			f.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
			return
		}
	}

	switch h.Subtype {
	case header.HandshakeIXPSK0:
		switch h.MessageCounter {
		case 1:
			ixHandshakeStage1(f, addr, via, packet, h)
		case 2:
			newHostinfo, _ := f.handshakeManager.QueryIndex(h.RemoteIndex)
			tearDown := ixHandshakeStage2(f, addr, via, newHostinfo, packet, h)
			if tearDown && newHostinfo != nil {
				f.handshakeManager.DeleteHostInfo(newHostinfo)
			}
		}
	}

}
233  handshake_ix.go
@@ -1,39 +1,34 @@
package nebula

import (
	"net/netip"
	"time"

	"github.com/flynn/noise"
	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/header"
	"github.com/slackhq/nebula/iputil"
	"github.com/slackhq/nebula/udp"
)

// NOISE IX Handshakes

// This function constructs a handshake packet, but does not actually send it
// Sending is done by the handshake manager
func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
	// This queries the lighthouse if we don't know a remote for the host
	// We do it here to provoke the lighthouse to preempt our timer wheel and trigger the stage 1 packet to send
	// more quickly, effect is a quicker handshake.
	if hostinfo.remote == nil {
		f.lightHouse.QueryServer(vpnIp, f)
	}

	err := f.handshakeManager.AddIndexHostInfo(hostinfo)
func ixHandshakeStage0(f *Interface, hh *HandshakeHostInfo) bool {
	err := f.handshakeManager.allocateIndex(hh)
	if err != nil {
		f.l.WithError(err).WithField("vpnIp", vpnIp).
		f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
			WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to generate index")
		return
		return false
	}

	ci := hostinfo.ConnectionState
	certState := f.pki.GetCertState()
	ci := NewConnectionState(f.l, f.cipher, certState, true, noise.HandshakeIX, []byte{}, 0)
	hh.hostinfo.ConnectionState = ci

	hsProto := &NebulaHandshakeDetails{
		InitiatorIndex: hostinfo.localIndexId,
		InitiatorIndex: hh.hostinfo.localIndexId,
		Time:           uint64(time.Now().UnixNano()),
		Cert:           ci.certState.rawCertificateNoKey,
		Cert:           certState.RawCertificateNoKey,
	}

	hsBytes := []byte{}
@@ -44,32 +39,32 @@ func ixHandshakeStage0(f *Interface, vpnIp iputil.VpnIp, hostinfo *HostInfo) {
	hsBytes, err = hs.Marshal()

	if err != nil {
		f.l.WithError(err).WithField("vpnIp", vpnIp).
		f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
			WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to marshal handshake message")
		return
		return false
	}

	h := header.Encode(make([]byte, header.Len), header.Version, header.Handshake, header.HandshakeIXPSK0, 0, 1)
	ci.messageCounter.Add(1)

	msg, _, _, err := ci.H.WriteMessage(h, hsBytes)
	if err != nil {
		f.l.WithError(err).WithField("vpnIp", vpnIp).
		f.l.WithError(err).WithField("vpnIp", hh.hostinfo.vpnIp).
			WithField("handshake", m{"stage": 0, "style": "ix_psk0"}).Error("Failed to call noise.WriteMessage")
		return
		return false
	}

	// We are sending handshake packet 1, so we don't expect to receive
	// handshake packet 1 from the responder
	ci.window.Update(f.l, 1)

	hostinfo.HandshakePacket[0] = msg
	hostinfo.HandshakeReady = true
	hostinfo.handshakeStart = time.Now()
	hh.hostinfo.HandshakePacket[0] = msg
	hh.ready = true
	return true
}

func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []byte, h *header.H) {
	ci := f.newConnectionState(f.l, false, noise.HandshakeIX, []byte{}, 0)
func ixHandshakeStage1(f *Interface, addr netip.AddrPort, via *ViaSender, packet []byte, h *header.H) {
	certState := f.pki.GetCertState()
	ci := NewConnectionState(f.l, f.cipher, certState, false, noise.HandshakeIX, []byte{}, 0)
	// Mark packet 1 as seen so it doesn't show up as missed
	ci.window.Update(f.l, 1)

@@ -91,19 +86,38 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
		return
	}

	remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool)
	remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
	if err != nil {
		f.l.WithError(err).WithField("udpAddr", addr).
			WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).WithField("cert", remoteCert).
			Info("Invalid certificate from host")
		e := f.l.WithError(err).WithField("udpAddr", addr).
			WithField("handshake", m{"stage": 1, "style": "ix_psk0"})

		if f.l.Level > logrus.DebugLevel {
			e = e.WithField("cert", remoteCert)
		}

		e.Info("Invalid certificate from host")
		return
	}
	vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP)

	vpnIp, ok := netip.AddrFromSlice(remoteCert.Details.Ips[0].IP)
	if !ok {
		e := f.l.WithError(err).WithField("udpAddr", addr).
			WithField("handshake", m{"stage": 1, "style": "ix_psk0"})

		if f.l.Level > logrus.DebugLevel {
			e = e.WithField("cert", remoteCert)
		}

		e.Info("Invalid vpn ip from host")
		return
	}

	vpnIp = vpnIp.Unmap()
	certName := remoteCert.Details.Name
	fingerprint, _ := remoteCert.Sha256Sum()
	issuer := remoteCert.Details.Issuer

	if vpnIp == f.myVpnIp {
	if vpnIp == f.myVpnNet.Addr() {
		f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
			WithField("certName", certName).
			WithField("fingerprint", fingerprint).
@@ -112,8 +126,8 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
		return
	}

	if addr != nil {
		if !f.lightHouse.GetRemoteAllowList().Allow(vpnIp, addr.IP) {
	if addr.IsValid() {
		if !f.lightHouse.GetRemoteAllowList().Allow(vpnIp, addr.Addr()) {
			f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
			return
		}
@@ -137,15 +151,12 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
		HandshakePacket:   make(map[uint8][]byte, 0),
		lastHandshakeTime: hs.Details.Time,
		relayState: RelayState{
			relays:        map[iputil.VpnIp]struct{}{},
			relayForByIp:  map[iputil.VpnIp]*Relay{},
			relays:        nil,
			relayForByIp:  map[netip.Addr]*Relay{},
			relayForByIdx: map[uint32]*Relay{},
		},
	}

	hostinfo.Lock()
	defer hostinfo.Unlock()

	f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
		WithField("certName", certName).
		WithField("fingerprint", fingerprint).
@@ -155,7 +166,7 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
		Info("Handshake message received")

	hs.Details.ResponderIndex = myIndex
	hs.Details.Cert = ci.certState.rawCertificateNoKey
	hs.Details.Cert = certState.RawCertificateNoKey
	// Update the time in case their clock is way off from ours
	hs.Details.Time = uint64(time.Now().UnixNano())

@@ -211,23 +222,16 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
	if err != nil {
		switch err {
		case ErrAlreadySeen:
			// Update remote if preferred (Note we have to switch to locking
			// the existing hostinfo, and then switch back so the defer Unlock
			// higher in this function still works)
			hostinfo.Unlock()
			existing.Lock()
			// Update remote if preferred
			if existing.SetRemoteIfPreferred(f.hostMap, addr) {
				// Send a test packet to ensure the other side has also switched to
				// the preferred remote
				f.SendMessageToVpnIp(header.Test, header.TestRequest, vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
			}
			existing.Unlock()
			hostinfo.Lock()

			msg = existing.HandshakePacket[2]
			f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
			if addr != nil {
			if addr.IsValid() {
				err := f.outside.WriteTo(msg, addr)
				if err != nil {
					f.l.WithField("vpnIp", existing.vpnIp).WithField("udpAddr", addr).
@@ -293,7 +297,7 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by

	// Do the send
	f.messageMetrics.Tx(header.Handshake, header.MessageSubType(msg[1]), 1)
	if addr != nil {
	if addr.IsValid() {
		err = f.outside.WriteTo(msg, addr)
		if err != nil {
			f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
@@ -310,7 +314,6 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
			WithField("issuer", issuer).
			WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
			WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
			WithField("sentCachedPackets", len(hostinfo.packetStore)).
			Info("Handshake message sent")
		}
	} else {
@@ -319,6 +322,9 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
			return
		}
		hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
		// I successfully received a handshake. Just in case I marked this tunnel as 'Disestablished', ensure
		// it's correctly marked as working.
		via.relayHI.relayState.UpdateRelayForByIdxState(via.remoteIdx, Established)
		f.SendVia(via.relayHI, via.relay, msg, make([]byte, 12), make([]byte, mtu), false)
		f.l.WithField("vpnIp", vpnIp).WithField("relay", via.relayHI.vpnIp).
			WithField("certName", certName).
@@ -326,49 +332,34 @@ func ixHandshakeStage1(f *Interface, addr *udp.Addr, via *ViaSender, packet []by
			WithField("issuer", issuer).
			WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
			WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
			WithField("sentCachedPackets", len(hostinfo.packetStore)).
			Info("Handshake message sent")
	}

	f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
	hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
	f.connectionManager.AddTrafficWatch(hostinfo)

	hostinfo.remotes.ResetBlockedRemotes()

	return
}

func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *HostInfo, packet []byte, h *header.H) bool {
	if hostinfo == nil {
func ixHandshakeStage2(f *Interface, addr netip.AddrPort, via *ViaSender, hh *HandshakeHostInfo, packet []byte, h *header.H) bool {
	if hh == nil {
		// Nothing here to tear down, got a bogus stage 2 packet
		return true
	}

	hostinfo.Lock()
	defer hostinfo.Unlock()
	hh.Lock()
	defer hh.Unlock()

	if addr != nil {
		if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.IP) {
	hostinfo := hh.hostinfo
	if addr.IsValid() {
		if !f.lightHouse.GetRemoteAllowList().Allow(hostinfo.vpnIp, addr.Addr()) {
			f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
			return false
		}
	}

	ci := hostinfo.ConnectionState
	if ci.ready {
		f.l.WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
			WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).WithField("header", h).
			Info("Handshake is already complete")

		// Update remote if preferred
		if hostinfo.SetRemoteIfPreferred(f.hostMap, addr) {
			// Send a test packet to ensure the other side has also switched to
			// the preferred remote
			f.SendMessageToVpnIp(header.Test, header.TestRequest, hostinfo.vpnIp, []byte(""), make([]byte, 12, 12), make([]byte, mtu))
		}

		// We already have a complete tunnel, there is nothing that can be done by processing further stage 1 packets
		return false
	}

	msg, eKey, dKey, err := ci.H.ReadMessage(nil, packet[header.Len:])
	if err != nil {
		f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
@@ -399,17 +390,35 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
		return true
	}

	remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.caPool)
	remoteCert, err := RecombineCertAndValidate(ci.H, hs.Details.Cert, f.pki.GetCAPool())
	if err != nil {
		f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
			WithField("cert", remoteCert).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
			Error("Invalid certificate from host")
		e := f.l.WithError(err).WithField("vpnIp", hostinfo.vpnIp).WithField("udpAddr", addr).
			WithField("handshake", m{"stage": 2, "style": "ix_psk0"})

		if f.l.Level > logrus.DebugLevel {
			e = e.WithField("cert", remoteCert)
		}

		e.Error("Invalid certificate from host")

		// The handshake state machine is complete, if things break now there is no chance to recover. Tear down and start again
		return true
	}

	vpnIp := iputil.Ip2VpnIp(remoteCert.Details.Ips[0].IP)
	vpnIp, ok := netip.AddrFromSlice(remoteCert.Details.Ips[0].IP)
	if !ok {
		e := f.l.WithError(err).WithField("udpAddr", addr).
			WithField("handshake", m{"stage": 2, "style": "ix_psk0"})

		if f.l.Level > logrus.DebugLevel {
			e = e.WithField("cert", remoteCert)
		}

		e.Info("Invalid vpn ip from host")
		return true
	}

	vpnIp = vpnIp.Unmap()
	certName := remoteCert.Details.Name
	fingerprint, _ := remoteCert.Sha256Sum()
	issuer := remoteCert.Details.Issuer
@@ -422,34 +431,30 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
			Info("Incorrect host responded to handshake")

		// Release our old handshake from pending, it should not continue
		f.handshakeManager.pendingHostMap.DeleteHostInfo(hostinfo)
		f.handshakeManager.DeleteHostInfo(hostinfo)

		// Create a new hostinfo/handshake for the intended vpn ip
		//TODO: this adds it to the timer wheel in a way that aggressively retries
		newHostInfo := f.getOrHandshake(hostinfo.vpnIp)
		newHostInfo.Lock()
		f.handshakeManager.StartHandshake(hostinfo.vpnIp, func(newHH *HandshakeHostInfo) {
			//TODO: this doesnt know if its being added or is being used for caching a packet
			// Block the current used address
			newHH.hostinfo.remotes = hostinfo.remotes
			newHH.hostinfo.remotes.BlockRemote(addr)

		// Block the current used address
		newHostInfo.remotes = hostinfo.remotes
		newHostInfo.remotes.BlockRemote(addr)
			// Get the correct remote list for the host we did handshake with
			hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)

		// Get the correct remote list for the host we did handshake with
		hostinfo.remotes = f.lightHouse.QueryCache(vpnIp)
			f.l.WithField("blockedUdpAddrs", newHH.hostinfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
				WithField("remotes", newHH.hostinfo.remotes.CopyAddrs(f.hostMap.GetPreferredRanges())).
				Info("Blocked addresses for handshakes")

		f.l.WithField("blockedUdpAddrs", newHostInfo.remotes.CopyBlockedRemotes()).WithField("vpnIp", vpnIp).
			WithField("remotes", newHostInfo.remotes.CopyAddrs(f.hostMap.preferredRanges)).
			Info("Blocked addresses for handshakes")
			// Swap the packet store to benefit the original intended recipient
			newHH.packetStore = hh.packetStore
			hh.packetStore = []*cachedPacket{}

		// Swap the packet store to benefit the original intended recipient
		hostinfo.ConnectionState.queueLock.Lock()
		newHostInfo.packetStore = hostinfo.packetStore
		hostinfo.packetStore = []*cachedPacket{}
		hostinfo.ConnectionState.queueLock.Unlock()

		// Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
		hostinfo.vpnIp = vpnIp
		f.sendCloseTunnel(hostinfo)
		newHostInfo.Unlock()
			// Finally, put the correct vpn ip in the host info, tell them to close the tunnel, and return true to tear down
			hostinfo.vpnIp = vpnIp
			f.sendCloseTunnel(hostinfo)
		})

		return true
	}
@@ -457,7 +462,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
	// Mark packet 2 as seen so it doesn't show up as missed
	ci.window.Update(f.l, 2)

	duration := time.Since(hostinfo.handshakeStart).Nanoseconds()
	duration := time.Since(hh.startTime).Nanoseconds()
	f.l.WithField("vpnIp", vpnIp).WithField("udpAddr", addr).
		WithField("certName", certName).
		WithField("fingerprint", fingerprint).
@@ -465,7 +470,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
		WithField("initiatorIndex", hs.Details.InitiatorIndex).WithField("responderIndex", hs.Details.ResponderIndex).
		WithField("remoteIndex", h.RemoteIndex).WithField("handshake", m{"stage": 2, "style": "ix_psk0"}).
		WithField("durationNs", duration).
		WithField("sentCachedPackets", len(hostinfo.packetStore)).
		WithField("sentCachedPackets", len(hh.packetStore)).
		Info("Handshake message received")

	hostinfo.remoteIndexId = hs.Details.ResponderIndex
@@ -477,7 +482,7 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H
	ci.eKey = NewNebulaCipherState(eKey)

	// Make sure the current udpAddr being used is set for responding
	if addr != nil {
	if addr.IsValid() {
		hostinfo.SetRemote(addr)
	} else {
		hostinfo.relayState.InsertRelayTo(via.relayHI.vpnIp)
@@ -488,8 +493,22 @@ func ixHandshakeStage2(f *Interface, addr *udp.Addr, via *ViaSender, hostinfo *H

	// Complete our handshake and update metrics, this will replace any existing tunnels for this vpnIp
	f.handshakeManager.Complete(hostinfo, f)
	f.connectionManager.AddTrafficWatch(hostinfo.localIndexId)
	hostinfo.handshakeComplete(f.l, f.cachedPacketMetrics)
	f.connectionManager.AddTrafficWatch(hostinfo)

	if f.l.Level >= logrus.DebugLevel {
		hostinfo.logger(f.l).Debugf("Sending %d stored packets", len(hh.packetStore))
	}

	if len(hh.packetStore) > 0 {
		nb := make([]byte, 12, 12)
		out := make([]byte, mtu)
		for _, cp := range hh.packetStore {
			cp.callback(cp.messageType, cp.messageSubType, hostinfo, cp.packet, nb, out)
		}
		f.cachedPacketMetrics.sent.Inc(int64(len(hh.packetStore)))
	}

	hostinfo.remotes.ResetBlockedRemotes()
	f.metricHandshakes.Update(duration)

	return false
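A note on the net to net/netip migration running through the hunks above and below: certificate IPs arrive as raw byte slices, so the new code parses them with netip.AddrFromSlice and then calls Unmap, and zero-value netip.AddrPort checks replace the old nil *udp.Addr checks. The following standalone sketch (not Nebula code; addresses are illustrative) shows why both steps matter:

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	// A 4-byte slice (e.g. an IPv4 address pulled from a certificate) parses
	// directly to a plain IPv4 netip.Addr.
	v4, ok := netip.AddrFromSlice([]byte{10, 0, 0, 1})
	fmt.Println(v4, ok) // 10.0.0.1 true

	// net.IP frequently stores IPv4 in 16-byte form; parsed as-is it becomes
	// the IPv4-mapped address ::ffff:10.0.0.1, which does NOT compare equal
	// to 10.0.0.1. Unmap() normalizes it, which is why the new handshake code
	// calls vpnIp.Unmap() right after AddrFromSlice.
	mapped, _ := netip.AddrFromSlice(net.ParseIP("10.0.0.1"))
	fmt.Println(mapped == v4)         // false
	fmt.Println(mapped.Unmap() == v4) // true

	// The zero netip.AddrPort reports !IsValid(), standing in for the old
	// `addr != nil` checks on *udp.Addr.
	var addr netip.AddrPort
	fmt.Println(addr.IsValid()) // false
}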
@@ -6,14 +6,15 @@ import (
	"crypto/rand"
	"encoding/binary"
	"errors"
	"net"
	"net/netip"
	"sync"
	"time"

	"github.com/rcrowley/go-metrics"
	"github.com/sirupsen/logrus"
	"github.com/slackhq/nebula/header"
	"github.com/slackhq/nebula/iputil"
	"github.com/slackhq/nebula/udp"
	"golang.org/x/exp/slices"
)

const (
@@ -34,7 +35,7 @@ var (

type HandshakeConfig struct {
	tryInterval   time.Duration
	retries       int
	retries       int64
	triggerBuffer int
	useRelays     bool

@@ -42,30 +43,74 @@ type HandshakeConfig struct {
}

type HandshakeManager struct {
	pendingHostMap *HostMap
	// Mutex for interacting with the vpnIps and indexes maps
	sync.RWMutex

	vpnIps  map[netip.Addr]*HandshakeHostInfo
	indexes map[uint32]*HandshakeHostInfo

	mainHostMap            *HostMap
	lightHouse             *LightHouse
	outside                *udp.Conn
	outside                udp.Conn
	config                 HandshakeConfig
	OutboundHandshakeTimer *LockingTimerWheel[iputil.VpnIp]
	OutboundHandshakeTimer *LockingTimerWheel[netip.Addr]
	messageMetrics         *MessageMetrics
	metricInitiated        metrics.Counter
	metricTimedOut         metrics.Counter
	f                      *Interface
	l                      *logrus.Logger

	// can be used to trigger outbound handshake for the given vpnIp
	trigger chan iputil.VpnIp
	trigger chan netip.Addr
}

func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges []*net.IPNet, mainHostMap *HostMap, lightHouse *LightHouse, outside *udp.Conn, config HandshakeConfig) *HandshakeManager {
type HandshakeHostInfo struct {
	sync.Mutex

	startTime   time.Time        // Time that we first started trying with this handshake
	ready       bool             // Is the handshake ready
	counter     int64            // How many attempts have we made so far
	lastRemotes []netip.AddrPort // Remotes that we sent to during the previous attempt
	packetStore []*cachedPacket  // A set of packets to be transmitted once the handshake completes

	hostinfo *HostInfo
}

func (hh *HandshakeHostInfo) cachePacket(l *logrus.Logger, t header.MessageType, st header.MessageSubType, packet []byte, f packetCallback, m *cachedPacketMetrics) {
	if len(hh.packetStore) < 100 {
		tempPacket := make([]byte, len(packet))
		copy(tempPacket, packet)

		hh.packetStore = append(hh.packetStore, &cachedPacket{t, st, f, tempPacket})
		if l.Level >= logrus.DebugLevel {
			hh.hostinfo.logger(l).
				WithField("length", len(hh.packetStore)).
				WithField("stored", true).
				Debugf("Packet store")
		}

	} else {
		m.dropped.Inc(1)

		if l.Level >= logrus.DebugLevel {
			hh.hostinfo.logger(l).
				WithField("length", len(hh.packetStore)).
				WithField("stored", false).
				Debugf("Packet store")
		}
	}
}

func NewHandshakeManager(l *logrus.Logger, mainHostMap *HostMap, lightHouse *LightHouse, outside udp.Conn, config HandshakeConfig) *HandshakeManager {
	return &HandshakeManager{
		pendingHostMap: NewHostMap(l, "pending", tunCidr, preferredRanges),
		vpnIps:                 map[netip.Addr]*HandshakeHostInfo{},
		indexes:                map[uint32]*HandshakeHostInfo{},
		mainHostMap:            mainHostMap,
		lightHouse:             lightHouse,
		outside:                outside,
		config:                 config,
		trigger:                make(chan iputil.VpnIp, config.triggerBuffer),
		OutboundHandshakeTimer: NewLockingTimerWheel[iputil.VpnIp](config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
		trigger:                make(chan netip.Addr, config.triggerBuffer),
		OutboundHandshakeTimer: NewLockingTimerWheel[netip.Addr](config.tryInterval, hsTimeout(config.retries, config.tryInterval)),
		messageMetrics:         config.messageMetrics,
		metricInitiated:        metrics.GetOrRegisterCounter("handshake_manager.initiated", nil),
		metricTimedOut:         metrics.GetOrRegisterCounter("handshake_manager.timed_out", nil),
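The HandshakeHostInfo.packetStore/cachePacket pair introduced above bounds how much traffic is buffered while a tunnel is still forming: each packet is copied (the caller's buffer will be reused), the store is capped at 100 entries, and anything past the cap is counted as dropped. A minimal, self-contained sketch of that pattern, with illustrative names and the same cap:

package main

import "fmt"

const storeCap = 100

// pendingStore is an illustrative stand-in for HandshakeHostInfo.packetStore.
type pendingStore struct {
	packets [][]byte
	dropped int
}

func (s *pendingStore) cache(p []byte) {
	if len(s.packets) >= storeCap {
		s.dropped++ // the real code increments a metrics counter here
		return
	}
	buf := make([]byte, len(p)) // copy, since the caller's buffer is reused
	copy(buf, p)
	s.packets = append(s.packets, buf)
}

// flush replays everything once the handshake completes, then releases the store.
func (s *pendingStore) flush(send func([]byte)) {
	for _, p := range s.packets {
		send(p)
	}
	s.packets = nil
}

func main() {
	var s pendingStore
	s.cache([]byte("queued while handshaking"))
	s.flush(func(p []byte) { fmt.Printf("sent %q\n", p) })
}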
@@ -73,7 +118,7 @@ func NewHandshakeManager(l *logrus.Logger, tunCidr *net.IPNet, preferredRanges [
	}
}

func (c *HandshakeManager) Run(ctx context.Context, f EncWriter) {
func (c *HandshakeManager) Run(ctx context.Context) {
	clockSource := time.NewTicker(c.config.tryInterval)
	defer clockSource.Stop()

@@ -82,58 +127,80 @@ func (c *HandshakeManager) Run(ctx context.Context, f EncWriter) {
		case <-ctx.Done():
			return
		case vpnIP := <-c.trigger:
			c.handleOutbound(vpnIP, f, true)
			c.handleOutbound(vpnIP, true)
		case now := <-clockSource.C:
			c.NextOutboundHandshakeTimerTick(now, f)
			c.NextOutboundHandshakeTimerTick(now)
		}
	}
}

func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time, f EncWriter) {
func (hm *HandshakeManager) HandleIncoming(addr netip.AddrPort, via *ViaSender, packet []byte, h *header.H) {
	// First remote allow list check before we know the vpnIp
	if addr.IsValid() {
		if !hm.lightHouse.GetRemoteAllowList().AllowUnknownVpnIp(addr.Addr()) {
			hm.l.WithField("udpAddr", addr).Debug("lighthouse.remote_allow_list denied incoming handshake")
			return
		}
	}

	switch h.Subtype {
	case header.HandshakeIXPSK0:
		switch h.MessageCounter {
		case 1:
			ixHandshakeStage1(hm.f, addr, via, packet, h)

		case 2:
			newHostinfo := hm.queryIndex(h.RemoteIndex)
			tearDown := ixHandshakeStage2(hm.f, addr, via, newHostinfo, packet, h)
			if tearDown && newHostinfo != nil {
				hm.DeleteHostInfo(newHostinfo.hostinfo)
			}
		}
	}
}

func (c *HandshakeManager) NextOutboundHandshakeTimerTick(now time.Time) {
	c.OutboundHandshakeTimer.Advance(now)
	for {
		vpnIp, has := c.OutboundHandshakeTimer.Purge()
		if !has {
			break
		}
		c.handleOutbound(vpnIp, f, false)
		c.handleOutbound(vpnIp, false)
	}
}

func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, lighthouseTriggered bool) {
	hostinfo, err := c.pendingHostMap.QueryVpnIp(vpnIp)
	if err != nil {
func (hm *HandshakeManager) handleOutbound(vpnIp netip.Addr, lighthouseTriggered bool) {
	hh := hm.queryVpnIp(vpnIp)
	if hh == nil {
		return
	}
	hostinfo.Lock()
	defer hostinfo.Unlock()
	hh.Lock()
	defer hh.Unlock()

	// We may have raced to completion but now that we have a lock we should ensure we have not yet completed.
	if hostinfo.HandshakeComplete {
		// Ensure we don't exist in the pending hostmap anymore since we have completed
		c.pendingHostMap.DeleteHostInfo(hostinfo)
	hostinfo := hh.hostinfo
	// If we are out of time, clean up
	if hh.counter >= hm.config.retries {
		hh.hostinfo.logger(hm.l).WithField("udpAddrs", hh.hostinfo.remotes.CopyAddrs(hm.mainHostMap.GetPreferredRanges())).
			WithField("initiatorIndex", hh.hostinfo.localIndexId).
			WithField("remoteIndex", hh.hostinfo.remoteIndexId).
			WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
			WithField("durationNs", time.Since(hh.startTime).Nanoseconds()).
			Info("Handshake timed out")
		hm.metricTimedOut.Inc(1)
		hm.DeleteHostInfo(hostinfo)
		return
	}

	// Increment the counter to increase our delay, linear backoff
	hh.counter++

	// Check if we have a handshake packet to transmit yet
	if !hostinfo.HandshakeReady {
		// There is currently a slight race in getOrHandshake due to ConnectionState not being part of the HostInfo directly
		// Our hostinfo here was added to the pending map and the wheel may have ticked to us before we created ConnectionState
		c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
		return
	}

	// If we are out of time, clean up
	if hostinfo.HandshakeCounter >= c.config.retries {
		hostinfo.logger(c.l).WithField("udpAddrs", hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)).
			WithField("initiatorIndex", hostinfo.localIndexId).
			WithField("remoteIndex", hostinfo.remoteIndexId).
			WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
			WithField("durationNs", time.Since(hostinfo.handshakeStart).Nanoseconds()).
			Info("Handshake timed out")
		c.metricTimedOut.Inc(1)
		c.pendingHostMap.DeleteHostInfo(hostinfo)
		return
	if !hh.ready {
		if !ixHandshakeStage0(hm.f, hh) {
			hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval*time.Duration(hh.counter))
			return
		}
	}

	// Get a remotes object if we don't already have one.
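handleOutbound above retries on a linear backoff: each attempt increments hh.counter and the next timer-wheel tick is scheduled at tryInterval multiplied by the counter, so waits grow 1x, 2x, 3x until the counter reaches config.retries. A tiny sketch of the resulting schedule, using illustrative values rather than Nebula's shipped defaults:

package main

import (
	"fmt"
	"time"
)

func main() {
	tryInterval := 100 * time.Millisecond // illustrative, not the actual default
	retries := int64(10)                  // illustrative

	var total time.Duration
	for counter := int64(1); counter < retries; counter++ {
		wait := tryInterval * time.Duration(counter) // linear backoff: 1x, 2x, 3x...
		total += wait
		fmt.Printf("attempt %d: next retry in %v\n", counter, wait)
	}
	fmt.Printf("worst case time before the timeout fires: %v\n", total)
}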
@@ -141,11 +208,11 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
	// NB ^ This comment doesn't jive. It's how the thing gets initialized.
	// It's the common path. Should it update every time, in case a future LH query/queries give us more info?
	if hostinfo.remotes == nil {
		hostinfo.remotes = c.lightHouse.QueryCache(vpnIp)
		hostinfo.remotes = hm.lightHouse.QueryCache(vpnIp)
	}

	remotes := hostinfo.remotes.CopyAddrs(c.pendingHostMap.preferredRanges)
	remotesHaveChanged := !udp.AddrSlice(remotes).Equal(hostinfo.HandshakeLastRemotes)
	remotes := hostinfo.remotes.CopyAddrs(hm.mainHostMap.GetPreferredRanges())
	remotesHaveChanged := !slices.Equal(remotes, hh.lastRemotes)

	// We only care about a lighthouse trigger if we have new remotes to send to.
	// This is a very specific optimization for a fast lighthouse reply.
@@ -154,25 +221,25 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
		return
	}

	hostinfo.HandshakeLastRemotes = remotes
	hh.lastRemotes = remotes

	// TODO: this will generate a load of queries for hosts with only 1 ip
	// (such as ones registered to the lighthouse with only a private IP)
	// So we only do it one time after attempting 5 handshakes already.
	if len(remotes) <= 1 && hostinfo.HandshakeCounter == 5 {
	if len(remotes) <= 1 && hh.counter == 5 {
		// If we only have 1 remote it is highly likely our query raced with the other host registered within the lighthouse
		// Our vpnIp here has a tunnel with a lighthouse but has yet to send a host update packet there so we only know about
		// the learned public ip for them. Query again to short circuit the promotion counter
		c.lightHouse.QueryServer(vpnIp, f)
		hm.lightHouse.QueryServer(vpnIp)
	}

	// Send the handshake to all known ips, stage 2 takes care of assigning the hostinfo.remote based on the first to reply
	var sentTo []*udp.Addr
	hostinfo.remotes.ForEach(c.pendingHostMap.preferredRanges, func(addr *udp.Addr, _ bool) {
		c.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
		err = c.outside.WriteTo(hostinfo.HandshakePacket[0], addr)
	var sentTo []netip.AddrPort
	hostinfo.remotes.ForEach(hm.mainHostMap.GetPreferredRanges(), func(addr netip.AddrPort, _ bool) {
		hm.messageMetrics.Tx(header.Handshake, header.MessageSubType(hostinfo.HandshakePacket[0][1]), 1)
		err := hm.outside.WriteTo(hostinfo.HandshakePacket[0], addr)
		if err != nil {
			hostinfo.logger(c.l).WithField("udpAddr", addr).
			hostinfo.logger(hm.l).WithField("udpAddr", addr).
				WithField("initiatorIndex", hostinfo.localIndexId).
				WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
				WithError(err).Error("Failed to send handshake message")
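One small change worth calling out in the hunk above: remote-change detection now uses slices.Equal over []netip.AddrPort (via golang.org/x/exp/slices) instead of the custom udp.AddrSlice comparison, which works because netip.AddrPort is a comparable value type. A quick illustration with made-up addresses; the stdlib slices package (Go 1.21+) behaves the same as the x/exp import used here:

package main

import (
	"fmt"
	"net/netip"
	"slices"
)

func main() {
	last := []netip.AddrPort{netip.MustParseAddrPort("192.0.2.1:4242")}
	current := []netip.AddrPort{
		netip.MustParseAddrPort("192.0.2.1:4242"),
		netip.MustParseAddrPort("198.51.100.7:4242"),
	}

	// Equality is element-wise and order-sensitive, so a lighthouse reply that
	// adds or reorders remotes registers as a change and triggers a fresh log/send.
	remotesHaveChanged := !slices.Equal(current, last)
	fmt.Println(remotesHaveChanged) // true
}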
@@ -185,118 +252,192 @@ func (c *HandshakeManager) handleOutbound(vpnIp iputil.VpnIp, f EncWriter, light
|
||||
// Don't be too noisy or confusing if we fail to send a handshake - if we don't get through we'll eventually log a timeout,
|
||||
// so only log when the list of remotes has changed
|
||||
if remotesHaveChanged {
|
||||
hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
|
||||
hostinfo.logger(hm.l).WithField("udpAddrs", sentTo).
|
||||
WithField("initiatorIndex", hostinfo.localIndexId).
|
||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||
Info("Handshake message sent")
|
||||
} else if c.l.IsLevelEnabled(logrus.DebugLevel) {
|
||||
hostinfo.logger(c.l).WithField("udpAddrs", sentTo).
|
||||
} else if hm.l.IsLevelEnabled(logrus.DebugLevel) {
|
||||
hostinfo.logger(hm.l).WithField("udpAddrs", sentTo).
|
||||
WithField("initiatorIndex", hostinfo.localIndexId).
|
||||
WithField("handshake", m{"stage": 1, "style": "ix_psk0"}).
|
||||
Debug("Handshake message sent")
|
||||
}
|
||||
|
||||
if c.config.useRelays && len(hostinfo.remotes.relays) > 0 {
|
||||
hostinfo.logger(c.l).WithField("relays", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
|
||||
if hm.config.useRelays && len(hostinfo.remotes.relays) > 0 {
|
||||
hostinfo.logger(hm.l).WithField("relays", hostinfo.remotes.relays).Info("Attempt to relay through hosts")
|
||||
// Send a RelayRequest to all known Relay IP's
|
||||
for _, relay := range hostinfo.remotes.relays {
|
||||
// Don't relay to myself, and don't relay through the host I'm trying to connect to
|
||||
if *relay == vpnIp || *relay == c.lightHouse.myVpnIp {
|
||||
if relay == vpnIp || relay == hm.lightHouse.myVpnNet.Addr() {
|
||||
continue
|
||||
}
|
||||
relayHostInfo, err := c.mainHostMap.QueryVpnIp(*relay)
|
||||
if err != nil || relayHostInfo.remote == nil {
|
||||
hostinfo.logger(c.l).WithError(err).WithField("relay", relay.String()).Info("Establish tunnel to relay target")
|
||||
f.Handshake(*relay)
|
||||
relayHostInfo := hm.mainHostMap.QueryVpnIp(relay)
|
||||
if relayHostInfo == nil || !relayHostInfo.remote.IsValid() {
|
||||
hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Establish tunnel to relay target")
|
||||
hm.f.Handshake(relay)
|
||||
continue
|
||||
}
|
||||
// Check the relay HostInfo to see if we already established a relay through it
|
||||
if existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp); ok {
|
||||
switch existingRelay.State {
|
||||
case Established:
|
||||
hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Send handshake via relay")
|
||||
f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
|
||||
case Requested:
|
||||
hostinfo.logger(c.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
|
||||
// Re-send the CreateRelay request, in case the previous one was lost.
|
||||
m := NebulaControl{
|
||||
Type: NebulaControl_CreateRelayRequest,
|
||||
InitiatorRelayIndex: existingRelay.LocalIndex,
|
||||
RelayFromIp: uint32(c.lightHouse.myVpnIp),
|
||||
RelayToIp: uint32(vpnIp),
|
||||
}
|
||||
msg, err := m.Marshal()
|
||||
if err != nil {
|
||||
hostinfo.logger(c.l).
|
||||
WithError(err).
|
||||
Error("Failed to marshal Control message to create relay")
|
||||
} else {
|
||||
// This must send over the hostinfo, not over hm.Hosts[ip]
|
||||
f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
|
||||
c.l.WithFields(logrus.Fields{
|
||||
"relayFrom": c.lightHouse.myVpnIp,
|
||||
"relayTo": vpnIp,
|
||||
"initiatorRelayIndex": existingRelay.LocalIndex,
|
||||
"relay": *relay}).
|
||||
Info("send CreateRelayRequest")
|
||||
}
|
||||
default:
|
||||
hostinfo.logger(c.l).
|
||||
WithField("vpnIp", vpnIp).
|
||||
WithField("state", existingRelay.State).
|
||||
WithField("relay", relayHostInfo.vpnIp).
|
||||
Errorf("Relay unexpected state")
|
||||
}
|
||||
 		} else {
 			existingRelay, ok := relayHostInfo.relayState.QueryRelayForByIp(vpnIp)
 			if !ok {
 				// No relays exist or requested yet.
-				if relayHostInfo.remote != nil {
-					idx, err := AddRelay(c.l, relayHostInfo, c.mainHostMap, vpnIp, nil, TerminalType, Requested)
+				if relayHostInfo.remote.IsValid() {
+					idx, err := AddRelay(hm.l, relayHostInfo, hm.mainHostMap, vpnIp, nil, TerminalType, Requested)
 					if err != nil {
-						hostinfo.logger(c.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap")
+						hostinfo.logger(hm.l).WithField("relay", relay.String()).WithError(err).Info("Failed to add relay to hostmap")
 					}

+					//TODO: IPV6-WORK
+					myVpnIpB := hm.f.myVpnNet.Addr().As4()
+					theirVpnIpB := vpnIp.As4()
+
 					m := NebulaControl{
 						Type:                NebulaControl_CreateRelayRequest,
 						InitiatorRelayIndex: idx,
-						RelayFromIp:         uint32(c.lightHouse.myVpnIp),
-						RelayToIp:           uint32(vpnIp),
+						RelayFromIp:         binary.BigEndian.Uint32(myVpnIpB[:]),
+						RelayToIp:           binary.BigEndian.Uint32(theirVpnIpB[:]),
 					}
 					msg, err := m.Marshal()
 					if err != nil {
-						hostinfo.logger(c.l).
+						hostinfo.logger(hm.l).
 							WithError(err).
 							Error("Failed to marshal Control message to create relay")
 					} else {
-						f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
-						c.l.WithFields(logrus.Fields{
-							"relayFrom":           c.lightHouse.myVpnIp,
+						hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
+						hm.l.WithFields(logrus.Fields{
+							"relayFrom":           hm.f.myVpnNet.Addr(),
 							"relayTo":             vpnIp,
 							"initiatorRelayIndex": idx,
-							"relay":               *relay}).
+							"relay":               relay}).
 							Info("send CreateRelayRequest")
 					}
 				}
 				continue
 			}

 			switch existingRelay.State {
 			case Established:
+				hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Send handshake via relay")
+				hm.f.SendVia(relayHostInfo, existingRelay, hostinfo.HandshakePacket[0], make([]byte, 12), make([]byte, mtu), false)
+			case Disestablished:
+				// Mark this relay as 'requested'
+				relayHostInfo.relayState.UpdateRelayForByIpState(vpnIp, Requested)
+				fallthrough
 			case Requested:
+				hostinfo.logger(hm.l).WithField("relay", relay.String()).Info("Re-send CreateRelay request")
 				// Re-send the CreateRelay request, in case the previous one was lost.
+				relayFrom := hm.f.myVpnNet.Addr().As4()
+				relayTo := vpnIp.As4()
 				m := NebulaControl{
 					Type:                NebulaControl_CreateRelayRequest,
 					InitiatorRelayIndex: existingRelay.LocalIndex,
+					RelayFromIp:         binary.BigEndian.Uint32(relayFrom[:]),
+					RelayToIp:           binary.BigEndian.Uint32(relayTo[:]),
 				}

 				msg, err := m.Marshal()
 				if err != nil {
 					hostinfo.logger(hm.l).
 						WithError(err).
 						Error("Failed to marshal Control message to create relay")
 				} else {
 					// This must send over the hostinfo, not over hm.Hosts[ip]
 					hm.f.SendMessageToHostInfo(header.Control, 0, relayHostInfo, msg, make([]byte, 12), make([]byte, mtu))
 					hm.l.WithFields(logrus.Fields{
 						"relayFrom":           hm.f.myVpnNet,
 						"relayTo":             vpnIp,
 						"initiatorRelayIndex": existingRelay.LocalIndex,
 						"relay":               relay}).
 						Info("send CreateRelayRequest")
 				}
+			case PeerRequested:
+				// PeerRequested only occurs in Forwarding relays, not Terminal relays, and this is a Terminal relay case.
+				fallthrough
 			default:
 				hostinfo.logger(hm.l).
 					WithField("vpnIp", vpnIp).
 					WithField("state", existingRelay.State).
 					WithField("relay", relay).
 					Errorf("Relay unexpected state")
 			}
 		}
 	}

 	// Increment the counter to increase our delay, linear backoff
 	hostinfo.HandshakeCounter++

 	// If a lighthouse triggered this attempt then we are still in the timer wheel and do not need to re-add
 	if !lighthouseTriggered {
-		c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval*time.Duration(hostinfo.HandshakeCounter))
+		hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval*time.Duration(hh.counter))
 	}
 }
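Worth pausing on the RelayFromIp/RelayToIp change above: the control message still carries IPv4 addresses as uint32 protobuf fields, so the new netip.Addr values have to be flattened to their 4-byte form and packed big-endian. A minimal, self-contained sketch of that round trip (illustrative names, not nebula's helpers):

package main

import (
	"encoding/binary"
	"fmt"
	"net/netip"
)

// addrToUint32 packs an IPv4 netip.Addr into the big-endian uint32 wire form
// used by fields like RelayFromIp. Assumes addr is a valid IPv4 address.
func addrToUint32(addr netip.Addr) uint32 {
	b := addr.As4()
	return binary.BigEndian.Uint32(b[:])
}

// uint32ToAddr is the inverse, recovering the netip.Addr from the wire form.
func uint32ToAddr(v uint32) netip.Addr {
	var b [4]byte
	binary.BigEndian.PutUint32(b[:], v)
	return netip.AddrFrom4(b)
}

func main() {
	a := netip.MustParseAddr("172.16.0.5")
	fmt.Println(uint32ToAddr(addrToUint32(a))) // 172.16.0.5
}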

-func (c *HandshakeManager) AddVpnIp(vpnIp iputil.VpnIp, init func(*HostInfo)) *HostInfo {
-	hostinfo, created := c.pendingHostMap.AddVpnIp(vpnIp, init)
-
-	if created {
-		c.OutboundHandshakeTimer.Add(vpnIp, c.config.tryInterval)
-		c.metricInitiated.Inc(1)
-	}
+// GetOrHandshake will try to find a hostinfo with a fully formed tunnel or start a new handshake if one is not present
+// The 2nd argument will be true if the hostinfo is ready to transmit traffic
+func (hm *HandshakeManager) GetOrHandshake(vpnIp netip.Addr, cacheCb func(*HandshakeHostInfo)) (*HostInfo, bool) {
+	hm.mainHostMap.RLock()
+	h, ok := hm.mainHostMap.Hosts[vpnIp]
+	hm.mainHostMap.RUnlock()
+
+	if ok {
+		// Do not attempt promotion if you are a lighthouse
+		if !hm.lightHouse.amLighthouse {
+			h.TryPromoteBest(hm.mainHostMap.GetPreferredRanges(), hm.f)
+		}
+		return h, true
+	}
+
+	return hm.StartHandshake(vpnIp, cacheCb), false
+}

+// StartHandshake will ensure a handshake is currently being attempted for the provided vpn ip
+func (hm *HandshakeManager) StartHandshake(vpnIp netip.Addr, cacheCb func(*HandshakeHostInfo)) *HostInfo {
+	hm.Lock()
+
+	if hh, ok := hm.vpnIps[vpnIp]; ok {
+		// We are already trying to handshake with this vpn ip
+		if cacheCb != nil {
+			cacheCb(hh)
+		}
+		hm.Unlock()
+		return hh.hostinfo
+	}
+
+	hostinfo := &HostInfo{
+		vpnIp:           vpnIp,
+		HandshakePacket: make(map[uint8][]byte, 0),
+		relayState: RelayState{
+			relays:        nil,
+			relayForByIp:  map[netip.Addr]*Relay{},
+			relayForByIdx: map[uint32]*Relay{},
+		},
+	}
+
+	hh := &HandshakeHostInfo{
+		hostinfo:  hostinfo,
+		startTime: time.Now(),
+	}
+	hm.vpnIps[vpnIp] = hh
+	hm.metricInitiated.Inc(1)
+	hm.OutboundHandshakeTimer.Add(vpnIp, hm.config.tryInterval)
+
+	if cacheCb != nil {
+		cacheCb(hh)
+	}
+
+	// If this is a static host, we don't need to wait for the HostQueryReply
+	// We can trigger the handshake right now
+	_, doTrigger := hm.lightHouse.GetStaticHostList()[vpnIp]
+	if !doTrigger {
+		// Add any calculated remotes, and trigger early handshake if one found
+		doTrigger = hm.lightHouse.addCalculatedRemotes(vpnIp)
+	}
+
+	if doTrigger {
+		select {
+		case hm.trigger <- vpnIp:
+		default:
+		}
+	}
+
+	hm.Unlock()
+	hm.lightHouse.QueryServer(vpnIp)
+	return hostinfo
+}
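The select with a default case in StartHandshake is the standard Go idiom for a non-blocking channel send: if the trigger channel's buffer is full the notification is simply dropped, which is safe here because the timer wheel will retry the handshake anyway. A tiny runnable sketch of the idiom (the channel name is illustrative):

package main

import "fmt"

func main() {
	trigger := make(chan string, 1) // buffered, capacity 1

	// Non-blocking send: the second send finds the buffer full and is dropped
	// instead of blocking the caller.
	for _, ip := range []string{"10.0.0.1", "10.0.0.2"} {
		select {
		case trigger <- ip:
			fmt.Println("queued trigger for", ip)
		default:
			fmt.Println("trigger channel full, dropping", ip)
		}
	}
}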

@@ -318,10 +459,10 @@ var (
 // ErrLocalIndexCollision if we already have an entry in the main or pending
 // hostmap for the hostinfo.localIndexId.
 func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket uint8, f *Interface) (*HostInfo, error) {
-	c.pendingHostMap.Lock()
-	defer c.pendingHostMap.Unlock()
 	c.mainHostMap.Lock()
 	defer c.mainHostMap.Unlock()
+	c.Lock()
+	defer c.Unlock()

 	// Check if we already have a tunnel with this vpn ip
 	existingHostInfo, found := c.mainHostMap.Hosts[hostinfo.vpnIp]
@@ -350,10 +491,10 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
 		return existingIndex, ErrLocalIndexCollision
 	}

-	existingIndex, found = c.pendingHostMap.Indexes[hostinfo.localIndexId]
-	if found && existingIndex != hostinfo {
+	existingPendingIndex, found := c.indexes[hostinfo.localIndexId]
+	if found && existingPendingIndex.hostinfo != hostinfo {
 		// We have a collision, but for a different hostinfo
-		return existingIndex, ErrLocalIndexCollision
+		return existingPendingIndex.hostinfo, ErrLocalIndexCollision
 	}

 	existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
@@ -372,47 +513,47 @@ func (c *HandshakeManager) CheckAndComplete(hostinfo *HostInfo, handshakePacket
 // Complete is a simpler version of CheckAndComplete when we already know we
 // won't have a localIndexId collision because we already have an entry in the
 // pendingHostMap. An existing hostinfo is returned if there was one.
-func (c *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
-	c.pendingHostMap.Lock()
-	defer c.pendingHostMap.Unlock()
-	c.mainHostMap.Lock()
-	defer c.mainHostMap.Unlock()
+func (hm *HandshakeManager) Complete(hostinfo *HostInfo, f *Interface) {
+	hm.mainHostMap.Lock()
+	defer hm.mainHostMap.Unlock()
+	hm.Lock()
+	defer hm.Unlock()

-	existingRemoteIndex, found := c.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
+	existingRemoteIndex, found := hm.mainHostMap.RemoteIndexes[hostinfo.remoteIndexId]
 	if found && existingRemoteIndex != nil {
 		// We have a collision, but this can happen since we can't control
 		// the remote ID. Just log about the situation as a note.
-		hostinfo.logger(c.l).
+		hostinfo.logger(hm.l).
 			WithField("remoteIndex", hostinfo.remoteIndexId).WithField("collision", existingRemoteIndex.vpnIp).
 			Info("New host shadows existing host remoteIndex")
 	}

 	// We need to remove from the pending hostmap first to avoid undoing work when after to the main hostmap.
-	c.pendingHostMap.unlockedDeleteHostInfo(hostinfo)
-	c.mainHostMap.unlockedAddHostInfo(hostinfo, f)
+	hm.unlockedDeleteHostInfo(hostinfo)
+	hm.mainHostMap.unlockedAddHostInfo(hostinfo, f)
 }

-// AddIndexHostInfo generates a unique localIndexId for this HostInfo
+// allocateIndex generates a unique localIndexId for this HostInfo
 // and adds it to the pendingHostMap. Will error if we are unable to generate
 // a unique localIndexId
-func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error {
-	c.pendingHostMap.Lock()
-	defer c.pendingHostMap.Unlock()
-	c.mainHostMap.RLock()
-	defer c.mainHostMap.RUnlock()
+func (hm *HandshakeManager) allocateIndex(hh *HandshakeHostInfo) error {
+	hm.mainHostMap.RLock()
+	defer hm.mainHostMap.RUnlock()
+	hm.Lock()
+	defer hm.Unlock()

 	for i := 0; i < 32; i++ {
-		index, err := generateIndex(c.l)
+		index, err := generateIndex(hm.l)
 		if err != nil {
 			return err
 		}

-		_, inPending := c.pendingHostMap.Indexes[index]
-		_, inMain := c.mainHostMap.Indexes[index]
+		_, inPending := hm.indexes[index]
+		_, inMain := hm.mainHostMap.Indexes[index]

 		if !inMain && !inPending {
-			h.localIndexId = index
-			c.pendingHostMap.Indexes[index] = h
+			hh.hostinfo.localIndexId = index
+			hm.indexes[index] = hh
 			return nil
 		}
 	}
@@ -420,22 +561,90 @@ func (c *HandshakeManager) AddIndexHostInfo(h *HostInfo) error {
 	return errors.New("failed to generate unique localIndexId")
 }
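The allocation loop leans on randomness rather than a counter: draw a random 32-bit index, check it against both the pending and main maps, and retry on collision, with a bounded retry count so the failure case stays reachable but vanishingly rare. A self-contained sketch of the same pattern, under the assumption that the real generateIndex draws its index from crypto/rand (the maps here are stand-ins, not nebula's types):

package main

import (
	"crypto/rand"
	"encoding/binary"
	"errors"
	"fmt"
)

// randomIndex draws 4 random bytes and interprets them as a uint32.
func randomIndex() (uint32, error) {
	var b [4]byte
	if _, err := rand.Read(b[:]); err != nil {
		return 0, err
	}
	return binary.BigEndian.Uint32(b[:]), nil
}

// allocate retries until it finds an index unused by either map.
func allocate(pending, main map[uint32]struct{}) (uint32, error) {
	for i := 0; i < 32; i++ {
		idx, err := randomIndex()
		if err != nil {
			return 0, err
		}
		_, inPending := pending[idx]
		_, inMain := main[idx]
		if !inPending && !inMain {
			pending[idx] = struct{}{} // claim it in the pending map
			return idx, nil
		}
	}
	return 0, errors.New("failed to generate unique index")
}

func main() {
	idx, err := allocate(map[uint32]struct{}{}, map[uint32]struct{}{})
	fmt.Println(idx, err)
}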

-func (c *HandshakeManager) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
-	c.pendingHostMap.addRemoteIndexHostInfo(index, h)
-}
-
 func (c *HandshakeManager) DeleteHostInfo(hostinfo *HostInfo) {
-	//l.Debugln("Deleting pending hostinfo :", hostinfo)
-	c.pendingHostMap.DeleteHostInfo(hostinfo)
+	c.Lock()
+	defer c.Unlock()
+	c.unlockedDeleteHostInfo(hostinfo)
 }

-func (c *HandshakeManager) QueryIndex(index uint32) (*HostInfo, error) {
-	return c.pendingHostMap.QueryIndex(index)
+func (c *HandshakeManager) unlockedDeleteHostInfo(hostinfo *HostInfo) {
+	delete(c.vpnIps, hostinfo.vpnIp)
+	if len(c.vpnIps) == 0 {
+		c.vpnIps = map[netip.Addr]*HandshakeHostInfo{}
+	}
+
+	delete(c.indexes, hostinfo.localIndexId)
+	if len(c.vpnIps) == 0 {
+		c.indexes = map[uint32]*HandshakeHostInfo{}
+	}
+
+	if c.l.Level >= logrus.DebugLevel {
+		c.l.WithField("hostMap", m{"mapTotalSize": len(c.vpnIps),
+			"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
+			Debug("Pending hostmap hostInfo deleted")
+	}
 }

+func (hm *HandshakeManager) QueryVpnIp(vpnIp netip.Addr) *HostInfo {
+	hh := hm.queryVpnIp(vpnIp)
+	if hh != nil {
+		return hh.hostinfo
+	}
+	return nil
+}
+
+func (hm *HandshakeManager) queryVpnIp(vpnIp netip.Addr) *HandshakeHostInfo {
+	hm.RLock()
+	defer hm.RUnlock()
+	return hm.vpnIps[vpnIp]
+}
+
+func (hm *HandshakeManager) QueryIndex(index uint32) *HostInfo {
+	hh := hm.queryIndex(index)
+	if hh != nil {
+		return hh.hostinfo
+	}
+	return nil
+}
+
+func (hm *HandshakeManager) queryIndex(index uint32) *HandshakeHostInfo {
+	hm.RLock()
+	defer hm.RUnlock()
+	return hm.indexes[index]
+}

 func (c *HandshakeManager) GetPreferredRanges() []netip.Prefix {
 	return c.mainHostMap.GetPreferredRanges()
 }

 func (c *HandshakeManager) ForEachVpnIp(f controlEach) {
 	c.RLock()
 	defer c.RUnlock()

 	for _, v := range c.vpnIps {
 		f(v.hostinfo)
 	}
 }

 func (c *HandshakeManager) ForEachIndex(f controlEach) {
 	c.RLock()
 	defer c.RUnlock()

 	for _, v := range c.indexes {
 		f(v.hostinfo)
 	}
 }

 func (c *HandshakeManager) EmitStats() {
-	c.pendingHostMap.EmitStats("pending")
-	c.mainHostMap.EmitStats("main")
+	c.RLock()
+	hostLen := len(c.vpnIps)
+	indexLen := len(c.indexes)
+	c.RUnlock()
+
+	metrics.GetOrRegisterGauge("hostmap.pending.hosts", nil).Update(int64(hostLen))
+	metrics.GetOrRegisterGauge("hostmap.pending.indexes", nil).Update(int64(indexLen))
+	c.mainHostMap.EmitStats()
 }
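EmitStats relies on rcrowley/go-metrics gauges keyed by name; GetOrRegisterGauge lazily creates the gauge on first use, so there is no explicit registration step. A minimal standalone sketch of the same idiom (the metric name here is illustrative, not one of nebula's):

package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	// A nil registry means the package-level default registry.
	metrics.GetOrRegisterGauge("example.pending.hosts", nil).Update(42)

	// A later call with the same name returns the same gauge.
	g := metrics.GetOrRegisterGauge("example.pending.hosts", nil)
	fmt.Println(g.Value()) // 42
}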

// Utility functions below
@@ -462,6 +671,6 @@ func generateIndex(l *logrus.Logger) (uint32, error) {
 	return index, nil
 }

-func hsTimeout(tries int, interval time.Duration) time.Duration {
-	return time.Duration(tries / 2 * ((2 * int(interval)) + (tries-1)*int(interval)))
+func hsTimeout(tries int64, interval time.Duration) time.Duration {
+	return time.Duration(tries / 2 * ((2 * int64(interval)) + (tries-1)*int64(interval)))
 }
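hsTimeout is the closed form of the arithmetic series behind the linear backoff: with waits of interval, 2*interval, ..., tries*interval, the total is tries/2 * (2*interval + (tries-1)*interval), i.e. interval * tries * (tries+1) / 2. For example, 10 tries at 100ms sum to 5.5s. A quick runnable sanity check of that equivalence (note the integer division of tries/2 truncates slightly for odd tries):

package main

import (
	"fmt"
	"time"
)

func hsTimeout(tries int64, interval time.Duration) time.Duration {
	return time.Duration(tries / 2 * ((2 * int64(interval)) + (tries-1)*int64(interval)))
}

func main() {
	// Sum each retry's wait the long way and compare to the closed form.
	var total time.Duration
	for i := int64(1); i <= 10; i++ {
		total += time.Duration(i) * (100 * time.Millisecond)
	}
	fmt.Println(total, hsTimeout(10, 100*time.Millisecond)) // 5.5s 5.5s
}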

@@ -1,12 +1,12 @@
 package nebula

 import (
-	"net"
+	"net/netip"
 	"testing"
 	"time"

 	"github.com/slackhq/nebula/cert"
 	"github.com/slackhq/nebula/header"
-	"github.com/slackhq/nebula/iputil"
 	"github.com/slackhq/nebula/test"
 	"github.com/slackhq/nebula/udp"
 	"github.com/stretchr/testify/assert"
@@ -14,59 +14,59 @@ import (

 func Test_NewHandshakeManagerVpnIp(t *testing.T) {
 	l := test.NewLogger()
-	_, tuncidr, _ := net.ParseCIDR("172.1.1.1/24")
-	_, vpncidr, _ := net.ParseCIDR("172.1.1.1/24")
-	_, localrange, _ := net.ParseCIDR("10.1.1.1/24")
-	ip := iputil.Ip2VpnIp(net.ParseIP("172.1.1.2"))
-	preferredRanges := []*net.IPNet{localrange}
-	mw := &mockEncWriter{}
-	mainHM := NewHostMap(l, "test", vpncidr, preferredRanges)
+	vpncidr := netip.MustParsePrefix("172.1.1.1/24")
+	localrange := netip.MustParsePrefix("10.1.1.1/24")
+	ip := netip.MustParseAddr("172.1.1.2")
+
+	preferredRanges := []netip.Prefix{localrange}
+	mainHM := newHostMap(l, vpncidr)
+	mainHM.preferredRanges.Store(&preferredRanges)

 	lh := newTestLighthouse()

-	blah := NewHandshakeManager(l, tuncidr, preferredRanges, mainHM, lh, &udp.Conn{}, defaultHandshakeConfig)
-
-	now := time.Now()
-	blah.NextOutboundHandshakeTimerTick(now, mw)
-
-	var initCalled bool
-	initFunc := func(*HostInfo) {
-		initCalled = true
-	}
-
-	i := blah.AddVpnIp(ip, initFunc)
-	assert.True(t, initCalled)
-
-	initCalled = false
-	i2 := blah.AddVpnIp(ip, initFunc)
-	assert.False(t, initCalled)
+	cs := &CertState{
+		RawCertificate:      []byte{},
+		PrivateKey:          []byte{},
+		Certificate:         &cert.NebulaCertificate{},
+		RawCertificateNoKey: []byte{},
+	}
+
+	blah := NewHandshakeManager(l, mainHM, lh, &udp.NoopConn{}, defaultHandshakeConfig)
+	blah.f = &Interface{handshakeManager: blah, pki: &PKI{}, l: l}
+	blah.f.pki.cs.Store(cs)
+
+	now := time.Now()
+	blah.NextOutboundHandshakeTimerTick(now)
+
+	i := blah.StartHandshake(ip, nil)
+	i2 := blah.StartHandshake(ip, nil)
 	assert.Same(t, i, i2)

 	i.remotes = NewRemoteList(nil)
-	i.HandshakeReady = true

 	// Adding something to pending should not affect the main hostmap
 	assert.Len(t, mainHM.Hosts, 0)

 	// Confirm they are in the pending index list
-	assert.Contains(t, blah.pendingHostMap.Hosts, ip)
+	assert.Contains(t, blah.vpnIps, ip)

 	// Jump ahead `HandshakeRetries` ticks, offset by one to get the sleep logic right
 	for i := 1; i <= DefaultHandshakeRetries+1; i++ {
 		now = now.Add(time.Duration(i) * DefaultHandshakeTryInterval)
-		blah.NextOutboundHandshakeTimerTick(now, mw)
+		blah.NextOutboundHandshakeTimerTick(now)
 	}

 	// Confirm they are still in the pending index list
-	assert.Contains(t, blah.pendingHostMap.Hosts, ip)
+	assert.Contains(t, blah.vpnIps, ip)

 	// Tick 1 more time, a minute will certainly flush it out
-	blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute), mw)
+	blah.NextOutboundHandshakeTimerTick(now.Add(time.Minute))

 	// Confirm they have been removed
-	assert.NotContains(t, blah.pendingHostMap.Hosts, ip)
+	assert.NotContains(t, blah.vpnIps, ip)
 }

-func testCountTimerWheelEntries(tw *LockingTimerWheel[iputil.VpnIp]) (c int) {
+func testCountTimerWheelEntries(tw *LockingTimerWheel[netip.Addr]) (c int) {
 	for _, i := range tw.t.wheel {
 		n := i.Head
 		for n != nil {
@@ -80,7 +80,7 @@ func testCountTimerWheelEntries(tw *LockingTimerWheel[iputil.VpnIp]) (c int) {
 type mockEncWriter struct {
 }

-func (mw *mockEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp iputil.VpnIp, p, nb, out []byte) {
+func (mw *mockEncWriter) SendMessageToVpnIp(t header.MessageType, st header.MessageSubType, vpnIp netip.Addr, p, nb, out []byte) {
 	return
 }

@@ -92,4 +92,4 @@ func (mw *mockEncWriter) SendMessageToHostInfo(t header.MessageType, st header.M
 	return
 }

-func (mw *mockEncWriter) Handshake(vpnIP iputil.VpnIp) {}
+func (mw *mockEncWriter) Handshake(vpnIP netip.Addr) {}

hostmap.go (551 changed lines)
@@ -2,24 +2,25 @@ package nebula

 import (
 	"errors"
-	"fmt"
 	"net"
+	"net/netip"
+	"slices"
 	"sync"
 	"sync/atomic"
 	"time"

+	"github.com/gaissmai/bart"
 	"github.com/rcrowley/go-metrics"
 	"github.com/sirupsen/logrus"
 	"github.com/slackhq/nebula/cert"
-	"github.com/slackhq/nebula/cidr"
+	"github.com/slackhq/nebula/config"
 	"github.com/slackhq/nebula/header"
-	"github.com/slackhq/nebula/iputil"
 	"github.com/slackhq/nebula/udp"
 )

 // const ProbeLen = 100
-const PromoteEvery = 1000
-const ReQueryEvery = 5000
+const defaultPromoteEvery = 1000       // Count of packets sent before we try moving a tunnel to a preferred underlay ip address
+const defaultReQueryEvery = 5000       // Count of packets sent before re-querying a hostinfo to the lighthouse
+const defaultReQueryWait = time.Minute // Minimum amount of seconds to wait before re-querying a hostinfo the lighthouse. Evaluated every ReQueryEvery
 const MaxRemotes = 10

 // MaxHostInfosPerVpnIp is the max number of hostinfos we will track for a given vpn ip
@@ -34,6 +35,7 @@ const (
 	Requested = iota
 	PeerRequested
 	Established
+	Disestablished
 )

 const (
@@ -47,19 +49,17 @@ type Relay struct {
 	State       int
 	LocalIndex  uint32
 	RemoteIndex uint32
-	PeerIp      iputil.VpnIp
+	PeerIp      netip.Addr
 }

 type HostMap struct {
 	sync.RWMutex //Because we concurrently read and write to our maps
-	name            string
 	Indexes         map[uint32]*HostInfo
 	Relays          map[uint32]*HostInfo // Maps a Relay IDX to a Relay HostInfo object
 	RemoteIndexes   map[uint32]*HostInfo
-	Hosts           map[iputil.VpnIp]*HostInfo
-	preferredRanges []*net.IPNet
-	vpnCIDR         *net.IPNet
-	metricsEnabled  bool
+	Hosts           map[netip.Addr]*HostInfo
+	preferredRanges atomic.Pointer[[]netip.Prefix]
+	vpnCIDR         netip.Prefix
 	l               *logrus.Logger
 }
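Storing preferredRanges behind an atomic.Pointer to the slice is what lets config reloads swap the ranges without taking the hostmap's RWMutex on every packet-path read. A minimal sketch of the idiom, independent of the HostMap type:

package main

import (
	"fmt"
	"net/netip"
	"sync/atomic"
)

func main() {
	var ranges atomic.Pointer[[]netip.Prefix]

	// Writers publish a fresh slice; they never mutate the old one in place.
	initial := []netip.Prefix{netip.MustParsePrefix("10.1.1.0/24")}
	ranges.Store(&initial)

	// Readers grab a consistent snapshot with a single atomic load.
	snapshot := *ranges.Load()
	fmt.Println(snapshot)

	// A reload swaps in a new slice; Swap returns the old one for logging.
	updated := []netip.Prefix{netip.MustParsePrefix("192.168.0.0/16")}
	old := ranges.Swap(&updated)
	fmt.Println(*old, *ranges.Load())
}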

@@ -69,15 +69,42 @@ type HostMap struct {
 type RelayState struct {
 	sync.RWMutex

-	relays        map[iputil.VpnIp]struct{} // Set of VpnIp's of Hosts to use as relays to access this peer
-	relayForByIp  map[iputil.VpnIp]*Relay   // Maps VpnIps of peers for which this HostInfo is a relay to some Relay info
-	relayForByIdx map[uint32]*Relay         // Maps a local index to some Relay info
+	relays        []netip.Addr          // Ordered set of VpnIp's of Hosts to use as relays to access this peer
+	relayForByIp  map[netip.Addr]*Relay // Maps VpnIps of peers for which this HostInfo is a relay to some Relay info
+	relayForByIdx map[uint32]*Relay     // Maps a local index to some Relay info
 }

-func (rs *RelayState) DeleteRelay(ip iputil.VpnIp) {
+func (rs *RelayState) DeleteRelay(ip netip.Addr) {
 	rs.Lock()
 	defer rs.Unlock()
-	delete(rs.relays, ip)
+	for idx, val := range rs.relays {
+		if val == ip {
+			rs.relays = append(rs.relays[:idx], rs.relays[idx+1:]...)
+			return
+		}
+	}
 }

+func (rs *RelayState) UpdateRelayForByIpState(vpnIp netip.Addr, state int) {
+	rs.Lock()
+	defer rs.Unlock()
+	if r, ok := rs.relayForByIp[vpnIp]; ok {
+		newRelay := *r
+		newRelay.State = state
+		rs.relayForByIp[newRelay.PeerIp] = &newRelay
+		rs.relayForByIdx[newRelay.LocalIndex] = &newRelay
+	}
+}
+
+func (rs *RelayState) UpdateRelayForByIdxState(idx uint32, state int) {
+	rs.Lock()
+	defer rs.Unlock()
+	if r, ok := rs.relayForByIdx[idx]; ok {
+		newRelay := *r
+		newRelay.State = state
+		rs.relayForByIp[newRelay.PeerIp] = &newRelay
+		rs.relayForByIdx[newRelay.LocalIndex] = &newRelay
+	}
+}
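Note the copy-on-write shape of the two Update functions above: rather than mutating the shared *Relay in place, they copy the struct, flip State on the copy, and republish the new pointer in both maps, so a reader holding an old pointer never observes a half-written update. A stripped-down, runnable sketch of the same pattern (toy types, not nebula's):

package main

import (
	"fmt"
	"sync"
)

type entry struct {
	state int
	id    uint32
}

type table struct {
	sync.RWMutex
	byID map[uint32]*entry
}

// setState publishes a modified copy instead of mutating the shared struct.
func (t *table) setState(id uint32, state int) {
	t.Lock()
	defer t.Unlock()
	if e, ok := t.byID[id]; ok {
		n := *e // copy the struct
		n.state = state
		t.byID[id] = &n // swap in the new pointer
	}
}

func main() {
	t := &table{byID: map[uint32]*entry{7: {id: 7}}}
	old := t.byID[7]
	t.setState(7, 2)
	fmt.Println(old.state, t.byID[7].state) // 0 2: the old snapshot is untouched
}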

 func (rs *RelayState) CopyAllRelayFor() []*Relay {
@@ -90,33 +117,33 @@ func (rs *RelayState) CopyAllRelayFor() []*Relay {
 	return ret
 }

-func (rs *RelayState) GetRelayForByIp(ip iputil.VpnIp) (*Relay, bool) {
+func (rs *RelayState) GetRelayForByIp(ip netip.Addr) (*Relay, bool) {
 	rs.RLock()
 	defer rs.RUnlock()
 	r, ok := rs.relayForByIp[ip]
 	return r, ok
 }

-func (rs *RelayState) InsertRelayTo(ip iputil.VpnIp) {
+func (rs *RelayState) InsertRelayTo(ip netip.Addr) {
 	rs.Lock()
 	defer rs.Unlock()
-	rs.relays[ip] = struct{}{}
+	if !slices.Contains(rs.relays, ip) {
+		rs.relays = append(rs.relays, ip)
+	}
 }

-func (rs *RelayState) CopyRelayIps() []iputil.VpnIp {
+func (rs *RelayState) CopyRelayIps() []netip.Addr {
+	ret := make([]netip.Addr, len(rs.relays))
 	rs.RLock()
 	defer rs.RUnlock()
-	ret := make([]iputil.VpnIp, 0, len(rs.relays))
-	for ip := range rs.relays {
-		ret = append(ret, ip)
-	}
+	copy(ret, rs.relays)
 	return ret
 }

-func (rs *RelayState) CopyRelayForIps() []iputil.VpnIp {
+func (rs *RelayState) CopyRelayForIps() []netip.Addr {
 	rs.RLock()
 	defer rs.RUnlock()
-	currentRelays := make([]iputil.VpnIp, 0, len(rs.relayForByIp))
+	currentRelays := make([]netip.Addr, 0, len(rs.relayForByIp))
 	for relayIp := range rs.relayForByIp {
 		currentRelays = append(currentRelays, relayIp)
 	}
@@ -133,19 +160,7 @@ func (rs *RelayState) CopyRelayForIdxs() []uint32 {
 	return ret
 }

-func (rs *RelayState) RemoveRelay(localIdx uint32) (iputil.VpnIp, bool) {
-	rs.Lock()
-	defer rs.Unlock()
-	r, ok := rs.relayForByIdx[localIdx]
-	if !ok {
-		return iputil.VpnIp(0), false
-	}
-	delete(rs.relayForByIdx, localIdx)
-	delete(rs.relayForByIp, r.PeerIp)
-	return r.PeerIp, true
-}
-
-func (rs *RelayState) CompleteRelayByIP(vpnIp iputil.VpnIp, remoteIdx uint32) bool {
+func (rs *RelayState) CompleteRelayByIP(vpnIp netip.Addr, remoteIdx uint32) bool {
 	rs.Lock()
 	defer rs.Unlock()
 	r, ok := rs.relayForByIp[vpnIp]
@@ -175,7 +190,7 @@ func (rs *RelayState) CompleteRelayByIdx(localIdx uint32, remoteIdx uint32) (*Re
 	return &newRelay, true
 }

-func (rs *RelayState) QueryRelayForByIp(vpnIp iputil.VpnIp) (*Relay, bool) {
+func (rs *RelayState) QueryRelayForByIp(vpnIp netip.Addr) (*Relay, bool) {
 	rs.RLock()
 	defer rs.RUnlock()
 	r, ok := rs.relayForByIp[vpnIp]
@@ -189,7 +204,7 @@ func (rs *RelayState) QueryRelayForByIdx(idx uint32) (*Relay, bool) {
 	return r, ok
 }

-func (rs *RelayState) InsertRelay(ip iputil.VpnIp, idx uint32, r *Relay) {
+func (rs *RelayState) InsertRelay(ip netip.Addr, idx uint32, r *Relay) {
 	rs.Lock()
 	defer rs.Unlock()
 	rs.relayForByIp[ip] = r
@@ -197,25 +212,23 @@ func (rs *RelayState) InsertRelay(ip netip.Addr, idx uint32, r *Relay) {
 }

 type HostInfo struct {
 	sync.RWMutex
-	remote               *udp.Addr
-	remotes              *RemoteList
-	promoteCounter       atomic.Uint32
-	ConnectionState      *ConnectionState
-	handshakeStart       time.Time        //todo: this an entry in the handshake manager
-	HandshakeReady       bool             //todo: being in the manager means you are ready
-	HandshakeCounter     int              //todo: another handshake manager entry
-	HandshakeLastRemotes []*udp.Addr      //todo: another handshake manager entry, which remotes we sent to last time
-	HandshakeComplete    bool             //todo: this should go away in favor of ConnectionState.ready
-	HandshakePacket      map[uint8][]byte //todo: this is other handshake manager entry
-	packetStore          []*cachedPacket  //todo: this is other handshake manager entry
-	remoteIndexId        uint32
-	localIndexId         uint32
-	vpnIp                iputil.VpnIp
-	recvError            int
-	remoteCidr           *cidr.Tree4
-	relayState           RelayState
+	remote          netip.AddrPort
+	remotes         *RemoteList
+	promoteCounter  atomic.Uint32
+	ConnectionState *ConnectionState
+	remoteIndexId   uint32
+	localIndexId    uint32
+	vpnIp           netip.Addr
+	remoteCidr      *bart.Table[struct{}]
+	relayState      RelayState
+
+	// HandshakePacket records the packets used to create this hostinfo
+	// We need these to avoid replayed handshake packets creating new hostinfos which causes churn
+	HandshakePacket map[uint8][]byte
+
+	// nextLHQuery is the earliest we can ask the lighthouse for new information.
+	// This is used to limit lighthouse re-queries in chatty clients
+	nextLHQuery atomic.Int64

 	// lastRebindCount is the other side of Interface.rebindCount, if these values don't match then we need to ask LH
 	// for a punch from the remote end of this tunnel. The goal being to prime their conntrack for our traffic just like
@@ -228,11 +241,19 @@ type HostInfo struct {
 	lastHandshakeTime uint64

 	lastRoam       time.Time
-	lastRoamRemote *udp.Addr
+	lastRoamRemote netip.AddrPort

 	// Used to track other hostinfos for this vpn ip since only 1 can be primary
 	// Synchronised via hostmap lock and not the hostinfo lock.
 	next, prev *HostInfo
+
+	//TODO: in, out, and others might benefit from being an atomic.Int32. We could collapse connectionManager pendingDeletion, relayUsed, and in/out into this 1 thing
+	in, out, pendingDeletion atomic.Bool
+
+	// lastUsed tracks the last time ConnectionManager checked the tunnel and it was in use.
+	// This value will be behind against actual tunnel utilization in the hot path.
+	// This should only be used by the ConnectionManagers ticker routine.
+	lastUsed time.Time
 }

 type ViaSender struct {
@@ -255,26 +276,57 @@ type cachedPacketMetrics struct {
 	dropped metrics.Counter
 }

-func NewHostMap(l *logrus.Logger, name string, vpnCIDR *net.IPNet, preferredRanges []*net.IPNet) *HostMap {
-	h := map[iputil.VpnIp]*HostInfo{}
-	i := map[uint32]*HostInfo{}
-	r := map[uint32]*HostInfo{}
-	relays := map[uint32]*HostInfo{}
-	m := HostMap{
-		name:            name,
-		Indexes:         i,
-		Relays:          relays,
-		RemoteIndexes:   r,
-		Hosts:           h,
-		preferredRanges: preferredRanges,
-		vpnCIDR:         vpnCIDR,
-		l:               l,
-	}
-	return &m
+func NewHostMapFromConfig(l *logrus.Logger, vpnCIDR netip.Prefix, c *config.C) *HostMap {
+	hm := newHostMap(l, vpnCIDR)
+
+	hm.reload(c, true)
+	c.RegisterReloadCallback(func(c *config.C) {
+		hm.reload(c, false)
+	})
+
+	l.WithField("network", hm.vpnCIDR.String()).
+		WithField("preferredRanges", hm.GetPreferredRanges()).
+		Info("Main HostMap created")
+
+	return hm
 }

-// UpdateStats takes a name and reports host and index counts to the stats collection system
-func (hm *HostMap) EmitStats(name string) {
+func newHostMap(l *logrus.Logger, vpnCIDR netip.Prefix) *HostMap {
+	return &HostMap{
+		Indexes:       map[uint32]*HostInfo{},
+		Relays:        map[uint32]*HostInfo{},
+		RemoteIndexes: map[uint32]*HostInfo{},
+		Hosts:         map[netip.Addr]*HostInfo{},
+		vpnCIDR:       vpnCIDR,
+		l:             l,
+	}
+}
+
+func (hm *HostMap) reload(c *config.C, initial bool) {
+	if initial || c.HasChanged("preferred_ranges") {
+		var preferredRanges []netip.Prefix
+		rawPreferredRanges := c.GetStringSlice("preferred_ranges", []string{})
+
+		for _, rawPreferredRange := range rawPreferredRanges {
+			preferredRange, err := netip.ParsePrefix(rawPreferredRange)
+
+			if err != nil {
+				hm.l.WithError(err).WithField("range", rawPreferredRanges).Warn("Failed to parse preferred ranges, ignoring")
+				continue
+			}
+
+			preferredRanges = append(preferredRanges, preferredRange)
+		}
+
+		oldRanges := hm.preferredRanges.Swap(&preferredRanges)
+		if !initial {
+			hm.l.WithField("oldPreferredRanges", *oldRanges).WithField("newPreferredRanges", preferredRanges).Info("preferred_ranges changed")
+		}
+	}
+}
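The reload path is the interesting part of NewHostMapFromConfig: the same function handles first load (initial=true) and every later config reload, and only republishes when the watched key actually changed, tolerating bad entries instead of failing the whole reload. A self-contained sketch that mirrors that tolerant-parse shape (a stand-in for the real function, which also does the atomic swap shown above):

package main

import (
	"fmt"
	"net/netip"
)

// parseRanges mimics HostMap.reload's parsing: skip bad entries with a
// warning and keep whatever parsed cleanly.
func parseRanges(raw []string) []netip.Prefix {
	var out []netip.Prefix
	for _, r := range raw {
		p, err := netip.ParsePrefix(r)
		if err != nil {
			fmt.Println("failed to parse preferred range, ignoring:", r)
			continue
		}
		out = append(out, p)
	}
	return out
}

func main() {
	fmt.Println(parseRanges([]string{"10.1.0.0/16", "not-a-cidr", "192.168.1.0/24"}))
}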

+// EmitStats reports host, index, and relay counts to the stats collection system
+func (hm *HostMap) EmitStats() {
 	hm.RLock()
 	hostLen := len(hm.Hosts)
 	indexLen := len(hm.Indexes)
@@ -282,10 +334,10 @@ func (hm *HostMap) EmitStats(name string) {
 	relaysLen := len(hm.Relays)
 	hm.RUnlock()

-	metrics.GetOrRegisterGauge("hostmap."+name+".hosts", nil).Update(int64(hostLen))
-	metrics.GetOrRegisterGauge("hostmap."+name+".indexes", nil).Update(int64(indexLen))
-	metrics.GetOrRegisterGauge("hostmap."+name+".remoteIndexes", nil).Update(int64(remoteIndexLen))
-	metrics.GetOrRegisterGauge("hostmap."+name+".relayIndexes", nil).Update(int64(relaysLen))
+	metrics.GetOrRegisterGauge("hostmap.main.hosts", nil).Update(int64(hostLen))
+	metrics.GetOrRegisterGauge("hostmap.main.indexes", nil).Update(int64(indexLen))
+	metrics.GetOrRegisterGauge("hostmap.main.remoteIndexes", nil).Update(int64(remoteIndexLen))
+	metrics.GetOrRegisterGauge("hostmap.main.relayIndexes", nil).Update(int64(relaysLen))
 }

 func (hm *HostMap) RemoveRelay(localIdx uint32) {
@@ -299,88 +351,6 @@ func (hm *HostMap) RemoveRelay(localIdx uint32) {
 	hm.Unlock()
 }

-func (hm *HostMap) GetIndexByVpnIp(vpnIp iputil.VpnIp) (uint32, error) {
-	hm.RLock()
-	if i, ok := hm.Hosts[vpnIp]; ok {
-		index := i.localIndexId
-		hm.RUnlock()
-		return index, nil
-	}
-	hm.RUnlock()
-	return 0, errors.New("vpn IP not found")
-}
-
-func (hm *HostMap) Add(ip iputil.VpnIp, hostinfo *HostInfo) {
-	hm.Lock()
-	hm.Hosts[ip] = hostinfo
-	hm.Unlock()
-}
-
-func (hm *HostMap) AddVpnIp(vpnIp iputil.VpnIp, init func(hostinfo *HostInfo)) (hostinfo *HostInfo, created bool) {
-	hm.RLock()
-	if h, ok := hm.Hosts[vpnIp]; !ok {
-		hm.RUnlock()
-		h = &HostInfo{
-			vpnIp:           vpnIp,
-			HandshakePacket: make(map[uint8][]byte, 0),
-			relayState: RelayState{
-				relays:        map[iputil.VpnIp]struct{}{},
-				relayForByIp:  map[iputil.VpnIp]*Relay{},
-				relayForByIdx: map[uint32]*Relay{},
-			},
-		}
-		if init != nil {
-			init(h)
-		}
-		hm.Lock()
-		hm.Hosts[vpnIp] = h
-		hm.Unlock()
-		return h, true
-	} else {
-		hm.RUnlock()
-		return h, false
-	}
-}
-
-// Only used by pendingHostMap when the remote index is not initially known
-func (hm *HostMap) addRemoteIndexHostInfo(index uint32, h *HostInfo) {
-	hm.Lock()
-	h.remoteIndexId = index
-	hm.RemoteIndexes[index] = h
-	hm.Unlock()
-
-	if hm.l.Level > logrus.DebugLevel {
-		hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes),
-			"hostinfo": m{"existing": true, "localIndexId": h.localIndexId, "hostId": h.vpnIp}}).
-			Debug("Hostmap remoteIndex added")
-	}
-}
-
-// DeleteReverseIndex is used to clean up on recv_error
-// This function should only ever be called on the pending hostmap
-func (hm *HostMap) DeleteReverseIndex(index uint32) {
-	hm.Lock()
-	hostinfo, ok := hm.RemoteIndexes[index]
-	if ok {
-		delete(hm.Indexes, hostinfo.localIndexId)
-		delete(hm.RemoteIndexes, index)
-
-		// Check if we have an entry under hostId that matches the same hostinfo
-		// instance. Clean it up as well if we do (they might not match in pendingHostmap)
-		var hostinfo2 *HostInfo
-		hostinfo2, ok = hm.Hosts[hostinfo.vpnIp]
-		if ok && hostinfo2 == hostinfo {
-			delete(hm.Hosts, hostinfo.vpnIp)
-		}
-	}
-	hm.Unlock()
-
-	if hm.l.Level >= logrus.DebugLevel {
-		hm.l.WithField("hostMap", m{"mapName": hm.name, "indexNumber": index, "mapTotalSize": len(hm.Indexes)}).
-			Debug("Hostmap remote index deleted")
-	}
-}

 // DeleteHostInfo will fully unlink the hostinfo and return true if it was the final hostinfo for this vpn ip
 func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
 	// Delete the host itself, ensuring it's not modified anymore
@@ -393,12 +363,6 @@ func (hm *HostMap) DeleteHostInfo(hostinfo *HostInfo) bool {
 	return final
 }

-func (hm *HostMap) DeleteRelayIdx(localIdx uint32) {
-	hm.Lock()
-	defer hm.Unlock()
-	delete(hm.RemoteIndexes, localIdx)
-}
-
 func (hm *HostMap) MakePrimary(hostinfo *HostInfo) {
 	hm.Lock()
 	defer hm.Unlock()
@@ -432,11 +396,12 @@ func (hm *HostMap) unlockedMakePrimary(hostinfo *HostInfo) {

 func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
 	primary, ok := hm.Hosts[hostinfo.vpnIp]
+	isLastHostinfo := hostinfo.next == nil && hostinfo.prev == nil
 	if ok && primary == hostinfo {
 		// The vpnIp pointer points to the same hostinfo as the local index id, we can remove it
 		delete(hm.Hosts, hostinfo.vpnIp)
 		if len(hm.Hosts) == 0 {
-			hm.Hosts = map[iputil.VpnIp]*HostInfo{}
+			hm.Hosts = map[netip.Addr]*HostInfo{}
 		}

 		if hostinfo.next != nil {
@@ -476,69 +441,60 @@ func (hm *HostMap) unlockedDeleteHostInfo(hostinfo *HostInfo) {
 	}

 	if hm.l.Level >= logrus.DebugLevel {
-		hm.l.WithField("hostMap", m{"mapName": hm.name, "mapTotalSize": len(hm.Hosts),
+		hm.l.WithField("hostMap", m{"mapTotalSize": len(hm.Hosts),
 			"vpnIp": hostinfo.vpnIp, "indexNumber": hostinfo.localIndexId, "remoteIndexNumber": hostinfo.remoteIndexId}).
 			Debug("Hostmap hostInfo deleted")
 	}

+	if isLastHostinfo {
+		// I have lost connectivity to my peers. My relay tunnel is likely broken. Mark the next
+		// hops as 'Disestablished' so that new relay tunnels are created in the future.
+		hm.unlockedDisestablishVpnAddrRelayFor(hostinfo)
+	}
 	// Clean up any local relay indexes for which I am acting as a relay hop
 	for _, localRelayIdx := range hostinfo.relayState.CopyRelayForIdxs() {
 		delete(hm.Relays, localRelayIdx)
 	}
 }

-func (hm *HostMap) QueryIndex(index uint32) (*HostInfo, error) {
-	//TODO: we probably just want to return bool instead of error, or at least a static error
+func (hm *HostMap) QueryIndex(index uint32) *HostInfo {
 	hm.RLock()
 	if h, ok := hm.Indexes[index]; ok {
 		hm.RUnlock()
-		return h, nil
+		return h
 	} else {
 		hm.RUnlock()
-		return nil, errors.New("unable to find index")
+		return nil
 	}
 }

-// Retrieves a HostInfo by Index. Returns whether the HostInfo is primary at time of query.
-// This helper exists so that the hostinfo.prev pointer can be read while the hostmap lock is held.
-func (hm *HostMap) QueryIndexIsPrimary(index uint32) (*HostInfo, bool, error) {
-	//TODO: we probably just want to return bool instead of error, or at least a static error
-	hm.RLock()
-	if h, ok := hm.Indexes[index]; ok {
-		hm.RUnlock()
-		return h, h.prev == nil, nil
-	} else {
-		hm.RUnlock()
-		return nil, false, errors.New("unable to find index")
-	}
-}
-func (hm *HostMap) QueryRelayIndex(index uint32) (*HostInfo, error) {
-	//TODO: we probably just want to return bool instead of error, or at least a static error
+
+func (hm *HostMap) QueryRelayIndex(index uint32) *HostInfo {
 	hm.RLock()
 	if h, ok := hm.Relays[index]; ok {
 		hm.RUnlock()
-		return h, nil
+		return h
 	} else {
 		hm.RUnlock()
-		return nil, errors.New("unable to find index")
+		return nil
 	}
 }

-func (hm *HostMap) QueryReverseIndex(index uint32) (*HostInfo, error) {
+func (hm *HostMap) QueryReverseIndex(index uint32) *HostInfo {
 	hm.RLock()
 	if h, ok := hm.RemoteIndexes[index]; ok {
 		hm.RUnlock()
-		return h, nil
+		return h
 	} else {
 		hm.RUnlock()
-		return nil, fmt.Errorf("unable to find reverse index or connectionstate nil in %s hostmap", hm.name)
+		return nil
 	}
 }

-func (hm *HostMap) QueryVpnIp(vpnIp iputil.VpnIp) (*HostInfo, error) {
+func (hm *HostMap) QueryVpnIp(vpnIp netip.Addr) *HostInfo {
 	return hm.queryVpnIp(vpnIp, nil)
 }

-func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp iputil.VpnIp) (*HostInfo, *Relay, error) {
+func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp netip.Addr) (*HostInfo, *Relay, error) {
 	hm.RLock()
 	defer hm.RUnlock()

@@ -556,26 +512,41 @@ func (hm *HostMap) QueryVpnIpRelayFor(targetIp, relayHostIp iputil.VpnIp) (*Host
 	return nil, nil, errors.New("unable to find host with relay")
 }

-// PromoteBestQueryVpnIp will attempt to lazily switch to the best remote every
-// `PromoteEvery` calls to this function for a given host.
-func (hm *HostMap) PromoteBestQueryVpnIp(vpnIp iputil.VpnIp, ifce *Interface) (*HostInfo, error) {
-	return hm.queryVpnIp(vpnIp, ifce)
+func (hm *HostMap) unlockedDisestablishVpnAddrRelayFor(hi *HostInfo) {
+	for _, relayHostIp := range hi.relayState.CopyRelayIps() {
+		if h, ok := hm.Hosts[relayHostIp]; ok {
+			for h != nil {
+				h.relayState.UpdateRelayForByIpState(hi.vpnIp, Disestablished)
+				h = h.next
+			}
+		}
+	}
+	for _, rs := range hi.relayState.CopyAllRelayFor() {
+		if rs.Type == ForwardingType {
+			if h, ok := hm.Hosts[rs.PeerIp]; ok {
+				for h != nil {
+					h.relayState.UpdateRelayForByIpState(hi.vpnIp, Disestablished)
+					h = h.next
+				}
+			}
+		}
+	}
 }

-func (hm *HostMap) queryVpnIp(vpnIp iputil.VpnIp, promoteIfce *Interface) (*HostInfo, error) {
+func (hm *HostMap) queryVpnIp(vpnIp netip.Addr, promoteIfce *Interface) *HostInfo {
 	hm.RLock()
 	if h, ok := hm.Hosts[vpnIp]; ok {
 		hm.RUnlock()
 		// Do not attempt promotion if you are a lighthouse
 		if promoteIfce != nil && !promoteIfce.lightHouse.amLighthouse {
-			h.TryPromoteBest(hm.preferredRanges, promoteIfce)
+			h.TryPromoteBest(hm.GetPreferredRanges(), promoteIfce)
 		}
-		return h, nil
+		return h
 	}

 	hm.RUnlock()
-	return nil, errors.New("unable to find host")
+	return nil
 }

 // unlockedAddHostInfo assumes you have a write-lock and will add a hostinfo object to the hostmap Indexes and RemoteIndexes maps.
@@ -598,7 +569,7 @@ func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
 	hm.RemoteIndexes[hostinfo.remoteIndexId] = hostinfo

 	if hm.l.Level >= logrus.DebugLevel {
-		hm.l.WithField("hostMap", m{"mapName": hm.name, "vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts),
+		hm.l.WithField("hostMap", m{"vpnIp": hostinfo.vpnIp, "mapTotalSize": len(hm.Hosts),
 			"hostinfo": m{"existing": true, "localIndexId": hostinfo.localIndexId, "hostId": hostinfo.vpnIp}}).
 			Debug("Hostmap vpnIp added")
 	}
@@ -614,19 +585,39 @@ func (hm *HostMap) unlockedAddHostInfo(hostinfo *HostInfo, f *Interface) {
 	}
 }

+func (hm *HostMap) GetPreferredRanges() []netip.Prefix {
+	//NOTE: if preferredRanges is ever not stored before a load this will fail to dereference a nil pointer
+	return *hm.preferredRanges.Load()
+}
+
+func (hm *HostMap) ForEachVpnIp(f controlEach) {
+	hm.RLock()
+	defer hm.RUnlock()
+
+	for _, v := range hm.Hosts {
+		f(v)
+	}
+}
+
+func (hm *HostMap) ForEachIndex(f controlEach) {
+	hm.RLock()
+	defer hm.RUnlock()
+
+	for _, v := range hm.Indexes {
+		f(v)
+	}
+}
+
 // TryPromoteBest handles re-querying lighthouses and probing for better paths
 // NOTE: It is an error to call this if you are a lighthouse since they should not roam clients!
-func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface) {
+func (i *HostInfo) TryPromoteBest(preferredRanges []netip.Prefix, ifce *Interface) {
 	c := i.promoteCounter.Add(1)
-	if c%PromoteEvery == 0 {
-		// The lock here is currently protecting i.remote access
-		i.RLock()
+	if c%ifce.tryPromoteEvery.Load() == 0 {
 		remote := i.remote
-		i.RUnlock()

 		// return early if we are already on a preferred remote
-		if remote != nil {
-			rIP := remote.IP
+		if remote.IsValid() {
+			rIP := remote.Addr()
 			for _, l := range preferredRanges {
 				if l.Contains(rIP) {
 					return
@@ -634,8 +625,8 @@ func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface)
 			}
 		}

-		i.remotes.ForEach(preferredRanges, func(addr *udp.Addr, preferred bool) {
-			if remote != nil && (addr == nil || !preferred) {
+		i.remotes.ForEach(preferredRanges, func(addr netip.AddrPort, preferred bool) {
+			if remote.IsValid() && (!addr.IsValid() || !preferred) {
 				return
 			}

@@ -646,65 +637,17 @@ func (i *HostInfo) TryPromoteBest(preferredRanges []*net.IPNet, ifce *Interface)
 	}

 	// Re query our lighthouses for new remotes occasionally
-	if c%ReQueryEvery == 0 && ifce.lightHouse != nil {
-		ifce.lightHouse.QueryServer(i.vpnIp, ifce)
-	}
-}
-
-func (i *HostInfo) cachePacket(l *logrus.Logger, t header.MessageType, st header.MessageSubType, packet []byte, f packetCallback, m *cachedPacketMetrics) {
-	//TODO: return the error so we can log with more context
-	if len(i.packetStore) < 100 {
-		tempPacket := make([]byte, len(packet))
-		copy(tempPacket, packet)
-		//l.WithField("trace", string(debug.Stack())).Error("Caching packet", tempPacket)
-		i.packetStore = append(i.packetStore, &cachedPacket{t, st, f, tempPacket})
-		if l.Level >= logrus.DebugLevel {
-			i.logger(l).
-				WithField("length", len(i.packetStore)).
-				WithField("stored", true).
-				Debugf("Packet store")
-		}
-	} else if l.Level >= logrus.DebugLevel {
-		m.dropped.Inc(1)
-		i.logger(l).
-			WithField("length", len(i.packetStore)).
-			WithField("stored", false).
-			Debugf("Packet store")
+	if c%ifce.reQueryEvery.Load() == 0 && ifce.lightHouse != nil {
+		now := time.Now().UnixNano()
+		if now < i.nextLHQuery.Load() {
+			return
+		}
+
+		i.nextLHQuery.Store(now + ifce.reQueryWait.Load())
+		ifce.lightHouse.QueryServer(i.vpnIp)
 	}
 }
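The nextLHQuery gate above is a lock-free rate limiter: an atomic nanosecond timestamp marking the earliest moment a lighthouse re-query is allowed, refreshed on each successful query. A minimal runnable sketch of the same idea (note that, like the original, the check-then-store is not a single atomic operation, which is acceptable here because an occasional extra query is harmless):

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// allowQuery returns true at most once per wait window.
func allowQuery(next *atomic.Int64, wait time.Duration) bool {
	now := time.Now().UnixNano()
	if now < next.Load() {
		return false // still inside the quiet period
	}
	next.Store(now + int64(wait))
	return true
}

func main() {
	var next atomic.Int64
	fmt.Println(allowQuery(&next, time.Minute)) // true
	fmt.Println(allowQuery(&next, time.Minute)) // false, re-query suppressed
}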

 // handshakeComplete will set the connection as ready to communicate, as well as flush any stored packets
 func (i *HostInfo) handshakeComplete(l *logrus.Logger, m *cachedPacketMetrics) {
 	//TODO: I'm not certain the distinction between handshake complete and ConnectionState being ready matters because:
 	//TODO: HandshakeComplete means send stored packets and ConnectionState.ready means we are ready to send
 	//TODO: if the transition from HandhsakeComplete to ConnectionState.ready happens all within this function they are identical

 	i.ConnectionState.queueLock.Lock()
 	i.HandshakeComplete = true
 	//TODO: this should be managed by the handshake state machine to set it based on how many handshake were seen.
 	// Clamping it to 2 gets us out of the woods for now
 	i.ConnectionState.messageCounter.Store(2)

 	if l.Level >= logrus.DebugLevel {
 		i.logger(l).Debugf("Sending %d stored packets", len(i.packetStore))
 	}

 	if len(i.packetStore) > 0 {
 		nb := make([]byte, 12, 12)
 		out := make([]byte, mtu)
 		for _, cp := range i.packetStore {
 			cp.callback(cp.messageType, cp.messageSubType, i, cp.packet, nb, out)
 		}
 		m.sent.Inc(int64(len(i.packetStore)))
 	}

 	i.remotes.ResetBlockedRemotes()
 	i.packetStore = make([]*cachedPacket, 0)
 	i.ConnectionState.ready = true
 	i.ConnectionState.queueLock.Unlock()
 }

 func (i *HostInfo) GetCert() *cert.NebulaCertificate {
 	if i.ConnectionState != nil {
 		return i.ConnectionState.peerCert
@@ -712,23 +655,23 @@ func (i *HostInfo) GetCert() *cert.NebulaCertificate {
 	return nil
 }

-func (i *HostInfo) SetRemote(remote *udp.Addr) {
+func (i *HostInfo) SetRemote(remote netip.AddrPort) {
 	// We copy here because we likely got this remote from a source that reuses the object
-	if !i.remote.Equals(remote) {
-		i.remote = remote.Copy()
-		i.remotes.LearnRemote(i.vpnIp, remote.Copy())
+	if i.remote != remote {
+		i.remote = remote
+		i.remotes.LearnRemote(i.vpnIp, remote)
 	}
 }

 // SetRemoteIfPreferred returns true if the remote was changed. The lastRoam
 // time on the HostInfo will also be updated.
-func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
-	if newRemote == nil {
+func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote netip.AddrPort) bool {
+	if !newRemote.IsValid() {
 		// relays have nil udp Addrs
 		return false
 	}
 	currentRemote := i.remote
-	if currentRemote == nil {
+	if !currentRemote.IsValid() {
 		i.SetRemote(newRemote)
 		return true
 	}
@@ -736,13 +679,13 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
 	// NOTE: We do this loop here instead of calling `isPreferred` in
 	// remote_list.go so that we only have to loop over preferredRanges once.
 	newIsPreferred := false
-	for _, l := range hm.preferredRanges {
+	for _, l := range hm.GetPreferredRanges() {
 		// return early if we are already on a preferred remote
-		if l.Contains(currentRemote.IP) {
+		if l.Contains(currentRemote.Addr()) {
 			return false
 		}

-		if l.Contains(newRemote.IP) {
+		if l.Contains(newRemote.Addr()) {
 			newIsPreferred = true
 		}
 	}
@@ -750,7 +693,7 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
 	if newIsPreferred {
 		// Consider this a roaming event
 		i.lastRoam = time.Now()
-		i.lastRoamRemote = currentRemote.Copy()
+		i.lastRoamRemote = currentRemote

 		i.SetRemote(newRemote)

@@ -760,27 +703,26 @@ func (i *HostInfo) SetRemoteIfPreferred(hm *HostMap, newRemote *udp.Addr) bool {
 	return false
 }

 func (i *HostInfo) RecvErrorExceeded() bool {
 	if i.recvError < 3 {
 		i.recvError += 1
 		return false
 	}
 	return true
 }

 func (i *HostInfo) CreateRemoteCIDR(c *cert.NebulaCertificate) {
 	if len(c.Details.Ips) == 1 && len(c.Details.Subnets) == 0 {
 		// Simple case, no CIDRTree needed
 		return
 	}

-	remoteCidr := cidr.NewTree4()
+	remoteCidr := new(bart.Table[struct{}])
 	for _, ip := range c.Details.Ips {
-		remoteCidr.AddCIDR(&net.IPNet{IP: ip.IP, Mask: net.IPMask{255, 255, 255, 255}}, struct{}{})
+		//TODO: IPV6-WORK what to do when ip is invalid?
+		nip, _ := netip.AddrFromSlice(ip.IP)
+		nip = nip.Unmap()
+		remoteCidr.Insert(netip.PrefixFrom(nip, nip.BitLen()), struct{}{})
 	}

 	for _, n := range c.Details.Subnets {
-		remoteCidr.AddCIDR(n, struct{}{})
+		//TODO: IPV6-WORK what to do when ip is invalid?
+		nip, _ := netip.AddrFromSlice(n.IP)
+		nip = nip.Unmap()
+		bits, _ := n.Mask.Size()
+		remoteCidr.Insert(netip.PrefixFrom(nip, bits), struct{}{})
 	}
 	i.remoteCidr = remoteCidr
 }
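CreateRemoteCIDR now builds its membership table on gaissmai/bart, a routing-table type keyed by netip.Prefix; using struct{} as the payload signals that only the longest-prefix match itself matters. A small sketch of the same structure, assuming bart's Insert and Lookup methods as used in the code above:

package main

import (
	"fmt"
	"net/netip"

	"github.com/gaissmai/bart"
)

func main() {
	t := new(bart.Table[struct{}])

	// A host route: a /32 (or /128 for IPv6) built from the address itself.
	ip := netip.MustParseAddr("10.1.2.3")
	t.Insert(netip.PrefixFrom(ip, ip.BitLen()), struct{}{})

	// A subnet entry, like one of the certificate's Subnets.
	t.Insert(netip.MustParsePrefix("192.168.0.0/16"), struct{}{})

	_, ok := t.Lookup(netip.MustParseAddr("192.168.44.5"))
	fmt.Println(ok) // true, matched via the /16
}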

@@ -805,9 +747,9 @@ func (i *HostInfo) logger(l *logrus.Logger) *logrus.Entry {

 // Utility functions

-func localIps(l *logrus.Logger, allowList *LocalAllowList) *[]net.IP {
+func localIps(l *logrus.Logger, allowList *LocalAllowList) []netip.Addr {
 	//FIXME: This function is pretty garbage
-	var ips []net.IP
+	var ips []netip.Addr
 	ifaces, _ := net.Interfaces()
 	for _, i := range ifaces {
 		allow := allowList.AllowName(i.Name)
@@ -829,20 +771,29 @@ func localIps(l *logrus.Logger, allowList *LocalAllowList) *[]net.IP {
 			ip = v.IP
 		}

+		nip, ok := netip.AddrFromSlice(ip)
+		if !ok {
+			if l.Level >= logrus.DebugLevel {
+				l.WithField("localIp", ip).Debug("ip was invalid for netip")
+			}
+			continue
+		}
+		nip = nip.Unmap()
+
 		//TODO: Filtering out link local for now, this is probably the most correct thing
 		//TODO: Would be nice to filter out SLAAC MAC based ips as well
-		if ip.IsLoopback() == false && !ip.IsLinkLocalUnicast() {
-			allow := allowList.Allow(ip)
+		if nip.IsLoopback() == false && nip.IsLinkLocalUnicast() == false {
+			allow := allowList.Allow(nip)
 			if l.Level >= logrus.TraceLevel {
-				l.WithField("localIp", ip).WithField("allow", allow).Trace("localAllowList.Allow")
+				l.WithField("localIp", nip).WithField("allow", allow).Trace("localAllowList.Allow")
 			}
 			if !allow {
 				continue
 			}

-			ips = append(ips, ip)
+			ips = append(ips, nip)
 		}
 	}
-	return &ips
+	return ips
 }
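The AddrFromSlice plus Unmap pairing in localIps matters more than it looks: legacy net.IP values are frequently 16 bytes even for IPv4 addresses, so the straight conversion yields an IPv4-mapped IPv6 address, and without Unmap the resulting netip.Addr would not compare equal to a plain IPv4 key in maps like HostMap.Hosts. A small runnable demonstration:

package main

import (
	"fmt"
	"net"
	"net/netip"
)

func main() {
	legacy := net.ParseIP("10.1.1.5") // 16-byte internal representation
	nip, ok := netip.AddrFromSlice(legacy)
	fmt.Println(ok, nip)           // true ::ffff:10.1.1.5
	fmt.Println(nip.Unmap())       // 10.1.1.5
	fmt.Println(nip.Unmap().Is4()) // true: now a plain IPv4 netip.Addr
}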